🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)

* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)

* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)

* refactor: move tokens.js over to packages/api and update imports

* refactor: port tokens.js to typescript

* refactor: move helpers.js over to packages/api and update imports

* refactor: port helpers.js to typescript

* refactor: move anthropic/llm.js over to packages/api and update imports

* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js

* refactor: move llm.spec.js over to packages/api and update import

* refactor: port llm.spec.js over to typescript

* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

feat: add anthropic llm config support for openai-like (custom) endpoints

* fix: missed compiler / type issues from addition of getAnthropicLLMConfig

* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values

* WIP: first pass, decouple `llmConfig` from `configOptions`

* chore: update import path for OpenAI configuration from 'llm' to 'config'

* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions

* refactor: cleanup type, introduce openai transform from alt provider

* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports

* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json

* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']

* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction

* refactor: conform userId field for anthropic/openai, cleanup anthropic typing

* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations

* ci: replace userId with user in clientOptions for getLLMConfig

* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig

* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)

* test: add unit tests for getOpenAIConfig with various Anthropic model configurations

* test: enhance Anthropic compatibility tests with addParams and dropParams handling

* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json

* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
Author: Dustin Healy · Date: 2025-09-08 11:35:29 -07:00 (committed via GitHub)
Commit: c6ecf0095b · Parent: 7de6f6e44c
40 changed files with 1736 additions and 432 deletions
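
Before the file listings, a quick orientation: the net effect of this PR is that an Anthropic-style parameter set (extended thinking, prompt caching, thinking budgets) can now be produced for OpenAI-compatible custom endpoints such as a LiteLLM proxy. Below is a minimal sketch built on the getLLMConfig signature introduced in llm.ts further down; the import path, proxy URL, and user id are illustrative placeholders, not the repository's exact wiring:

```typescript
import { getLLMConfig } from './anthropic/llm'; // path assumed for illustration

const { llmConfig, tools } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
  // An OpenAI-compatible proxy (e.g. LiteLLM); placeholder URL.
  reverseProxyUrl: 'http://localhost:4000/v1',
  modelOptions: {
    model: 'claude-3-7-sonnet-latest',
    promptCache: true, // yields the anthropic-beta defaultHeaders seen in helpers.ts
    thinking: true,
    thinkingBudget: 2000,
    user: 'user-123',
  },
});

// llmConfig.clientOptions.baseURL === 'http://localhost:4000/v1'
// llmConfig.thinking deep-equals { type: 'enabled', budget_tokens: 2000 }
// tools is [] here, since web_search was not requested
```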

anthropic/helpers.ts (new file)

@@ -0,0 +1,132 @@
import { logger } from '@librechat/data-schemas';
import { AnthropicClientOptions } from '@librechat/agents';
import { EModelEndpoint, anthropicSettings } from 'librechat-data-provider';
import { matchModelName } from '~/utils/tokens';
/**
* Determines whether a given Anthropic model supports prompt caching.
* @param {string} modelName
* @returns {boolean}
*/
function checkPromptCacheSupport(modelName: string): boolean {
const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic) ?? '';
if (
modelMatch.includes('claude-3-5-sonnet-latest') ||
modelMatch.includes('claude-3.5-sonnet-latest')
) {
return false;
}
return (
/claude-3[-.]7/.test(modelMatch) ||
/claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
/claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch)
);
}
/**
* Gets the appropriate headers for Claude models with cache control
* @param {string} model The model name
* @param {boolean} supportsCacheControl Whether the model supports cache control
* @returns {Record<string, string>|undefined} The headers object or undefined if not applicable
*/
function getClaudeHeaders(
model: string,
supportsCacheControl: boolean,
): Record<string, string> | undefined {
if (!supportsCacheControl) {
return undefined;
}
if (/claude-3[-.]5-sonnet/.test(model)) {
return {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
};
} else if (/claude-3[-.]7/.test(model)) {
return {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
};
} else if (/claude-sonnet-4/.test(model)) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
};
} else if (
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
} else {
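// Remaining cache-capable models (e.g. claude-3-5-haiku, claude-3-opus) get prompt caching only.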
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
}
}
/**
* Configures reasoning-related options for Claude models
* @param {AnthropicClientOptions & { max_tokens?: number }} anthropicInput The request options object
* @param {Object} extendedOptions Additional client configuration options
* @param {boolean} extendedOptions.thinking Whether thinking is enabled in client config
* @param {number|null} extendedOptions.thinkingBudget The token budget for thinking
* @returns {Object} Updated request options
*/
function configureReasoning(
anthropicInput: AnthropicClientOptions & { max_tokens?: number },
extendedOptions: { thinking?: boolean; thinkingBudget?: number | null } = {},
): AnthropicClientOptions & { max_tokens?: number } {
const updatedOptions = { ...anthropicInput };
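// Callers may pass either the snake_case (max_tokens) or camelCase (maxTokens) field; accept both.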
const currentMaxTokens = updatedOptions.max_tokens ?? updatedOptions.maxTokens;
if (
extendedOptions.thinking &&
updatedOptions?.model &&
(/claude-3[-.]7/.test(updatedOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(updatedOptions.model))
) {
updatedOptions.thinking = {
...updatedOptions.thinking,
type: 'enabled',
} as { type: 'enabled'; budget_tokens: number };
}
if (
updatedOptions.thinking != null &&
extendedOptions.thinkingBudget != null &&
updatedOptions.thinking.type === 'enabled'
) {
updatedOptions.thinking = {
...updatedOptions.thinking,
budget_tokens: extendedOptions.thinkingBudget,
};
}
if (
updatedOptions.thinking != null &&
updatedOptions.thinking.type === 'enabled' &&
(currentMaxTokens == null || updatedOptions.thinking.budget_tokens > currentMaxTokens)
) {
const maxTokens = anthropicSettings.maxOutputTokens.reset(updatedOptions.model ?? '');
updatedOptions.max_tokens = currentMaxTokens ?? maxTokens;
logger.warn(
updatedOptions.max_tokens === maxTokens
? '[AnthropicClient] max_tokens is not defined while thinking is enabled. Setting max_tokens to model default.'
: `[AnthropicClient] thinking budget_tokens (${updatedOptions.thinking.budget_tokens}) exceeds max_tokens (${updatedOptions.max_tokens}). Adjusting budget_tokens.`,
);
updatedOptions.thinking.budget_tokens = Math.min(
updatedOptions.thinking.budget_tokens,
Math.floor(updatedOptions.max_tokens * 0.9),
);
}
return updatedOptions;
}
export { checkPromptCacheSupport, getClaudeHeaders, configureReasoning };
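
Taken together, the three helpers compose as follows; a small usage sketch with an arbitrary model name and token values (the clamp arithmetic mirrors the code above):

```typescript
import { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } from './helpers';

const model = 'claude-sonnet-4-20250514';

// Cache support feeds header selection.
const supportsCache = checkPromptCacheSupport(model); // expected true for sonnet-4 naming
const headers = getClaudeHeaders(model, supportsCache);
// => { 'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07' }

// Thinking is switched on, then its budget is clamped to 90% of max_tokens.
const configured = configureReasoning(
  { model, maxTokens: 4096 },
  { thinking: true, thinkingBudget: 8192 },
);
// configured.max_tokens === 4096
// configured.thinking deep-equals { type: 'enabled', budget_tokens: 3686 } // floor(4096 * 0.9)
```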

anthropic/index.ts (new file)

@@ -0,0 +1,2 @@
export * from './helpers';
export * from './llm';

anthropic/llm.spec.ts: file diff suppressed because it is too large.

anthropic/llm.ts (new file)

@@ -0,0 +1,105 @@
import { Dispatcher, ProxyAgent } from 'undici';
import { AnthropicClientOptions } from '@librechat/agents';
import { anthropicSettings, removeNullishValues } from 'librechat-data-provider';
import type { AnthropicLLMConfigResult, AnthropicConfigOptions } from '~/types/anthropic';
import { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } from './helpers';
/**
* Generates configuration options for creating an Anthropic language model (LLM) instance.
* @param apiKey - The API key for authentication with Anthropic.
* @param [options={}] - Additional options for configuring the LLM.
* @returns Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
*/
function getLLMConfig(
apiKey?: string,
options: AnthropicConfigOptions = {} as AnthropicConfigOptions,
): AnthropicLLMConfigResult {
const systemOptions = {
thinking: options.modelOptions?.thinking ?? anthropicSettings.thinking.default,
promptCache: options.modelOptions?.promptCache ?? anthropicSettings.promptCache.default,
thinkingBudget:
options.modelOptions?.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
};
/** When porting from JavaScript, no clean way was found to delete these overlapping keys
* while still looping over the object, so they are removed manually until a better approach presents itself.
*/
if (options.modelOptions) {
delete options.modelOptions.thinking;
delete options.modelOptions.promptCache;
delete options.modelOptions.thinkingBudget;
} else {
throw new Error('No modelOptions provided');
}
const defaultOptions = {
model: anthropicSettings.model.default,
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
stream: true,
};
const mergedOptions = Object.assign(defaultOptions, options.modelOptions);
let requestOptions: AnthropicClientOptions & { stream?: boolean } = {
apiKey,
model: mergedOptions.model,
stream: mergedOptions.stream,
temperature: mergedOptions.temperature,
stopSequences: mergedOptions.stop,
maxTokens:
mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
clientOptions: {},
invocationKwargs: {
metadata: {
user_id: mergedOptions.user,
},
},
};
requestOptions = configureReasoning(requestOptions, systemOptions);
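// Claude 3.7 does not accept top_p/top_k while extended thinking is enabled, so they are only
// forwarded for 3.7 when thinking is unset; other models receive them unconditionally.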
if (!/claude-3[-.]7/.test(mergedOptions.model)) {
requestOptions.topP = mergedOptions.topP;
requestOptions.topK = mergedOptions.topK;
} else if (requestOptions.thinking == null) {
requestOptions.topP = mergedOptions.topP;
requestOptions.topK = mergedOptions.topK;
}
const supportsCacheControl =
systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model ?? '');
const headers = getClaudeHeaders(requestOptions.model ?? '', supportsCacheControl);
if (headers && requestOptions.clientOptions) {
requestOptions.clientOptions.defaultHeaders = headers;
}
if (options.proxy && requestOptions.clientOptions) {
const proxyAgent = new ProxyAgent(options.proxy);
requestOptions.clientOptions.fetchOptions = {
dispatcher: proxyAgent,
};
}
if (options.reverseProxyUrl && requestOptions.clientOptions) {
requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
requestOptions.anthropicApiUrl = options.reverseProxyUrl;
}
const tools = [];
if (mergedOptions.web_search) {
tools.push({
type: 'web_search_20250305',
name: 'web_search',
});
}
return {
tools,
llmConfig: removeNullishValues(
requestOptions as Record<string, unknown>,
) as AnthropicClientOptions & { clientOptions?: { fetchOptions?: { dispatcher: Dispatcher } } },
};
}
export { getLLMConfig };
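
A usage sketch for getLLMConfig (values illustrative; removeNullishValues drops whatever stays unset):

```typescript
const { llmConfig, tools } = getLLMConfig('sk-ant-...', {
  modelOptions: {
    model: 'claude-3-5-sonnet-20241022',
    maxOutputTokens: 2048,
    temperature: 0.7,
    stop: ['\n\nHuman:'],
    web_search: true,
    promptCache: true,
  },
});

// tools => [{ type: 'web_search_20250305', name: 'web_search' }]
// llmConfig.maxTokens === 2048 and llmConfig.stopSequences deep-equals ['\n\nHuman:'];
// llmConfig.clientOptions.defaultHeaders carries the 3.5-sonnet beta headers.
```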