🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)

* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)

* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)

* refactor: move tokens.js over to packages/api and update imports

* refactor: port tokens.js to typescript

* refactor: move helpers.js over to packages/api and update imports

* refactor: port helpers.js to typescript

* refactor: move anthropic/llm.js over to packages/api and update imports

* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js

* refactor: move llm.spec.js over to packages/api and update import

* refactor: port llm.spec.js over to typescript

* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

feat: add anthropic llm config support for openai-like (custom) endpoints

* fix: missed compiler / type issues from addition of getAnthropicLLMConfig

* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values

* WIP: first pass, decouple `llmConfig` from `configOptions`

* chore: update import path for OpenAI configuration from 'llm' to 'config'

* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions

* refactor: cleanup type, introduce openai transform from alt provider

* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports

* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json

* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']

* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction

* refactor: conform userId field for anthropic/openai, cleanup anthropic typing

* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations

* ci: replace userId with user in clientOptions for getLLMConfig

* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig

* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)

* test: add unit tests for getOpenAIConfig with various Anthropic model configurations

* test: enhance Anthropic compatibility tests with addParams and dropParams handling

* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json

* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json
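
Taken together, these commits let an OpenAI-compatible custom endpoint (e.g. a LiteLLM proxy fronting Claude) expose Anthropic's parameter set. A rough sketch of the intended flow, using the function names from the commit messages above; the exact signatures and option shapes here are assumptions for illustration, not the shipped API:

const { getAnthropicLLMConfig, transformToOpenAIConfig } = require('@librechat/api');

// 1) Build the Anthropic-native config (thinking, promptCache, thinkingBudget, ...).
const { llmConfig } = getAnthropicLLMConfig(process.env.ANTHROPIC_API_KEY, {
  modelOptions: { model: 'claude-3-7-sonnet-latest', thinking: true, thinkingBudget: 2000 },
});

// 2) Re-shape it for the OpenAI-style client used by custom endpoints, recording the
// source endpoint so Anthropic-specific pieces (defaultHeaders for prompt caching,
// addParams/dropParams handling) can be applied downstream.
const openAICompatible = transformToOpenAIConfig({ llmConfig, fromEndpoint: 'anthropic' });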

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
Dustin Healy 2025-09-08 11:35:29 -07:00 committed by GitHub
parent 7de6f6e44c
commit c6ecf0095b
40 changed files with 1736 additions and 432 deletions


@@ -872,11 +872,10 @@ class AgentClient extends BaseClient {
     if (agent.useLegacyContent === true) {
       messages = formatContentStrings(messages);
     }
-    if (
-      agent.model_parameters?.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes(
-        'prompt-caching',
-      )
-    ) {
+    const defaultHeaders =
+      agent.model_parameters?.clientOptions?.defaultHeaders ??
+      agent.model_parameters?.configuration?.defaultHeaders;
+    if (defaultHeaders?.['anthropic-beta']?.includes('prompt-caching')) {
       messages = addCacheControl(messages);
     }
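
This hunk pairs with the custom-endpoint work above: for Anthropic-based custom endpoints (e.g. LiteLLM) the beta headers arrive on `configuration` rather than `clientOptions`, so both locations are consulted before cache control is applied. A minimal sketch of the lookup, with an illustrative object shape:

// Either location may carry the Anthropic beta headers, depending on endpoint type.
const modelParameters = {
  configuration: { defaultHeaders: { 'anthropic-beta': 'prompt-caching-2024-07-31' } },
};
const defaultHeaders =
  modelParameters.clientOptions?.defaultHeaders ?? modelParameters.configuration?.defaultHeaders;
console.log(defaultHeaders?.['anthropic-beta']?.includes('prompt-caching')); // true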


@@ -1,7 +1,7 @@
 const { v4 } = require('uuid');
 const { sleep } = require('@librechat/agents');
 const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
 const {
   Time,
   Constants,
@@ -34,7 +34,6 @@ const { checkBalance } = require('~/models/balanceMethods');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
 const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
 const { getOpenAIClient } = require('./helpers');

 /**


@@ -1,7 +1,7 @@
 const { v4 } = require('uuid');
 const { sleep } = require('@librechat/agents');
 const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
 const {
   Time,
   Constants,
@@ -31,7 +31,6 @@ const { checkBalance } = require('~/models/balanceMethods');
 const { getConvo } = require('~/models/Conversation');
 const getLogStores = require('~/cache/getLogStores');
 const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
 const { getOpenAIClient } = require('./helpers');

 /**


@@ -1,6 +1,7 @@
 const { Providers } = require('@librechat/agents');
 const {
   primeResources,
+  getModelMaxTokens,
   extractLibreChatParams,
   optionalChainWithEmptyCheck,
 } = require('@librechat/api');
@@ -17,7 +18,6 @@ const { getProviderConfig } = require('~/server/services/Endpoints');
 const { processFiles } = require('~/server/services/Files/process');
 const { getFiles, getToolFilesByIds } = require('~/models/File');
 const { getConvoFiles } = require('~/models/Conversation');
-const { getModelMaxTokens } = require('~/utils');

 /**
  * @param {object} params


@@ -1,118 +0,0 @@
-const { EModelEndpoint, anthropicSettings } = require('librechat-data-provider');
-const { matchModelName } = require('~/utils');
-const { logger } = require('~/config');
-
-/**
- * @param {string} modelName
- * @returns {boolean}
- */
-function checkPromptCacheSupport(modelName) {
-  const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
-  if (
-    modelMatch.includes('claude-3-5-sonnet-latest') ||
-    modelMatch.includes('claude-3.5-sonnet-latest')
-  ) {
-    return false;
-  }
-
-  return (
-    /claude-3[-.]7/.test(modelMatch) ||
-    /claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
-    /claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
-    /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
-    /claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
-    /claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch)
-  );
-}
-
-/**
- * Gets the appropriate headers for Claude models with cache control
- * @param {string} model The model name
- * @param {boolean} supportsCacheControl Whether the model supports cache control
- * @returns {AnthropicClientOptions['extendedOptions']['defaultHeaders']|undefined} The headers object or undefined if not applicable
- */
-function getClaudeHeaders(model, supportsCacheControl) {
-  if (!supportsCacheControl) {
-    return undefined;
-  }
-
-  if (/claude-3[-.]5-sonnet/.test(model)) {
-    return {
-      'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
-    };
-  } else if (/claude-3[-.]7/.test(model)) {
-    return {
-      'anthropic-beta':
-        'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
-    };
-  } else if (/claude-sonnet-4/.test(model)) {
-    return {
-      'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
-    };
-  } else if (
-    /claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
-    /claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
-    /claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
-  ) {
-    return {
-      'anthropic-beta': 'prompt-caching-2024-07-31',
-    };
-  } else {
-    return {
-      'anthropic-beta': 'prompt-caching-2024-07-31',
-    };
-  }
-}
-
-/**
- * Configures reasoning-related options for Claude models
- * @param {AnthropicClientOptions & { max_tokens?: number }} anthropicInput The request options object
- * @param {Object} extendedOptions Additional client configuration options
- * @param {boolean} extendedOptions.thinking Whether thinking is enabled in client config
- * @param {number|null} extendedOptions.thinkingBudget The token budget for thinking
- * @returns {Object} Updated request options
- */
-function configureReasoning(anthropicInput, extendedOptions = {}) {
-  const updatedOptions = { ...anthropicInput };
-  const currentMaxTokens = updatedOptions.max_tokens ?? updatedOptions.maxTokens;
-  if (
-    extendedOptions.thinking &&
-    updatedOptions?.model &&
-    (/claude-3[-.]7/.test(updatedOptions.model) ||
-      /claude-(?:sonnet|opus|haiku)-[4-9]/.test(updatedOptions.model))
-  ) {
-    updatedOptions.thinking = {
-      type: 'enabled',
-    };
-  }
-  if (updatedOptions.thinking != null && extendedOptions.thinkingBudget != null) {
-    updatedOptions.thinking = {
-      ...updatedOptions.thinking,
-      budget_tokens: extendedOptions.thinkingBudget,
-    };
-  }
-  if (
-    updatedOptions.thinking != null &&
-    (currentMaxTokens == null || updatedOptions.thinking.budget_tokens > currentMaxTokens)
-  ) {
-    const maxTokens = anthropicSettings.maxOutputTokens.reset(updatedOptions.model);
-    updatedOptions.max_tokens = currentMaxTokens ?? maxTokens;
-
-    logger.warn(
-      updatedOptions.max_tokens === maxTokens
-        ? '[AnthropicClient] max_tokens is not defined while thinking is enabled. Setting max_tokens to model default.'
-        : `[AnthropicClient] thinking budget_tokens (${updatedOptions.thinking.budget_tokens}) exceeds max_tokens (${updatedOptions.max_tokens}). Adjusting budget_tokens.`,
-    );
-
-    updatedOptions.thinking.budget_tokens = Math.min(
-      updatedOptions.thinking.budget_tokens,
-      Math.floor(updatedOptions.max_tokens * 0.9),
-    );
-  }
-  return updatedOptions;
-}
-
-module.exports = { checkPromptCacheSupport, getClaudeHeaders, configureReasoning };
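
For orientation, a sketch of how these three helpers composed at call time (illustrative values; the same logic now lives in packages/api as TypeScript):

const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');

const model = 'claude-3-7-sonnet-latest';
let requestOptions = { model, maxTokens: 8192 };

// Turns on `thinking: { type: 'enabled', budget_tokens: 2000 }` for supported models;
// budget_tokens is clamped to 90% of max_tokens only when it would exceed it.
requestOptions = configureReasoning(requestOptions, { thinking: true, thinkingBudget: 2000 });

// Beta headers are attached only when the model supports prompt caching.
const headers = getClaudeHeaders(model, checkPromptCacheSupport(model));
// -> { 'anthropic-beta': 'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31' }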


@@ -1,6 +1,6 @@
+const { getLLMConfig } = require('@librechat/api');
 const { EModelEndpoint } = require('librechat-data-provider');
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
-const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
 const AnthropicClient = require('~/app/clients/AnthropicClient');

 const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
@@ -40,7 +40,6 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
     clientOptions = Object.assign(
       {
         proxy: PROXY ?? null,
-        userId: req.user.id,
         reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
         modelOptions: endpointOption?.model_parameters ?? {},
       },
@@ -49,6 +48,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
     if (overrideModel) {
       clientOptions.modelOptions.model = overrideModel;
     }

+    clientOptions.modelOptions.user = req.user.id;
     return getLLMConfig(anthropicApiKey, clientOptions);
   }


@@ -1,103 +0,0 @@
-const { ProxyAgent } = require('undici');
-const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
-const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
-
-/**
- * Generates configuration options for creating an Anthropic language model (LLM) instance.
- *
- * @param {string} apiKey - The API key for authentication with Anthropic.
- * @param {Object} [options={}] - Additional options for configuring the LLM.
- * @param {Object} [options.modelOptions] - Model-specific options.
- * @param {string} [options.modelOptions.model] - The name of the model to use.
- * @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
- * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
- * @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
- * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
- * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
- * @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
- * @param {string} options.userId - The user ID for tracking and personalization.
- * @param {string} [options.proxy] - Proxy server URL.
- * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
- *
- * @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
- */
-function getLLMConfig(apiKey, options = {}) {
-  const systemOptions = {
-    thinking: options.modelOptions.thinking ?? anthropicSettings.thinking.default,
-    promptCache: options.modelOptions.promptCache ?? anthropicSettings.promptCache.default,
-    thinkingBudget: options.modelOptions.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
-  };
-
-  for (let key in systemOptions) {
-    delete options.modelOptions[key];
-  }
-
-  const defaultOptions = {
-    model: anthropicSettings.model.default,
-    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
-    stream: true,
-  };
-
-  const mergedOptions = Object.assign(defaultOptions, options.modelOptions);
-
-  /** @type {AnthropicClientOptions} */
-  let requestOptions = {
-    apiKey,
-    model: mergedOptions.model,
-    stream: mergedOptions.stream,
-    temperature: mergedOptions.temperature,
-    stopSequences: mergedOptions.stop,
-    maxTokens:
-      mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
-    clientOptions: {},
-    invocationKwargs: {
-      metadata: {
-        user_id: options.userId,
-      },
-    },
-  };
-
-  requestOptions = configureReasoning(requestOptions, systemOptions);
-
-  if (!/claude-3[-.]7/.test(mergedOptions.model)) {
-    requestOptions.topP = mergedOptions.topP;
-    requestOptions.topK = mergedOptions.topK;
-  } else if (requestOptions.thinking == null) {
-    requestOptions.topP = mergedOptions.topP;
-    requestOptions.topK = mergedOptions.topK;
-  }
-
-  const supportsCacheControl =
-    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
-  const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
-  if (headers) {
-    requestOptions.clientOptions.defaultHeaders = headers;
-  }
-
-  if (options.proxy) {
-    const proxyAgent = new ProxyAgent(options.proxy);
-    requestOptions.clientOptions.fetchOptions = {
-      dispatcher: proxyAgent,
-    };
-  }
-
-  if (options.reverseProxyUrl) {
-    requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
-    requestOptions.anthropicApiUrl = options.reverseProxyUrl;
-  }
-
-  const tools = [];
-
-  if (mergedOptions.web_search) {
-    tools.push({
-      type: 'web_search_20250305',
-      name: 'web_search',
-    });
-  }
-
-  return {
-    tools,
-    /** @type {AnthropicClientOptions} */
-    llmConfig: removeNullishValues(requestOptions),
-  };
-}
-
-module.exports = { getLLMConfig };
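
For reference, a typical invocation of the removed function (its TypeScript successor in packages/api keeps the same general shape; values here are illustrative):

const { getLLMConfig } = require('./llm');

const { llmConfig, tools } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
  userId: 'user-123', // becomes invocationKwargs.metadata.user_id
  modelOptions: {
    model: 'claude-3-7-sonnet-latest',
    maxOutputTokens: 8192,
    promptCache: true, // moved into systemOptions and used for header selection
    web_search: true, // pushes the web_search_20250305 tool
  },
});
// llmConfig.clientOptions.defaultHeaders['anthropic-beta'] includes 'prompt-caching-2024-07-31'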

[File diff suppressed because it is too large]


@@ -1,3 +1,4 @@
+const { getModelMaxTokens } = require('@librechat/api');
 const { createContentAggregator } = require('@librechat/agents');
 const {
   EModelEndpoint,
@@ -7,7 +8,6 @@ const {
 const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
 const getOptions = require('~/server/services/Endpoints/bedrock/options');
 const AgentClient = require('~/server/controllers/agents/client');
-const { getModelMaxTokens } = require('~/utils');

 const initializeClient = async ({ req, res, endpointOption }) => {
   if (!endpointOption) {


@@ -1,13 +1,13 @@
 const axios = require('axios');
 const { Providers } = require('@librechat/agents');
-const { logAxiosError } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
 const { HttpsProxyAgent } = require('https-proxy-agent');
+const { logAxiosError, inputSchema, processModelData } = require('@librechat/api');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
-const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
 const { OllamaClient } = require('~/app/clients/OllamaClient');
 const { isUserProvided } = require('~/server/utils');
 const getLogStores = require('~/cache/getLogStores');
+const { extractBaseURL } = require('~/utils');

 /**
  * Splits a string by commas and trims each resulting value.


@@ -11,8 +11,8 @@ const {
   getAnthropicModels,
 } = require('./ModelService');

-jest.mock('~/utils', () => {
-  const originalUtils = jest.requireActual('~/utils');
+jest.mock('@librechat/api', () => {
+  const originalUtils = jest.requireActual('@librechat/api');
   return {
     ...originalUtils,
     processModelData: jest.fn((...args) => {
@@ -108,7 +108,7 @@ describe('fetchModels with createTokenConfig true', () => {
   beforeEach(() => {
     // Clears the mock's history before each test
-    const _utils = require('~/utils');
+    const _utils = require('@librechat/api');
     axios.get.mockResolvedValue({ data });
   });

@@ -120,7 +120,7 @@ describe('fetchModels with createTokenConfig true', () => {
       createTokenConfig: true,
     });

-    const { processModelData } = require('~/utils');
+    const { processModelData } = require('@librechat/api');
     expect(processModelData).toHaveBeenCalled();
     expect(processModelData).toHaveBeenCalledWith(data);
   });