Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 00:40:14 +01:00
🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)
* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)
* refactor: move tokens.js over to packages/api and update imports
* refactor: port tokens.js to typescript
* refactor: move helpers.js over to packages/api and update imports
* refactor: port helpers.js to typescript
* refactor: move anthropic/llm.js over to packages/api and update imports
* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js
* refactor: move llm.spec.js over to packages/api and update import
* refactor: port llm.spec.js over to typescript
* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

  feat: add anthropic llm config support for openai-like (custom) endpoints

* fix: missed compiler / type issues from addition of getAnthropicLLMConfig
* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values
* WIP: first pass, decouple `llmConfig` from `configOptions`
* chore: update import path for OpenAI configuration from 'llm' to 'config'
* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions
* refactor: cleanup type, introduce openai transform from alt provider
* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports
* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json
* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']
* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction
* refactor: conform userId field for anthropic/openai, cleanup anthropic typing
* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations
* ci: replace userId with user in clientOptions for getLLMConfig
* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig
* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)
* test: add unit tests for getOpenAIConfig with various Anthropic model configurations
* test: enhance Anthropic compatibility tests with addParams and dropParams handling
* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json
* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
parent 7de6f6e44c
commit c6ecf0095b
40 changed files with 1736 additions and 432 deletions
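The headline change lets an OpenAI-like (custom) endpoint borrow Anthropic's parameter set. As a rough sketch of how the new entry point is exercised, mirroring the call shape used throughout the spec below — the endpoint name, model, proxy URL, and the exact customParams shape are illustrative assumptions drawn from the commit notes, not confirmed API:

    import { getOpenAIConfig } from './config';

    // Hypothetical: an Anthropic model served through a LiteLLM-style,
    // OpenAI-compatible proxy that opts into Anthropic's parameter set.
    const { llmConfig, configOptions } = getOpenAIConfig(
      'sk-litellm-key', // illustrative key
      {
        modelOptions: {
          model: 'claude-3-5-sonnet', // assumed model name
          user: 'some-user',
        },
        reverseProxyUrl: 'http://localhost:4000/v1', // assumed LiteLLM proxy URL
        endpoint: 'litellm',
        endpointType: 'custom',
        // The commit routes custom endpoints with Anthropic defaults through
        // the new getAnthropicLLMConfig; this key name is assumed from the
        // "customParams" items in the commit message above.
        customParams: { defaultParamsEndpoint: 'anthropic' },
      },
      'litellm',
    );

If that reading is right, the returned llmConfig carries the provider parameters while configOptions keeps transport details such as baseURL and defaultHeaders (the latter used for Anthropic prompt caching through LiteLLM), matching the llmConfig/configOptions decoupling described in the commit message.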
431
packages/api/src/endpoints/openai/config.backward-compat.spec.ts
Normal file
@@ -0,0 +1,431 @@
import {
  Verbosity,
  EModelEndpoint,
  ReasoningEffort,
  ReasoningSummary,
} from 'librechat-data-provider';
import { getOpenAIConfig } from './config';

describe('getOpenAIConfig - Backward Compatibility', () => {
  describe('OpenAI endpoint', () => {
    it('should handle GPT-5 model with reasoning and web search', () => {
      const apiKey = 'sk-proj-somekey';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'gpt-5-nano',
          verbosity: Verbosity.high,
          reasoning_effort: ReasoningEffort.high,
          reasoning_summary: ReasoningSummary.detailed,
          useResponsesApi: true,
          web_search: true,
          user: 'some-user',
        },
        proxy: '',
        reverseProxyUrl: null,
        endpoint: EModelEndpoint.openAI,
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'gpt-5-nano',
          useResponsesApi: true,
          user: 'some-user',
          apiKey: 'sk-proj-somekey',
          reasoning: {
            effort: ReasoningEffort.high,
            summary: ReasoningSummary.detailed,
          },
          modelKwargs: {
            text: {
              verbosity: Verbosity.high,
            },
          },
        },
        configOptions: {},
        tools: [
          {
            type: 'web_search_preview',
          },
        ],
      });
    });
  });

  describe('OpenRouter endpoint', () => {
    it('should handle OpenRouter configuration with dropParams and custom headers', () => {
      const apiKey = 'sk-xxxx';
      const endpoint = 'OpenRouter';
      const options = {
        modelOptions: {
          model: 'qwen/qwen3-max',
          user: 'some-user',
        },
        reverseProxyUrl: 'https://gateway.ai.cloudflare.com/v1/account-id/gateway-id/openrouter',
        headers: {
          'x-librechat-thread-id': '{{LIBRECHAT_BODY_CONVERSATIONID}}',
          'x-test-key': '{{TESTING_USER_VAR}}',
        },
        proxy: '',
        dropParams: ['user'],
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'qwen/qwen3-max',
          include_reasoning: true,
          apiKey: 'sk-xxxx',
        },
        configOptions: {
          baseURL: 'https://gateway.ai.cloudflare.com/v1/account-id/gateway-id/openrouter',
          defaultHeaders: {
            'HTTP-Referer': 'https://librechat.ai',
            'X-Title': 'LibreChat',
            'x-librechat-thread-id': '{{LIBRECHAT_BODY_CONVERSATIONID}}',
            'x-test-key': '{{TESTING_USER_VAR}}',
          },
        },
        tools: [],
        provider: 'openrouter',
      });
    });
  });

  describe('Azure OpenAI endpoint', () => {
    it('should handle basic Azure OpenAI configuration', () => {
      const apiKey = 'some_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'gpt-4o',
          user: 'some_user_id',
        },
        reverseProxyUrl: null,
        endpoint: 'azureOpenAI',
        azure: {
          azureOpenAIApiKey: 'some_azure_key',
          azureOpenAIApiInstanceName: 'some_instance_name',
          azureOpenAIApiDeploymentName: 'gpt-4o',
          azureOpenAIApiVersion: '2024-02-15-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'gpt-4o',
          user: 'some_user_id',
          azureOpenAIApiKey: 'some_azure_key',
          azureOpenAIApiInstanceName: 'some_instance_name',
          azureOpenAIApiDeploymentName: 'gpt-4o',
          azureOpenAIApiVersion: '2024-02-15-preview',
        },
        configOptions: {},
        tools: [],
      });
    });

    it('should handle Azure OpenAI with Responses API and reasoning', () => {
      const apiKey = 'some_azure_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'gpt-5',
          reasoning_effort: ReasoningEffort.high,
          reasoning_summary: ReasoningSummary.detailed,
          verbosity: Verbosity.high,
          useResponsesApi: true,
          user: 'some_user_id',
        },
        endpoint: 'azureOpenAI',
        azure: {
          azureOpenAIApiKey: 'some_azure_key',
          azureOpenAIApiInstanceName: 'some_instance_name',
          azureOpenAIApiDeploymentName: 'gpt-5',
          azureOpenAIApiVersion: '2024-12-01-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'gpt-5',
          useResponsesApi: true,
          user: 'some_user_id',
          apiKey: 'some_azure_key',
          reasoning: {
            effort: ReasoningEffort.high,
            summary: ReasoningSummary.detailed,
          },
          modelKwargs: {
            text: {
              verbosity: Verbosity.high,
            },
          },
        },
        configOptions: {
          baseURL: 'https://some_instance_name.openai.azure.com/openai/v1',
          defaultHeaders: {
            'api-key': 'some_azure_key',
          },
          defaultQuery: {
            'api-version': 'preview',
          },
        },
        tools: [],
      });
    });

    it('should handle Azure serverless configuration with dropParams', () => {
      const apiKey = 'some_azure_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'jais-30b-chat',
          user: 'some_user_id',
        },
        reverseProxyUrl: 'https://some_endpoint_name.services.ai.azure.com/models',
        endpoint: 'azureOpenAI',
        headers: {
          'api-key': 'some_azure_key',
        },
        dropParams: ['stream_options', 'user'],
        azure: false as const,
        defaultQuery: {
          'api-version': '2024-05-01-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'jais-30b-chat',
          apiKey: 'some_azure_key',
        },
        configOptions: {
          baseURL: 'https://some_endpoint_name.services.ai.azure.com/models',
          defaultHeaders: {
            'api-key': 'some_azure_key',
          },
          defaultQuery: {
            'api-version': '2024-05-01-preview',
          },
        },
        tools: [],
      });
    });

    it('should handle Azure serverless with user-provided key configuration', () => {
      const apiKey = 'some_azure_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'grok-3',
          user: 'some_user_id',
        },
        reverseProxyUrl: 'https://some_endpoint_name.services.ai.azure.com/models',
        endpoint: 'azureOpenAI',
        headers: {
          'api-key': 'some_azure_key',
        },
        dropParams: ['stream_options', 'user'],
        azure: false as const,
        defaultQuery: {
          'api-version': '2024-05-01-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'grok-3',
          apiKey: 'some_azure_key',
        },
        configOptions: {
          baseURL: 'https://some_endpoint_name.services.ai.azure.com/models',
          defaultHeaders: {
            'api-key': 'some_azure_key',
          },
          defaultQuery: {
            'api-version': '2024-05-01-preview',
          },
        },
        tools: [],
      });
    });

    it('should handle Azure serverless with Mistral model configuration', () => {
      const apiKey = 'some_azure_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'Mistral-Large-2411',
          user: 'some_user_id',
        },
        reverseProxyUrl: 'https://some_endpoint_name.services.ai.azure.com/models',
        endpoint: 'azureOpenAI',
        headers: {
          'api-key': 'some_azure_key',
        },
        dropParams: ['stream_options', 'user'],
        azure: false as const,
        defaultQuery: {
          'api-version': '2024-05-01-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'Mistral-Large-2411',
          apiKey: 'some_azure_key',
        },
        configOptions: {
          baseURL: 'https://some_endpoint_name.services.ai.azure.com/models',
          defaultHeaders: {
            'api-key': 'some_azure_key',
          },
          defaultQuery: {
            'api-version': '2024-05-01-preview',
          },
        },
        tools: [],
      });
    });

    it('should handle Azure serverless with DeepSeek model without dropParams', () => {
      const apiKey = 'some_azure_key';
      const endpoint = undefined;
      const options = {
        modelOptions: {
          model: 'DeepSeek-R1',
          user: 'some_user_id',
        },
        reverseProxyUrl: 'https://some_endpoint_name.models.ai.azure.com/v1/',
        endpoint: 'azureOpenAI',
        headers: {
          'api-key': 'some_azure_key',
        },
        azure: false as const,
        defaultQuery: {
          'api-version': '2024-08-01-preview',
        },
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'DeepSeek-R1',
          user: 'some_user_id',
          apiKey: 'some_azure_key',
        },
        configOptions: {
          baseURL: 'https://some_endpoint_name.models.ai.azure.com/v1/',
          defaultHeaders: {
            'api-key': 'some_azure_key',
          },
          defaultQuery: {
            'api-version': '2024-08-01-preview',
          },
        },
        tools: [],
      });
    });
  });

  describe('Custom endpoints', () => {
    it('should handle Groq custom endpoint configuration', () => {
      const apiKey = 'gsk_somekey';
      const endpoint = 'groq';
      const options = {
        modelOptions: {
          model: 'qwen/qwen3-32b',
          user: 'some-user',
        },
        reverseProxyUrl: 'https://api.groq.com/openai/v1/',
        proxy: '',
        headers: {},
        endpoint: 'groq',
        endpointType: 'custom',
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: 'qwen/qwen3-32b',
          user: 'some-user',
          apiKey: 'gsk_somekey',
        },
        configOptions: {
          baseURL: 'https://api.groq.com/openai/v1/',
          defaultHeaders: {},
        },
        tools: [],
      });
    });

    it('should handle Cloudflare Workers AI with custom headers and addParams', () => {
      const apiKey = 'someKey';
      const endpoint = 'Cloudflare Workers AI';
      const options = {
        modelOptions: {
          model: '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
          user: 'some-user',
        },
        reverseProxyUrl:
          'https://gateway.ai.cloudflare.com/v1/${CF_ACCOUNT_ID}/${CF_GATEWAY_ID}/workers-ai/v1',
        proxy: '',
        headers: {
          'x-librechat-thread-id': '{{LIBRECHAT_BODY_CONVERSATIONID}}',
          'x-test-key': '{{TESTING_USER_VAR}}',
        },
        addParams: {
          disableStreaming: true,
        },
        endpoint: 'Cloudflare Workers AI',
        endpointType: 'custom',
      };

      const result = getOpenAIConfig(apiKey, options, endpoint);

      expect(result).toEqual({
        llmConfig: {
          streaming: true,
          model: '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
          user: 'some-user',
          disableStreaming: true,
          apiKey: 'someKey',
        },
        configOptions: {
          baseURL:
            'https://gateway.ai.cloudflare.com/v1/${CF_ACCOUNT_ID}/${CF_GATEWAY_ID}/workers-ai/v1',
          defaultHeaders: {
            'x-librechat-thread-id': '{{LIBRECHAT_BODY_CONVERSATIONID}}',
            'x-test-key': '{{TESTING_USER_VAR}}',
          },
        },
        tools: [],
      });
    });
  });
});