Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-18 01:10:14 +01:00)
🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)
* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)
* refactor: move tokens.js over to packages/api and update imports
* refactor: port tokens.js to typescript
* refactor: move helpers.js over to packages/api and update imports
* refactor: port helpers.js to typescript
* refactor: move anthropic/llm.js over to packages/api and update imports
* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js
* refactor: move llm.spec.js over to packages/api and update import
* refactor: port llm.spec.js over to typescript
* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)
  feat: add anthropic llm config support for openai-like (custom) endpoints
* fix: missed compiler / type issues from addition of getAnthropicLLMConfig
* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values
* WIP: first pass, decouple `llmConfig` from `configOptions`
* chore: update import path for OpenAI configuration from 'llm' to 'config'
* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions
* refactor: cleanup type, introduce openai transform from alt provider
* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports
* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json
* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']
* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction
* refactor: conform userId field for anthropic/openai, cleanup anthropic typing
* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations
* ci: replace userId with user in clientOptions for getLLMConfig
* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig
* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)
* test: add unit tests for getOpenAIConfig with various Anthropic model configurations
* test: enhance Anthropic compatibility tests with addParams and dropParams handling
* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json
* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json
---------
Co-authored-by: Danny Avila <danny@librechat.ai>
parent 7de6f6e44c
commit c6ecf0095b
40 changed files with 1736 additions and 432 deletions
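The squashed commits above amount to one net change in this file: parameter assembly is split out of getOpenAIConfig (which moves to config.ts) into a new getOpenAILLMConfig that builds only the llmConfig and tools, while transport concerns (baseURL, headers, proxy) stay with the caller. A minimal usage sketch; the import path is an assumption, but the call shape follows the signature in the diff below:

import { getOpenAILLMConfig } from '@librechat/api'; // assumed export path

const { llmConfig, tools } = getOpenAILLMConfig({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  streaming: true,
  modelOptions: {
    model: 'gpt-4o-mini',
    frequency_penalty: 0.2, // normalized to llmConfig.frequencyPenalty
  },
});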
@@ -1,16 +1,11 @@
-import { ProxyAgent } from 'undici';
-import { Providers } from '@librechat/agents';
-import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
+import { removeNullishValues } from 'librechat-data-provider';
 import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { AzureOpenAIInput } from '@langchain/openai';
 import type { OpenAI } from 'openai';
 import type * as t from '~/types';
 import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
-import { createFetch } from '~/utils/generators';
 import { isEnabled } from '~/utils/common';
 
-type Fetch = (input: string | URL | Request, init?: RequestInit) => Promise<Response>;
-
 export const knownOpenAIParams = new Set([
   // Constructor/Instance Parameters
   'model',
@@ -80,47 +75,44 @@ function hasReasoningParams({
   );
 }
 
-/**
- * Generates configuration options for creating a language model (LLM) instance.
- * @param apiKey - The API key for authentication.
- * @param options - Additional options for configuring the LLM.
- * @param endpoint - The endpoint name
- * @returns Configuration options for creating an LLM instance.
- */
-export function getOpenAIConfig(
-  apiKey: string,
-  options: t.OpenAIConfigOptions = {},
-  endpoint?: string | null,
-): t.LLMConfigResult {
-  const {
-    modelOptions: _modelOptions = {},
-    reverseProxyUrl,
-    directEndpoint,
-    defaultQuery,
-    headers,
-    proxy,
-    azure,
-    streaming = true,
-    addParams,
-    dropParams,
-  } = options;
+export function getOpenAILLMConfig({
+  azure,
+  apiKey,
+  baseURL,
+  streaming,
+  addParams,
+  dropParams,
+  useOpenRouter,
+  modelOptions: _modelOptions,
+}: {
+  apiKey: string;
+  streaming: boolean;
+  baseURL?: string | null;
+  modelOptions: Partial<t.OpenAIParameters>;
+  addParams?: Record<string, unknown>;
+  dropParams?: string[];
+  useOpenRouter?: boolean;
+  azure?: false | t.AzureOptions;
+}): Pick<t.LLMConfigResult, 'llmConfig' | 'tools'> & {
+  azure?: t.AzureOptions;
+} {
   const {
     reasoning_effort,
     reasoning_summary,
     verbosity,
+    web_search,
     frequency_penalty,
     presence_penalty,
     ...modelOptions
   } = _modelOptions;
-  const llmConfig: Partial<t.ClientOptions> &
-    Partial<t.OpenAIParameters> &
-    Partial<AzureOpenAIInput> = Object.assign(
+
+  const llmConfig = Object.assign(
     {
       streaming,
       model: modelOptions.model ?? '',
     },
     modelOptions,
-  );
+  ) as Partial<t.OAIClientOptions> & Partial<t.OpenAIParameters> & Partial<AzureOpenAIInput>;
 
   if (frequency_penalty != null) {
     llmConfig.frequencyPenalty = frequency_penalty;
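The new signature also pulls web_search out of the model options alongside the penalty and reasoning fields, so special-cased keys never leak into llmConfig. A standalone illustration of that destructuring (plain TypeScript, not the library function itself):

const _modelOptions = {
  model: 'gpt-4o',
  temperature: 0.5,
  web_search: true,
  frequency_penalty: 0.1,
};
// Special-cased keys are pulled out; the rest pass through to llmConfig untouched.
const { web_search, frequency_penalty, ...modelOptions } = _modelOptions;
console.log(modelOptions); // { model: 'gpt-4o', temperature: 0.5 }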
@@ -148,104 +140,8 @@ export function getOpenAIConfig(
     }
   }
 
-  let useOpenRouter = false;
-  const configOptions: t.OpenAIConfiguration = {};
-
-  if (
-    (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
-    (endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
-  ) {
-    useOpenRouter = true;
+  if (useOpenRouter) {
     llmConfig.include_reasoning = true;
-    configOptions.baseURL = reverseProxyUrl;
-    configOptions.defaultHeaders = Object.assign(
-      {
-        'HTTP-Referer': 'https://librechat.ai',
-        'X-Title': 'LibreChat',
-      },
-      headers,
-    );
-  } else if (reverseProxyUrl) {
-    configOptions.baseURL = reverseProxyUrl;
-    if (headers) {
-      configOptions.defaultHeaders = headers;
-    }
   }
 
-  if (defaultQuery) {
-    configOptions.defaultQuery = defaultQuery;
-  }
-
-  if (proxy) {
-    const proxyAgent = new ProxyAgent(proxy);
-    configOptions.fetchOptions = {
-      dispatcher: proxyAgent,
-    };
-  }
-
-  if (azure) {
-    const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
-    const updatedAzure = { ...azure };
-    updatedAzure.azureOpenAIApiDeploymentName = useModelName
-      ? sanitizeModelName(llmConfig.model || '')
-      : azure.azureOpenAIApiDeploymentName;
-
-    if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
-      llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
-    }
-
-    const constructBaseURL = () => {
-      if (!configOptions.baseURL) {
-        return;
-      }
-      const azureURL = constructAzureURL({
-        baseURL: configOptions.baseURL,
-        azureOptions: updatedAzure,
-      });
-      updatedAzure.azureOpenAIBasePath = azureURL.split(
-        `/${updatedAzure.azureOpenAIApiDeploymentName}`,
-      )[0];
-    };
-
-    constructBaseURL();
-    Object.assign(llmConfig, updatedAzure);
-
-    const constructAzureResponsesApi = () => {
-      if (!llmConfig.useResponsesApi) {
-        return;
-      }
-
-      configOptions.baseURL = constructAzureURL({
-        baseURL: configOptions.baseURL || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
-        azureOptions: llmConfig,
-      });
-
-      delete llmConfig.azureOpenAIApiDeploymentName;
-      delete llmConfig.azureOpenAIApiInstanceName;
-      delete llmConfig.azureOpenAIApiVersion;
-      delete llmConfig.azureOpenAIBasePath;
-      delete llmConfig.azureOpenAIApiKey;
-      llmConfig.apiKey = apiKey;
-
-      configOptions.defaultHeaders = {
-        ...configOptions.defaultHeaders,
-        'api-key': apiKey,
-      };
-      configOptions.defaultQuery = {
-        ...configOptions.defaultQuery,
-        'api-version': configOptions.defaultQuery?.['api-version'] ?? 'preview',
-      };
-    };
-
-    constructAzureResponsesApi();
-
-    llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
-  } else {
-    llmConfig.apiKey = apiKey;
-  }
-
-  if (process.env.OPENAI_ORGANIZATION && azure) {
-    configOptions.organization = process.env.OPENAI_ORGANIZATION;
-  }
-
   if (
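The endpoint sniffing and all configOptions wiring (OpenRouter headers, proxy agent, Azure base URL) leave this function; the caller now decides whether OpenRouter is in play and passes useOpenRouter in. A sketch of that caller-side check, mirroring the removed branch (assumed to live with getOpenAIConfig in config.ts):

import { KnownEndpoints } from 'librechat-data-provider';

function isOpenRouter(reverseProxyUrl?: string | null, endpoint?: string | null): boolean {
  return (
    Boolean(reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
    Boolean(endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
  );
}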
@@ -270,7 +166,7 @@ export function getOpenAIConfig(
 
   const tools: BindToolsInput[] = [];
 
-  if (modelOptions.web_search) {
+  if (web_search) {
     llmConfig.useResponsesApi = true;
     tools.push({ type: 'web_search_preview' });
   }
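Because web_search is now destructured up front, enabling it both switches the client to the Responses API and registers the search tool. A hedged example per the signature above (import path assumed as before):

import { getOpenAILLMConfig } from '@librechat/api'; // assumed export path

const { llmConfig, tools } = getOpenAILLMConfig({
  apiKey: 'sk-example',
  streaming: true,
  modelOptions: { model: 'gpt-4o', web_search: true },
});
// llmConfig.useResponsesApi === true
// tools -> [{ type: 'web_search_preview' }]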
@@ -278,7 +174,7 @@
   /**
    * Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
    */
-  if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
+  if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model as string)) {
     const searchExcludeParams = [
       'frequency_penalty',
       'presence_penalty',
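The `as string` cast only satisfies the stricter Partial<t.OpenAIParameters> typing; the regex gate itself is unchanged:

const isSearchModel = (model: string) => /gpt-4o.*search/.test(model);
isSearchModel('gpt-4o-search-preview');      // true -> restricted params are excluded
isSearchModel('gpt-4o-mini-search-preview'); // true
isSearchModel('gpt-4o-mini');                // false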
@@ -301,13 +197,13 @@
 
     combinedDropParams.forEach((param) => {
       if (param in llmConfig) {
-        delete llmConfig[param as keyof t.ClientOptions];
+        delete llmConfig[param as keyof t.OAIClientOptions];
       }
     });
   } else if (dropParams && Array.isArray(dropParams)) {
     dropParams.forEach((param) => {
       if (param in llmConfig) {
-        delete llmConfig[param as keyof t.ClientOptions];
+        delete llmConfig[param as keyof t.OAIClientOptions];
      }
    });
  }
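Only the keyof cast changes (t.ClientOptions becomes t.OAIClientOptions); the pruning behavior stays the same. A standalone illustration of what dropParams does to the assembled config:

const llmConfig: Record<string, unknown> = { model: 'gpt-4o', temperature: 0.7, topP: 0.9 };
const dropParams = ['temperature', 'topP'];
dropParams.forEach((param) => {
  if (param in llmConfig) {
    delete llmConfig[param];
  }
});
console.log(llmConfig); // { model: 'gpt-4o' }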
@@ -329,20 +225,52 @@
     llmConfig.modelKwargs = modelKwargs;
   }
 
-  if (directEndpoint === true && configOptions?.baseURL != null) {
-    configOptions.fetch = createFetch({
-      directEndpoint: directEndpoint,
-      reverseProxyUrl: configOptions?.baseURL,
-    }) as unknown as Fetch;
+  if (!azure) {
+    llmConfig.apiKey = apiKey;
+    return { llmConfig, tools };
   }
 
-  const result: t.LLMConfigResult = {
-    llmConfig,
-    configOptions,
-    tools,
-  };
-  if (useOpenRouter) {
-    result.provider = Providers.OPENROUTER;
+  const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
+  const updatedAzure = { ...azure };
+  updatedAzure.azureOpenAIApiDeploymentName = useModelName
+    ? sanitizeModelName(llmConfig.model || '')
+    : azure.azureOpenAIApiDeploymentName;
+
+  if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
+    llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
   }
-  return result;
+
+  const constructAzureOpenAIBasePath = () => {
+    if (!baseURL) {
+      return;
+    }
+    const azureURL = constructAzureURL({
+      baseURL,
+      azureOptions: updatedAzure,
+    });
+    updatedAzure.azureOpenAIBasePath = azureURL.split(
+      `/${updatedAzure.azureOpenAIApiDeploymentName}`,
+    )[0];
+  };
+
+  constructAzureOpenAIBasePath();
+  Object.assign(llmConfig, updatedAzure);
+
+  const constructAzureResponsesApi = () => {
+    if (!llmConfig.useResponsesApi) {
+      return;
+    }
+
+    delete llmConfig.azureOpenAIApiDeploymentName;
+    delete llmConfig.azureOpenAIApiInstanceName;
+    delete llmConfig.azureOpenAIApiVersion;
+    delete llmConfig.azureOpenAIBasePath;
+    delete llmConfig.azureOpenAIApiKey;
+    llmConfig.apiKey = apiKey;
+  };
+
+  constructAzureResponsesApi();
+
+  llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
+  return { llmConfig, tools, azure: updatedAzure };
 }
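On the Azure path the function now returns the updated Azure options to the caller instead of writing them into a shared configOptions. A hedged call sketch; the field names come from the deletes above, and the values are placeholders:

import { getOpenAILLMConfig } from '@librechat/api'; // assumed export path

const result = getOpenAILLMConfig({
  apiKey: 'azure-key-example',
  streaming: true,
  baseURL: 'https://example-resource.openai.azure.com',
  modelOptions: { model: 'gpt-4o' },
  azure: {
    azureOpenAIApiInstanceName: 'example-resource',
    azureOpenAIApiDeploymentName: 'gpt-4o',
    azureOpenAIApiVersion: '2024-02-15-preview',
  },
});
// result.llmConfig.model is the resolved deployment name, and result.azure
// carries the updated options (e.g. azureOpenAIBasePath) for the caller to
// fold into its own configOptions.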