mirror of https://github.com/danny-avila/LibreChat.git
synced 2026-01-27 04:36:12 +01:00
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)
* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)
* refactor: move tokens.js over to packages/api and update imports
* refactor: port tokens.js to typescript
* refactor: move helpers.js over to packages/api and update imports
* refactor: port helpers.js to typescript
* refactor: move anthropic/llm.js over to packages/api and update imports
* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js
* refactor: move llm.spec.js over to packages/api and update import
* refactor: port llm.spec.js over to typescript
* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414) feat: add anthropic llm config support for openai-like (custom) endpoints
* fix: missed compiler / type issues from addition of getAnthropicLLMConfig
* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values
* WIP: first pass, decouple `llmConfig` from `configOptions`
* chore: update import path for OpenAI configuration from 'llm' to 'config'
* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions
* refactor: cleanup type, introduce openai transform from alt provider
* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports
* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json
* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']
* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction
* refactor: conform userId field for anthropic/openai, cleanup anthropic typing
* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations
* ci: replace userId with user in clientOptions for getLLMConfig
* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig
* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)
* test: add unit tests for getOpenAIConfig with various Anthropic model configurations
* test: enhance Anthropic compatibility tests with addParams and dropParams handling
* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json
* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json
---------
Co-authored-by: Danny Avila <danny@librechat.ai>
105 lines
3.7 KiB
TypeScript
import { Dispatcher, ProxyAgent } from 'undici';
import { AnthropicClientOptions } from '@librechat/agents';
import { anthropicSettings, removeNullishValues } from 'librechat-data-provider';
import type { AnthropicLLMConfigResult, AnthropicConfigOptions } from '~/types/anthropic';
import { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } from './helpers';

/**
 * Generates configuration options for creating an Anthropic language model (LLM) instance.
 * @param apiKey - The API key for authentication with Anthropic.
 * @param [options={}] - Additional options for configuring the LLM.
 * @returns Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
 */
function getLLMConfig(
  apiKey?: string,
  options: AnthropicConfigOptions = {} as AnthropicConfigOptions,
): AnthropicLLMConfigResult {
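  // Extended thinking and prompt-cache flags are read here (with defaults) and then
  // stripped from modelOptions below: they drive the reasoning/header configuration
  // rather than being passed through as raw model parameters.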
  const systemOptions = {
    thinking: options.modelOptions?.thinking ?? anthropicSettings.thinking.default,
    promptCache: options.modelOptions?.promptCache ?? anthropicSettings.promptCache.default,
    thinkingBudget:
      options.modelOptions?.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
  };

  /**
   * These keys overlap with `systemOptions`, so they are removed from `modelOptions`
   * by hand. The original JavaScript stripped them while looping over the object; a
   * type-safe way to do the same has not presented itself since the TypeScript port,
   * so the manual deletes remain until a better approach does.
   */
  if (options.modelOptions) {
    delete options.modelOptions.thinking;
    delete options.modelOptions.promptCache;
    delete options.modelOptions.thinkingBudget;
  } else {
    throw new Error('No modelOptions provided');
  }

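  // Defaults are the assign target, so any user-supplied modelOptions override them.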
  const defaultOptions = {
    model: anthropicSettings.model.default,
    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
    stream: true,
  };

  const mergedOptions = Object.assign(defaultOptions, options.modelOptions);

  let requestOptions: AnthropicClientOptions & { stream?: boolean } = {
    apiKey,
    model: mergedOptions.model,
    stream: mergedOptions.stream,
    temperature: mergedOptions.temperature,
    stopSequences: mergedOptions.stop,
    maxTokens:
      mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
    clientOptions: {},
    invocationKwargs: {
      metadata: {
        user_id: mergedOptions.user,
      },
    },
  };

  requestOptions = configureReasoning(requestOptions, systemOptions);

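  // Anthropic's extended thinking is incompatible with topP/topK, so for
  // Claude 3.7 models these are only applied when thinking is not set.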
  if (!/claude-3[-.]7/.test(mergedOptions.model) || requestOptions.thinking == null) {
    requestOptions.topP = mergedOptions.topP;
    requestOptions.topK = mergedOptions.topK;
  }

  const supportsCacheControl =
    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model ?? '');
  const headers = getClaudeHeaders(requestOptions.model ?? '', supportsCacheControl);
  if (headers && requestOptions.clientOptions) {
    requestOptions.clientOptions.defaultHeaders = headers;
  }

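  // When a proxy is configured, route the SDK's fetch calls through undici's
  // ProxyAgent by supplying it as a custom dispatcher.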
  if (options.proxy && requestOptions.clientOptions) {
    const proxyAgent = new ProxyAgent(options.proxy);
    requestOptions.clientOptions.fetchOptions = {
      dispatcher: proxyAgent,
    };
  }

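  // A reverse proxy URL replaces both the client's baseURL and the Anthropic API URL.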
  if (options.reverseProxyUrl && requestOptions.clientOptions) {
    requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
    requestOptions.anthropicApiUrl = options.reverseProxyUrl;
  }

  const tools = [];

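  // Opt into Anthropic's server-side web search tool when requested.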
  if (mergedOptions.web_search) {
    tools.push({
      type: 'web_search_20250305',
      name: 'web_search',
    });
  }

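  // Drop null/undefined values from the final config before returning it.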
  return {
    tools,
    llmConfig: removeNullishValues(
      requestOptions as Record<string, unknown>,
    ) as AnthropicClientOptions & { clientOptions?: { fetchOptions?: { dispatcher: Dispatcher } } },
  };
}

export { getLLMConfig };
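
/**
 * Example usage (an illustrative sketch only; the model name and option values
 * below are assumptions for demonstration, not defaults taken from this codebase):
 *
 *   const { llmConfig, tools } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
 *     modelOptions: {
 *       model: 'claude-3-7-sonnet-latest',
 *       maxOutputTokens: 4096,
 *       thinking: true,
 *       thinkingBudget: 2048,
 *       promptCache: true,
 *       user: 'user-123',
 *     },
 *     proxy: 'http://localhost:8888',
 *   });
 *
 * `llmConfig` is then suitable for constructing an Anthropic client, and `tools`
 * carries any provider-side tools (currently only web search) to register with it.
 */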