Mirror of https://github.com/danny-avila/LibreChat.git
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)
* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)
  * refactor: move tokens.js over to packages/api and update imports
  * refactor: port tokens.js to typescript
  * refactor: move helpers.js over to packages/api and update imports
  * refactor: port helpers.js to typescript
  * refactor: move anthropic/llm.js over to packages/api and update imports
  * refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js
  * refactor: move llm.spec.js over to packages/api and update import
  * refactor: port llm.spec.js over to typescript
* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)
  * feat: add anthropic llm config support for openai-like (custom) endpoints
* fix: missed compiler / type issues from addition of getAnthropicLLMConfig
* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values
* WIP: first pass, decouple `llmConfig` from `configOptions`
* chore: update import path for OpenAI configuration from 'llm' to 'config'
* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions
* refactor: cleanup type, introduce openai transform from alt provider
* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports
* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json
* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']
* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction
* refactor: conform userId field for anthropic/openai, cleanup anthropic typing
* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations
* ci: replace userId with user in clientOptions for getLLMConfig
* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig
* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)
* test: add unit tests for getOpenAIConfig with various Anthropic model configurations
* test: enhance Anthropic compatibility tests with addParams and dropParams handling
* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json
* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

Co-authored-by: Danny Avila <danny@librechat.ai>
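The recurring theme in this commit series is decoupling the provider request options (`llmConfig`) from client/transport options (`configOptions`). As a rough illustration only — the real implementation is `getLLMConfig` in `@librechat/api`, and these field names are assumptions, not its exact types:

// Hypothetical sketch of the decoupled shape; not the actual @librechat/api types.
function buildProviderConfig(apiKey, clientOptions = {}) {
  // llmConfig: options that shape the provider request itself.
  const llmConfig = {
    apiKey,
    ...clientOptions.modelOptions, // model, temperature, user, etc.
  };
  // configOptions: transport-level settings kept out of the request payload.
  const configOptions = {};
  if (clientOptions.reverseProxyUrl) {
    configOptions.baseURL = clientOptions.reverseProxyUrl;
  }
  return { llmConfig, configOptions };
}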
70 lines · 2 KiB · JavaScript
const { getLLMConfig } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const AnthropicClient = require('~/app/clients/AnthropicClient');

const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
  const appConfig = req.config;
  const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
  const expiresAt = req.body.key;
  const isUserProvided = ANTHROPIC_API_KEY === 'user_provided';

  // Resolve the API key: a per-user stored key when the admin configured
  // `user_provided`, otherwise the server-wide environment variable.
  const anthropicApiKey = isUserProvided
    ? await getUserKey({ userId: req.user.id, name: EModelEndpoint.anthropic })
    : ANTHROPIC_API_KEY;

  if (!anthropicApiKey) {
    throw new Error('Anthropic API key not provided. Please provide it again.');
  }

  // User-provided keys may carry an expiry timestamp (sent as `req.body.key`).
  if (expiresAt && isUserProvided) {
    checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic);
  }

  let clientOptions = {};

  /** @type {undefined | TBaseEndpoint} */
  const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];

  if (anthropicConfig) {
    clientOptions.streamRate = anthropicConfig.streamRate;
    clientOptions.titleModel = anthropicConfig.titleModel;
  }

  // The catch-all `all` endpoint config takes precedence for streamRate.
  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }

  // `optionsOnly` short-circuits client construction and returns the
  // provider LLM config built by `getLLMConfig` instead.
  if (optionsOnly) {
    clientOptions = Object.assign(
      {
        proxy: PROXY ?? null,
        reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
        modelOptions: endpointOption?.model_parameters ?? {},
      },
      clientOptions,
    );
    if (overrideModel) {
      clientOptions.modelOptions.model = overrideModel;
    }
    clientOptions.modelOptions.user = req.user.id;
    return getLLMConfig(anthropicApiKey, clientOptions);
  }

  const client = new AnthropicClient(anthropicApiKey, {
    req,
    res,
    reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
    proxy: PROXY ?? null,
    ...clientOptions,
    ...endpointOption,
  });

  return {
    client,
    anthropicApiKey,
  };
};

module.exports = initializeClient;
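For orientation, a usage sketch of the module above. The handler wiring, require path, and model name are illustrative assumptions; in LibreChat, `endpointOption` is attached by middleware before this service runs.

// Illustrative usage only; wiring and names are assumptions, not from this file.
const initializeClient = require('~/server/services/Endpoints/anthropic/initialize');

async function handleChatRequest(req, res, endpointOption) {
  // Default mode: returns a ready AnthropicClient plus the resolved key.
  const { client } = await initializeClient({ req, res, endpointOption });

  // optionsOnly mode: skips client construction and returns the result of
  // getLLMConfig, e.g. when only the provider config is needed.
  const llmConfig = await initializeClient({
    req,
    res,
    endpointOption,
    optionsOnly: true,
    overrideModel: 'claude-3-5-sonnet-latest', // assumed model id
  });

  return { client, llmConfig };
}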