LibreChat/api/server/services/Endpoints/anthropic/llm.js
Danny Avila be280004cf
🔧 refactor: Improve Params Handling, Remove Legacy Items, & Update Configs (#6074)
* chore: include all assets for service worker, remove unused tsconfig.node.json, eslint ignore vite config

* chore: exclude image files from service worker caching

* refactor: simplify googleSchema transformation and error handling

* fix: max output tokens cap for 3.7 models

* fix: skip index fixing in CI, development, and test environments

* ci: add maxOutputTokens handling tests for Claude models

* refactor: drop top_k and top_p parameters for claude-3.7 in AnthropicClient and add tests for new behavior

* refactor: conditionally include top_k and top_p parameters for non-claude-3.7 models

* ci: add unit tests for getLLMConfig function with various model options

* chore: remove all OPENROUTER_API_KEY legacy logic

* refactor: optimize stream chunk handling

* feat: reset model parameters button

* refactor: remove unused examples field from convoSchema and presetSchema

* chore: update librechat-data-provider version to 0.7.6993

* refactor: move excludedKeys set to data-provider for better reusability

* feat: enhance saveMessageToDatabase to handle unset fields and fetched conversation state

* feat: add 'iconURL' and 'greeting' to excludedKeys in data provider config

* fix: add optional chaining to user ID retrieval in getConvo call
2025-02-26 15:02:03 -05:00

const { HttpsProxyAgent } = require('https-proxy-agent');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
const { checkPromptCacheSupport, getClaudeHeaders } = require('./helpers');
/**
 * Generates configuration options for creating an Anthropic language model (LLM) instance.
 *
 * @param {string} apiKey - The API key for authentication with Anthropic.
 * @param {Object} [options={}] - Additional options for configuring the LLM.
 * @param {Object} [options.modelOptions] - Model-specific options.
 * @param {string} [options.modelOptions.model] - The name of the model to use.
 * @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
 * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
 * @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
 * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
 * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
 * @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
 * @param {boolean} [options.modelOptions.thinking] - Whether to enable extended thinking; stripped from the model parameters.
 * @param {boolean} [options.modelOptions.promptCache] - Whether to enable prompt caching for supported models; stripped from the model parameters.
 * @param {number} [options.modelOptions.thinkingBudget] - Token budget for extended thinking; stripped from the model parameters.
 * @param {string} [options.proxy] - Proxy server URL.
 * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
 *
 * @returns {Object} An object whose `llmConfig` property holds the options for creating an Anthropic LLM instance, with null and undefined values removed.
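 *
 * @example
 * // Illustrative call only; the model name and parameter values here are
 * // examples, not defaults from this codebase:
 * const { llmConfig } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
 *   modelOptions: { model: 'claude-3-5-sonnet-latest', temperature: 0.7, maxOutputTokens: 1024 },
 * });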
*/
function getLLMConfig(apiKey, options = {}) {
  const modelOptions = options.modelOptions ?? {};

  /** System-level options, separated out so they are not forwarded as model parameters */
  const systemOptions = {
    thinking: modelOptions.thinking ?? anthropicSettings.thinking.default,
    promptCache: modelOptions.promptCache ?? anthropicSettings.promptCache.default,
    thinkingBudget: modelOptions.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
  };

  for (const key in systemOptions) {
    delete modelOptions[key];
  }

  const defaultOptions = {
    model: anthropicSettings.model.default,
    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
    stream: true,
  };

  const mergedOptions = Object.assign(defaultOptions, modelOptions);

  /** @type {AnthropicClientOptions} */
  let requestOptions = {
    apiKey,
    model: mergedOptions.model,
    stream: mergedOptions.stream,
    temperature: mergedOptions.temperature,
    stopSequences: mergedOptions.stop,
    maxTokens:
      mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
    clientOptions: {},
  };

  // top_p and top_k are dropped for claude-3.7 models (see #6074);
  // forward them only for other models
  if (!/claude-3[-.]7/.test(mergedOptions.model)) {
    if (mergedOptions.topP !== undefined) {
      requestOptions.topP = mergedOptions.topP;
    }
    if (mergedOptions.topK !== undefined) {
      requestOptions.topK = mergedOptions.topK;
    }
  }

  // Only send prompt caching headers when the option is enabled and the model supports it
  const supportsCacheControl =
    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
  const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
  if (headers) {
    requestOptions.clientOptions.defaultHeaders = headers;
  }

  if (options.proxy) {
    requestOptions.clientOptions.httpAgent = new HttpsProxyAgent(options.proxy);
  }

  if (options.reverseProxyUrl) {
    requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
  }

  return {
    /** @type {AnthropicClientOptions} */
    llmConfig: removeNullishValues(requestOptions),
  };
}

module.exports = { getLLMConfig };
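
/*
 * Minimal usage sketch showing the claude-3.7 behavior above. The values are
 * assumed for illustration and are not part of this module:
 *
 *   const { getLLMConfig } = require('./llm');
 *   const { llmConfig } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
 *     modelOptions: {
 *       model: 'claude-3-7-sonnet-latest',
 *       topP: 0.9, // dropped: claude-3.7 models skip top_p/top_k
 *       promptCache: true,
 *     },
 *   });
 *   // llmConfig omits topP/topK for claude-3.7 models, and any nullish
 *   // values are stripped by removeNullishValues before the config is returned.
 */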