mirror of https://github.com/danny-avila/LibreChat.git
* 🔧 fix: enhance client options handling in AgentClient and set default recursion limit
- Updated the recursion limit to default to 25 if not specified in agentsEConfig.
- Enhanced client options in AgentClient to include model parameters such as apiKey and anthropicApiUrl from agentModelParams.
- Updated requestOptions in the anthropic endpoint to use reverseProxyUrl as anthropicApiUrl.
* Enhance LLM configuration tests with edge case handling
* chore: add return type annotation for getCustomEndpointConfig function
* fix: update modelOptions handling to use optional chaining and default to an empty object in multiple endpoint initializations (a sketch of this pattern follows the commit message)
* chore: update @librechat/agents to version 2.4.42
* refactor: streamline agent endpoint configuration and enhance client options handling for title generation
- Introduced a new `getProviderConfig` function to centralize provider configuration logic.
- Updated `AgentClient` to utilize the new provider configuration, improving clarity and maintainability.
- Removed redundant code related to endpoint initialization and model parameter handling.
- Enhanced error logging for missing endpoint configurations.
* fix: add abort handling for image generation and editing in OpenAIImageTools
* ci: enhance getLLMConfig tests to verify fetchOptions and dispatcher properties
* fix: use optional chaining for endpointOption properties in getOptions
* fix: increase title generation timeout from 25s to 45s, pass `endpointOption` to `getOptions`
* fix: update file filtering logic in getToolFilesByIds to ensure text field is properly checked
* fix: add error handling for empty OCR results in uploadMistralOCR and uploadAzureMistralOCR
* fix: enhance error handling in file upload to include 'No OCR result' message
* chore: update error messages in uploadMistralOCR and uploadAzureMistralOCR
* fix: enhance filtering logic in getToolFilesByIds to include context checks for OCR resources, so that only files directly attached to the agent are included
---------
Co-authored-by: Matt Burnett <matt.burnett@shopify.com>
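The modelOptions hardening described in the commit follows a small, repeatable pattern. Here is a minimal sketch, assuming a hypothetical endpoint initializer; the function name and option shape are illustrative, not code from the repository:

// Hypothetical initializer illustrating the optional-chaining /
// default-to-empty-object pattern from the commit above.
function initializeEndpointOptions(endpointOption) {
  // Optional chaining keeps the read safe when endpointOption is undefined;
  // the nullish fallback supplies an empty object so later reads never throw.
  const modelOptions = endpointOption?.modelOptions ?? {};
  return {
    model: modelOptions.model,
    temperature: modelOptions.temperature,
  };
}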
87 lines
3.6 KiB
JavaScript
const { ProxyAgent } = require('undici');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');

/**
 * Generates configuration options for creating an Anthropic language model (LLM) instance.
 *
 * @param {string} apiKey - The API key for authentication with Anthropic.
 * @param {Object} [options={}] - Additional options for configuring the LLM.
 * @param {Object} [options.modelOptions] - Model-specific options.
 * @param {string} [options.modelOptions.model] - The name of the model to use.
 * @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
 * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
 * @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
 * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
 * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
 * @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
 * @param {boolean} [options.modelOptions.thinking] - Whether to enable extended "thinking" output; defaults from anthropicSettings.
 * @param {boolean} [options.modelOptions.promptCache] - Whether to enable prompt caching; defaults from anthropicSettings.
 * @param {number} [options.modelOptions.thinkingBudget] - Token budget for extended thinking; defaults from anthropicSettings.
 * @param {string} [options.proxy] - Proxy server URL.
 * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
 *
 * @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
 */
function getLLMConfig(apiKey, options = {}) {
  // Default to an empty object so the reads below never throw when
  // modelOptions is not supplied.
  const modelOptions = options.modelOptions ?? {};
  const systemOptions = {
    thinking: modelOptions.thinking ?? anthropicSettings.thinking.default,
    promptCache: modelOptions.promptCache ?? anthropicSettings.promptCache.default,
    thinkingBudget: modelOptions.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
  };

  // Strip system-level options so they are not forwarded as model parameters.
  for (const key in systemOptions) {
    delete modelOptions[key];
  }

  const defaultOptions = {
    model: anthropicSettings.model.default,
    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
    stream: true,
  };

  const mergedOptions = Object.assign(defaultOptions, modelOptions);
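  // Illustrative merge: with modelOptions = { model: 'claude-3-opus-20240229', temperature: 0.2 },
  // mergedOptions keeps the default stream: true and maxOutputTokens, while the user-supplied
  // model and temperature win, since later sources take precedence in Object.assign.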

  /** @type {AnthropicClientOptions} */
  let requestOptions = {
    apiKey,
    model: mergedOptions.model,
    stream: mergedOptions.stream,
    temperature: mergedOptions.temperature,
    stopSequences: mergedOptions.stop,
    maxTokens:
      mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
    clientOptions: {},
  };

  requestOptions = configureReasoning(requestOptions, systemOptions);

  // topP and topK are only set when the model is not Claude 3.7 or when extended
  // thinking is disabled, as the Anthropic API does not accept them alongside thinking.
  if (!/claude-3[-.]7/.test(mergedOptions.model) || requestOptions.thinking == null) {
    requestOptions.topP = mergedOptions.topP;
    requestOptions.topK = mergedOptions.topK;
  }

  const supportsCacheControl =
    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
  const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
  if (headers) {
    requestOptions.clientOptions.defaultHeaders = headers;
  }
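  // Illustrative: when prompt caching is supported and enabled, the returned headers
  // typically carry an 'anthropic-beta' entry (for example 'prompt-caching-2024-07-31');
  // the exact value is determined by getClaudeHeaders and may vary by model.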

  if (options.proxy) {
    const proxyAgent = new ProxyAgent(options.proxy);
    requestOptions.clientOptions.fetchOptions = {
      dispatcher: proxyAgent,
    };
  }
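  // Illustrative: with options.proxy set (e.g. 'http://localhost:8888', a hypothetical URL),
  // fetchOptions.dispatcher holds an undici ProxyAgent; the getLLMConfig tests mentioned
  // in the commit verify exactly these fetchOptions and dispatcher properties.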

  if (options.reverseProxyUrl) {
    requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
    requestOptions.anthropicApiUrl = options.reverseProxyUrl;
  }

  return {
    /** @type {AnthropicClientOptions} */
    llmConfig: removeNullishValues(requestOptions),
  };
}

module.exports = { getLLMConfig };
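A minimal usage sketch of the exported function follows; the require path and proxy URL are assumptions for illustration, not taken from the repository:

// Hypothetical require path; use the module's actual location in the repo.
const { getLLMConfig } = require('./llm');

const { llmConfig } = getLLMConfig(process.env.ANTHROPIC_API_KEY, {
  modelOptions: { model: 'claude-3-5-sonnet-latest', temperature: 0.7 },
  proxy: 'http://localhost:8888', // hypothetical local proxy
});

// llmConfig now contains apiKey, model, stream, maxTokens, and clientOptions (with a
// ProxyAgent dispatcher); nullish fields such as the unset topP/topK are stripped by
// removeNullishValues before the config is returned.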