diff --git a/api/server/services/Endpoints/anthropic/initialize.js b/api/server/services/Endpoints/anthropic/initialize.js
index 4546fc634c..7c98d8a63b 100644
--- a/api/server/services/Endpoints/anthropic/initialize.js
+++ b/api/server/services/Endpoints/anthropic/initialize.js
@@ -39,8 +39,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   if (optionsOnly) {
     clientOptions = Object.assign(
       {
-        reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
         proxy: PROXY ?? null,
+        userId: req.user.id,
+        reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
         modelOptions: endpointOption?.model_parameters ?? {},
       },
       clientOptions,
diff --git a/api/server/services/Endpoints/anthropic/llm.js b/api/server/services/Endpoints/anthropic/llm.js
index 8355b8aa26..2cb76d5f92 100644
--- a/api/server/services/Endpoints/anthropic/llm.js
+++ b/api/server/services/Endpoints/anthropic/llm.js
@@ -15,6 +15,7 @@ const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = requir
  * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
  * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
  * @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
+ * @param {string} options.userId - The user ID for tracking and personalization.
  * @param {string} [options.proxy] - Proxy server URL.
  * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
  *
@@ -47,6 +48,11 @@ function getLLMConfig(apiKey, options = {}) {
     maxTokens:
       mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
     clientOptions: {},
+    invocationKwargs: {
+      metadata: {
+        user_id: options.userId,
+      },
+    },
   };
 
   requestOptions = configureReasoning(requestOptions, systemOptions);
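
Note: the `invocationKwargs.metadata.user_id` value added above corresponds to the `metadata.user_id` field of the Anthropic Messages API, an opaque external identifier for the end user that Anthropic uses to help detect abuse. As a minimal sketch of where the value ends up, assuming the merged request options were forwarded to the official `@anthropic-ai/sdk` client directly (this project's actual call path goes through its own client layer, so the wiring below is illustrative only, not the code this diff touches):

    // Illustrative sketch only: shows where the user_id metadata lands
    // in an Anthropic Messages API request.
    const Anthropic = require('@anthropic-ai/sdk');

    const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

    async function sendWithUserMetadata(requestOptions, text) {
      return client.messages.create({
        model: requestOptions.model,
        max_tokens: requestOptions.maxTokens,
        // The field introduced by this diff: an opaque per-user ID that
        // Anthropic associates with the request for abuse detection.
        metadata: { user_id: requestOptions.invocationKwargs.metadata.user_id },
        messages: [{ role: 'user', content: text }],
      });
    }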