LibreChat/api/server/services/Endpoints/openAI/initialize.js
Danny Avila d39b99971f
🧠 fix: Agent Title Config & Resource Handling (#8028)
* 🔧 fix: enhance client options handling in AgentClient and set default recursion limit

- Updated the recursion limit to default to 25 if not specified in agentsEConfig.
- Enhanced client options in AgentClient to include model parameters such as apiKey and anthropicApiUrl from agentModelParams.
- Updated requestOptions in the anthropic endpoint to use reverseProxyUrl as anthropicApiUrl.

* Enhance LLM configuration tests with edge case handling

* chore: add return type annotation for getCustomEndpointConfig function

* fix: update modelOptions handling to use optional chaining and default to empty object in multiple endpoint initializations

* chore: update @librechat/agents to version 2.4.42

* refactor: streamline agent endpoint configuration and enhance client options handling for title generations

- Introduced a new `getProviderConfig` function to centralize provider configuration logic.
- Updated `AgentClient` to utilize the new provider configuration, improving clarity and maintainability.
- Removed redundant code related to endpoint initialization and model parameter handling.
- Enhanced error logging for missing endpoint configurations.

* fix: add abort handling for image generation and editing in OpenAIImageTools

* ci: enhance getLLMConfig tests to verify fetchOptions and dispatcher properties

* fix: use optional chaining for endpointOption properties in getOptions

* fix: increase title generation timeout from 25s to 45s, pass `endpointOption` to `getOptions`

* fix: update file filtering logic in getToolFilesByIds to ensure text field is properly checked

* fix: add error handling for empty OCR results in uploadMistralOCR and uploadAzureMistralOCR

* fix: enhance error handling in file upload to include 'No OCR result' message

* chore: update error messages in uploadMistralOCR and uploadAzureMistralOCR

* fix: enhance filtering logic in getToolFilesByIds with context checks so OCR resources include only files directly attached to the agent

---------

Co-authored-by: Matt Burnett <matt.burnett@shopify.com>
2025-06-23 19:44:24 -04:00


const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
  isEnabled,
  resolveHeaders,
  isUserProvided,
  getOpenAIConfig,
  getAzureCredentials,
  createHandleLLMNewToken,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
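
/**
 * Initializes an OpenAI or Azure OpenAI client for the current request,
 * resolving the API key and base URL from environment variables,
 * user-provided keys, and app-level Azure config. When `optionsOnly` is set,
 * returns only the LLM configuration instead of a client instance.
 */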
const initializeClient = async ({
  req,
  res,
  endpointOption,
  optionsOnly,
  overrideEndpoint,
  overrideModel,
}) => {
  const {
    PROXY,
    OPENAI_API_KEY,
    AZURE_API_KEY,
    OPENAI_REVERSE_PROXY,
    AZURE_OPENAI_BASEURL,
    OPENAI_SUMMARIZE,
    DEBUG_OPENAI,
  } = process.env;
  const { key: expiresAt } = req.body;
  const modelName = overrideModel ?? req.body.model;
  const endpoint = overrideEndpoint ?? req.body.endpoint;
  const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;

  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };

  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };

  const userProvidesKey = isUserProvided(credentials[endpoint]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
  let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];

  let clientOptions = {
    contextStrategy,
    proxy: PROXY ?? null,
    debug: isEnabled(DEBUG_OPENAI),
    reverseProxyUrl: baseURL ? baseURL : null,
    ...endpointOption,
  };
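
  // Azure OpenAI: map the requested model to its configured group, resolving
  // credentials, base URL, headers, title settings, and stream rate.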
  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  /** @type {false | TAzureConfig} */
  const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];

  if (isAzureOpenAI && azureConfig) {
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });

    clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders(
      { ...headers, ...(clientOptions.headers ?? {}) },
      req.user,
    );

    clientOptions.titleConvo = azureConfig.titleConvo;
    clientOptions.titleModel = azureConfig.titleModel;

    const azureRate = modelName.includes('gpt-4') ? 30 : 17;
    clientOptions.streamRate = azureConfig.streamRate ?? azureRate;
    clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

    const groupName = modelGroupMap[modelName].group;
    clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
    clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
    clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless && azureOptions;
    if (serverless === true) {
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (isAzureOpenAI) {
    clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure.azureOpenAIApiKey;
  }
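
  // App-level endpoint config (from app locals) may override stream rate and
  // title model for non-Azure OpenAI requests; the `all` config applies to
  // every endpoint.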
  /** @type {undefined | TBaseEndpoint} */
  const openAIConfig = req.app.locals[EModelEndpoint.openAI];

  if (!isAzureOpenAI && openAIConfig) {
    clientOptions.streamRate = openAIConfig.streamRate;
    clientOptions.titleModel = openAIConfig.titleModel;
  }

  /** @type {undefined | TBaseEndpoint} */
  const allConfig = req.app.locals.all;
  if (allConfig) {
    clientOptions.streamRate = allConfig.streamRate;
  }
  // The user was expected to provide a key, but none was found (or it expired).
  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }
  if (!apiKey) {
    throw new Error(`${endpoint} API Key not provided.`);
  }
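
  // `optionsOnly` callers (e.g., title generation) need the LLM config but
  // not a full OpenAIClient instance.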
  if (optionsOnly) {
    const modelOptions = endpointOption?.model_parameters ?? {};
    modelOptions.model = modelName;
    clientOptions = Object.assign({ modelOptions }, clientOptions);
    clientOptions.modelOptions.user = req.user.id;
    const options = getOpenAIConfig(apiKey, clientOptions);
    const streamRate = clientOptions.streamRate;
    if (!streamRate) {
      return options;
    }
    options.llmConfig.callbacks = [
      {
        handleLLMNewToken: createHandleLLMNewToken(streamRate),
      },
    ];
    return options;
  }

  const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions));
  return {
    client,
    openAIApiKey: apiKey,
  };
};
module.exports = initializeClient;
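
/*
 * Hypothetical usage sketch (illustrative only; the route wiring and the
 * `endpointOption` shape are assumptions, not part of this module):
 *
 *   const initializeClient = require('~/server/services/Endpoints/openAI/initialize');
 *
 *   // Inside an Express handler, with req/res in scope:
 *   const { client, openAIApiKey } = await initializeClient({ req, res, endpointOption });
 *
 *   // Title generation and similar callers can request only the LLM config:
 *   const options = await initializeClient({
 *     req,
 *     res,
 *     endpointOption,
 *     optionsOnly: true,
 *     overrideModel: 'gpt-4o-mini', // assumed model name, for illustration
 *   });
 */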