LibreChat/api/server/services/Config/loadConfigModels.js
Danny Avila 00b2d026c1
🚀 feat: Enhance Model Handling, Logging & xAI Agent Support (#6182)
* chore: update @librechat/agents to version 2.1.9

* feat: xAI standalone provider for agents

* chore: bump librechat-data-provider version to 0.7.6997

* fix: reorder import statements and enhance user listing output

* fix: Update Docker Compose commands to support v2 syntax with fallback

* 🔧 fix: drop `reasoning_effort` for o1-preview/mini models

* chore: requireLocalAuth logging

* fix: edge case artifact message editing logic to handle `new` conversation IDs

* fix: remove `temperature` from model options in OpenAIClient if o1-mini/preview

* fix: update type annotation for fetchPromisesMap to use Promise<string[]> instead of string[]

* feat: anthropic model fetching

* fix: update model name to use EModelEndpoint.openAI in fetchModels and fetchOpenAIModels

* fix: add error handling to modelController for loadModels

* fix: add error handling and logging for model fetching in loadDefaultModels

* ci: update getAnthropicModels tests to be asynchronous

* feat: add user ID to model options in OpenAI and custom endpoint initialization

---------

Co-authored-by: Andrei Berceanu <andreicberceanu@gmail.com>
Co-authored-by: KiGamji <maloyh44@gmail.com>
2025-03-05 12:04:26 -05:00

const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { isUserProvided, normalizeEndpointName } = require('~/server/utils');
const { fetchModels } = require('~/server/services/ModelService');
const { getCustomConfig } = require('./getCustomConfig');

/**
 * Load config endpoints from the cached configuration object
 * @function loadConfigModels
 * @param {Express.Request} req - The Express request object.
 */
async function loadConfigModels(req) {
  const customConfig = await getCustomConfig();

  if (!customConfig) {
    return {};
  }

  const { endpoints = {} } = customConfig ?? {};
  const modelsConfig = {};
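
  // Azure model lists come from the pre-processed Azure config on `req.app.locals`
  // and are only exposed when the corresponding endpoint is enabled in the custom config.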
  const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
  const { modelNames } = azureConfig ?? {};

  if (modelNames && azureEndpoint) {
    modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
  }

  if (modelNames && azureEndpoint && azureEndpoint.plugins) {
    modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
  }

  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
    modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
  }
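
  // Without an array of custom endpoints in the config, only the Azure-derived model lists apply.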
  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
    return modelsConfig;
  }
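
  // Only keep custom endpoints that define the minimum required fields and at least
  // one way to resolve models: a fetch flag or a static default list.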
  const customEndpoints = endpoints[EModelEndpoint.custom].filter(
    (endpoint) =>
      endpoint.baseURL &&
      endpoint.apiKey &&
      endpoint.name &&
      endpoint.models &&
      (endpoint.models.fetch || endpoint.models.default),
  );

  /**
   * @type {Record<string, Promise<string[]>>}
   * Map for promises keyed by unique combination of baseURL and apiKey */
  const fetchPromisesMap = {};

  /**
   * @type {Record<string, string[]>}
   * Map to associate unique keys with endpoint names; note: one key may correspond to multiple endpoints */
  const uniqueKeyToEndpointsMap = {};

  /**
   * @type {Record<string, Partial<TEndpoint>>}
   * Map to associate endpoint names to their configurations */
  const endpointsMap = {};
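
  // Endpoints are grouped by `${BASE_URL}__${API_KEY}` so that configs sharing the
  // same credentials trigger a single fetch whose result is reused for all of them.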
  for (let i = 0; i < customEndpoints.length; i++) {
    const endpoint = customEndpoints[i];
    const { models, name: configName, baseURL, apiKey } = endpoint;
    const name = normalizeEndpointName(configName);
    endpointsMap[name] = endpoint;

    const API_KEY = extractEnvVariable(apiKey);
    const BASE_URL = extractEnvVariable(baseURL);

    const uniqueKey = `${BASE_URL}__${API_KEY}`;

    modelsConfig[name] = [];
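
    // Fetch models dynamically only when neither the key nor the base URL is
    // user-provided; otherwise fall through to the static default list below.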
    if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) {
      fetchPromisesMap[uniqueKey] =
        fetchPromisesMap[uniqueKey] ||
        fetchModels({
          user: req.user.id,
          baseURL: BASE_URL,
          apiKey: API_KEY,
          name,
          userIdQuery: models.userIdQuery,
        });
      uniqueKeyToEndpointsMap[uniqueKey] = uniqueKeyToEndpointsMap[uniqueKey] || [];
      uniqueKeyToEndpointsMap[uniqueKey].push(name);
      continue;
    }
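
    // Otherwise, use the statically configured default models, if any.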
    if (Array.isArray(models.default)) {
      modelsConfig[name] = models.default;
    }
  }
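
  // Resolve the deduplicated fetches in parallel and distribute each result to every
  // endpoint sharing the same key; an empty result falls back to that endpoint's default models.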
  const fetchedData = await Promise.all(Object.values(fetchPromisesMap));
  const uniqueKeys = Object.keys(fetchPromisesMap);

  for (let i = 0; i < fetchedData.length; i++) {
    const currentKey = uniqueKeys[i];
    const modelData = fetchedData[i];
    const associatedNames = uniqueKeyToEndpointsMap[currentKey];

    for (const name of associatedNames) {
      const endpoint = endpointsMap[name];
      modelsConfig[name] = !modelData?.length ? (endpoint.models.default ?? []) : modelData;
    }
  }

  return modelsConfig;
}

module.exports = loadConfigModels;
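
/*
 * Usage sketch (illustrative only, not part of this module): `loadConfigModels`
 * expects an authenticated Express request, so a hypothetical route handler might
 * call it as below. The router and route path are assumptions for the example,
 * not LibreChat's actual routing.
 *
 *   const express = require('express');
 *   const loadConfigModels = require('~/server/services/Config/loadConfigModels');
 *
 *   const router = express.Router();
 *   router.get('/models/config', async (req, res) => {
 *     // Resolves to a map of endpoint names to model name arrays.
 *     const modelsConfig = await loadConfigModels(req);
 *     res.json(modelsConfig);
 *   });
 */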