Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 17:00:15 +01:00)
* chore: update @librechat/agents to version 2.1.9
* feat: xAI standalone provider for agents
* chore: bump librechat-data-provider version to 0.7.6997
* fix: reorder import statements and enhance user listing output
* fix: Update Docker Compose commands to support v2 syntax with fallback
* 🔧 fix: drop `reasoning_effort` for o1-preview/mini models
* chore: requireLocalAuth logging
* fix: edge case artifact message editing logic to handle `new` conversation IDs
* fix: remove `temperature` from model options in OpenAIClient if o1-mini/preview
* fix: update type annotation for fetchPromisesMap to use Promise<string[]> instead of string[]
* feat: anthropic model fetching
* fix: update model name to use EModelEndpoint.openAI in fetchModels and fetchOpenAIModels
* fix: add error handling to modelController for loadModels
* fix: add error handling and logging for model fetching in loadDefaultModels
* ci: update getAnthropicModels tests to be asynchronous
* feat: add user ID to model options in OpenAI and custom endpoint initialization
---------
Co-authored-by: Andrei Berceanu <andreicberceanu@gmail.com>
Co-authored-by: KiGamji <maloyh44@gmail.com>
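
Two of the fixes above (dropping `reasoning_effort` for o1-preview/mini, and removing `temperature` from model options in OpenAIClient for the same models) follow one pattern: OpenAI's o1-preview and o1-mini models reject these request parameters, so the client strips them before sending. A minimal sketch of that idea follows; the helper name `sanitizeModelOptions` and the shape of `modelOptions` are assumptions for illustration, not LibreChat's actual code.

// Sketch: strip request options that o1-preview/o1-mini reject.
// `sanitizeModelOptions` and the option shape are hypothetical.
function sanitizeModelOptions(modelOptions) {
  const { model = '' } = modelOptions;
  const isO1PreviewOrMini = /\bo1-(preview|mini)\b/.test(model);
  if (!isO1PreviewOrMini) {
    return modelOptions;
  }
  // These models reject both `temperature` and `reasoning_effort`.
  const { temperature, reasoning_effort, ...rest } = modelOptions;
  return rest;
}

// Example: both unsupported fields are dropped for o1-mini.
console.log(sanitizeModelOptions({ model: 'o1-mini', temperature: 0.7, reasoning_effort: 'high' }));
// -> { model: 'o1-mini' }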
49 lines · 1.5 KiB · JavaScript
const { CacheKeys } = require('librechat-data-provider');
const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');

/**
 * Returns the models config, reading from the config store cache when available
 * and loading it otherwise.
 * @param {ServerRequest} req
 * @returns {Promise<TModelsConfig>} The models config.
 */
const getModelsConfig = async (req) => {
  const cache = getLogStores(CacheKeys.CONFIG_STORE);
  let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
  if (!modelsConfig) {
    modelsConfig = await loadModels(req);
  }

  return modelsConfig;
};

/**
 * Loads the models from the config.
 * @param {ServerRequest} req - The Express request object.
 * @returns {Promise<TModelsConfig>} The models config.
 */
async function loadModels(req) {
  const cache = getLogStores(CacheKeys.CONFIG_STORE);
  const cachedModelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);

  if (cachedModelsConfig) {
    return cachedModelsConfig;
  }

  const defaultModelsConfig = await loadDefaultModels(req);
  const customModelsConfig = await loadConfigModels(req);

  // Custom endpoint models take precedence over the defaults for matching keys.
  const modelConfig = { ...defaultModelsConfig, ...customModelsConfig };

  await cache.set(CacheKeys.MODELS_CONFIG, modelConfig);
  return modelConfig;
}

async function modelController(req, res) {
  try {
    const modelConfig = await loadModels(req);
    res.send(modelConfig);
  } catch (error) {
    logger.error('Error fetching models:', error);
    res.status(500).send({ error: error.message });
  }
}

module.exports = { modelController, loadModels, getModelsConfig };
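
For context, `modelController` is an Express request handler, while `getModelsConfig` serves internal callers that need the config without sending an HTTP response. A minimal sketch of mounting the handler follows; the `/api/models` route path and the `~/` require alias resolving outside LibreChat's own setup are assumptions for illustration.

// Sketch: wiring the exported controller into an Express app.
// The '/api/models' path is assumed, not taken from this file.
const express = require('express');
const { modelController } = require('~/server/controllers/ModelController');

const app = express();
app.get('/api/models', modelController);
app.listen(3080);

Note the cache-aside design: both entry points consult the CONFIG_STORE cache first, so the relatively expensive `loadDefaultModels`/`loadConfigModels` calls run only on a cache miss, and the merged result is written back for subsequent requests.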