🚀 feat: Enhance Model Handling, Logging & xAI Agent Support (#6182)

* chore: update @librechat/agents to version 2.1.9

* feat: xAI standalone provider for agents (see the xAI sketch after this list)

* chore: bump librechat-data-provider version to 0.7.6997

* fix: reorder import statements and enhance user listing output

* fix: update Docker Compose commands to support v2 syntax with fallback (see the Compose sketch after this list)

* 🔧 fix: drop `reasoning_effort` for o1-preview/mini models (see the parameter sketch after this list)

* chore: requireLocalAuth logging

* fix: edge case in artifact message editing logic to handle `new` conversation IDs

* fix: remove `temperature` from model options in OpenAIClient for o1-mini/preview models (covered by the same parameter sketch below)

* fix: update type annotation for fetchPromisesMap to use Promise<string[]> instead of string[] (see the typedef sketch after this list)

* feat: Anthropic model fetching (see the Anthropic sketch after this list)

* fix: update model name to use EModelEndpoint.openAI in fetchModels and fetchOpenAIModels

* fix: add error handling to modelController for loadModels

* fix: add error handling and logging for model fetching in loadDefaultModels

* ci: update getAnthropicModels tests to be asynchronous

* feat: add user ID to model options in OpenAI and custom endpoint initialization
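
For the xAI provider item, a minimal sketch of the idea rather than the code added in this commit: xAI's API is OpenAI-compatible, so a standalone provider can largely reuse OpenAI-style request handling against `https://api.x.ai/v1`. The function name `listXAIModels` and the `XAI_API_KEY` variable are illustrative assumptions.

```js
// Illustrative sketch only; not the implementation from this commit.
// xAI's API is OpenAI-compatible, so listing models is a plain bearer-token GET.
const XAI_BASE_URL = 'https://api.x.ai/v1';

async function listXAIModels(apiKey = process.env.XAI_API_KEY) {
  const res = await fetch(`${XAI_BASE_URL}/models`, {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (!res.ok) {
    throw new Error(`xAI models request failed with status ${res.status}`);
  }
  const { data } = await res.json();
  return data.map((model) => model.id);
}
```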
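
For the Docker Compose item, one way to prefer the v2 CLI (`docker compose`) and fall back to the legacy v1 binary (`docker-compose`); the `getComposeCommand` helper and its use from a Node script are assumptions, not the commit's actual change.

```js
// Sketch: detect Docker Compose v2 ("docker compose") and fall back to the
// legacy standalone binary ("docker-compose") when v2 is unavailable.
const { execSync } = require('child_process');

function getComposeCommand() {
  try {
    execSync('docker compose version', { stdio: 'ignore' });
    return 'docker compose';
  } catch {
    return 'docker-compose';
  }
}

// Example usage:
// execSync(`${getComposeCommand()} up -d`, { stdio: 'inherit' });
```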
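
The two o1-preview/mini items share one idea; this rough sketch assumes the client strips unsupported parameters before building the request (the `sanitizeModelOptions` helper is hypothetical, not OpenAIClient's actual code).

```js
// Hypothetical helper: o1-preview and o1-mini reject `reasoning_effort` and
// non-default `temperature`, so drop both before sending the request payload.
function sanitizeModelOptions(modelOptions = {}) {
  const { model = '' } = modelOptions;
  if (/\bo1-(preview|mini)\b/.test(model)) {
    const { reasoning_effort, temperature, ...rest } = modelOptions;
    return rest;
  }
  return modelOptions;
}

// sanitizeModelOptions({ model: 'o1-mini', temperature: 0.7 })
// => { model: 'o1-mini' }
```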
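
For the `fetchPromisesMap` annotation fix, a small sketch of what the corrected type conveys: the map caches in-flight model fetches, so each value is a `Promise<string[]>` rather than a resolved `string[]`. The `fetchModelsOnce` helper is an illustrative assumption.

```js
/** @type {Record<string, Promise<string[]>>} */
const fetchPromisesMap = {};

// Illustrative helper: reuse an in-flight fetch for the same key so concurrent
// callers share one request instead of hitting the models endpoint repeatedly.
function fetchModelsOnce(key, fetchFn) {
  if (!fetchPromisesMap[key]) {
    fetchPromisesMap[key] = fetchFn();
  }
  return fetchPromisesMap[key];
}
```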
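
For the Anthropic model fetching feature, a hedged sketch of the general approach rather than the commit's code: Anthropic exposes a `GET /v1/models` endpoint, so available model IDs can be fetched with the account's API key instead of relying only on a hardcoded list.

```js
// Sketch only: fetch available Anthropic model IDs from the models endpoint.
async function fetchAnthropicModels(apiKey = process.env.ANTHROPIC_API_KEY) {
  const res = await fetch('https://api.anthropic.com/v1/models', {
    headers: {
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
    },
  });
  if (!res.ok) {
    throw new Error(`Anthropic models request failed with status ${res.status}`);
  }
  const { data } = await res.json();
  return data.map((model) => model.id);
}
```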

---------

Co-authored-by: Andrei Berceanu <andreicberceanu@gmail.com>
Co-authored-by: KiGamji <maloyh44@gmail.com>
Author: Danny Avila, 2025-03-05 12:04:26 -05:00 (committed by GitHub)
Parent: 287699331c
Commit: 00b2d026c1
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
19 changed files with 1010 additions and 1044 deletions


@@ -1,6 +1,7 @@
 const { CacheKeys } = require('librechat-data-provider');
 const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');
+const { logger } = require('~/config');

 /**
  * @param {ServerRequest} req
@@ -36,8 +37,13 @@ async function loadModels(req) {
 }

 async function modelController(req, res) {
-  const modelConfig = await loadModels(req);
-  res.send(modelConfig);
+  try {
+    const modelConfig = await loadModels(req);
+    res.send(modelConfig);
+  } catch (error) {
+    logger.error('Error fetching models:', error);
+    res.status(500).send({ error: error.message });
+  }
 }

 module.exports = { modelController, loadModels, getModelsConfig };