🚀 feat: Enhance Model Handling, Logging & xAI Agent Support (#6182)

* chore: update @librechat/agents to version 2.1.9

* feat: xAI standalone provider for agents

* chore: bump librechat-data-provider version to 0.7.6997

* fix: reorder import statements and enhance user listing output

* fix: Update Docker Compose commands to support v2 syntax with fallback

* 🔧 fix: drop `reasoning_effort` for o1-preview/mini models

* chore: requireLocalAuth logging

* fix: edge case artifact message editing logic to handle `new` conversation IDs

* fix: remove `temperature` from model options in OpenAIClient if o1-mini/preview

* fix: update type annotation for fetchPromisesMap to use Promise<string[]> instead of string[]

* feat: anthropic model fetching

* fix: update model name to use EModelEndpoint.openAI in fetchModels and fetchOpenAIModels

* fix: add error handling to modelController for loadModels

* fix: add error handling and logging for model fetching in loadDefaultModels

* ci: update getAnthropicModels tests to be asynchronous

* feat: add user ID to model options in OpenAI and custom endpoint initialization

---------

Co-authored-by: Andrei Berceanu <andreicberceanu@gmail.com>
Co-authored-by: KiGamji <maloyh44@gmail.com>
Danny Avila · 2025-03-05 12:04:26 -05:00 · committed by GitHub
parent 287699331c · commit 00b2d026c1
19 changed files with 1010 additions and 1044 deletions


@@ -1307,8 +1307,12 @@ ${convo}
     ) {
       delete modelOptions.stream;
       delete modelOptions.stop;
-    } else if (!this.isOmni && modelOptions.reasoning_effort != null) {
+    } else if (
+      (!this.isOmni || /^o1-(mini|preview)/i.test(modelOptions.model)) &&
+      modelOptions.reasoning_effort != null
+    ) {
       delete modelOptions.reasoning_effort;
+      delete modelOptions.temperature;
     }

     let reasoningKey = 'reasoning_content';
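o1-preview and o1-mini accept neither `reasoning_effort` nor a non-default `temperature`, so the hunk above strips both before the request is built. A minimal standalone sketch of the same check, with a hypothetical helper name:

// Sketch: drop params unsupported by o1-preview/o1-mini (hypothetical helper).
function sanitizeO1Options(modelOptions, isOmni) {
  const isO1Legacy = /^o1-(mini|preview)/i.test(modelOptions.model ?? '');
  if ((!isOmni || isO1Legacy) && modelOptions.reasoning_effort != null) {
    delete modelOptions.reasoning_effort;
    delete modelOptions.temperature;
  }
  return modelOptions;
}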


@@ -45,7 +45,7 @@
     "@langchain/google-genai": "^0.1.9",
     "@langchain/google-vertexai": "^0.2.0",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.1.8",
+    "@librechat/agents": "^2.2.0",
     "@waylaidwanderer/fetch-event-source": "^3.0.1",
     "axios": "1.7.8",
     "bcryptjs": "^2.4.3",


@@ -1,6 +1,7 @@
 const { CacheKeys } = require('librechat-data-provider');
 const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');
+const { logger } = require('~/config');

 /**
  * @param {ServerRequest} req
@@ -36,8 +37,13 @@ async function loadModels(req) {
 }

 async function modelController(req, res) {
-  const modelConfig = await loadModels(req);
-  res.send(modelConfig);
+  try {
+    const modelConfig = await loadModels(req);
+    res.send(modelConfig);
+  } catch (error) {
+    logger.error('Error fetching models:', error);
+    res.status(500).send({ error: error.message });
+  }
 }

 module.exports = { modelController, loadModels, getModelsConfig };


@@ -1,32 +1,18 @@
 const passport = require('passport');
-const DebugControl = require('../../utils/debug.js');
-
-function log({ title, parameters }) {
-  DebugControl.log.functionName(title);
-  if (parameters) {
-    DebugControl.log.parameters(parameters);
-  }
-}
+const { logger } = require('~/config');

 const requireLocalAuth = (req, res, next) => {
   passport.authenticate('local', (err, user, info) => {
     if (err) {
-      log({
-        title: '(requireLocalAuth) Error at passport.authenticate',
-        parameters: [{ name: 'error', value: err }],
-      });
+      logger.error('[requireLocalAuth] Error at passport.authenticate:', err);
       return next(err);
     }
     if (!user) {
-      log({
-        title: '(requireLocalAuth) Error: No user',
-      });
+      logger.debug('[requireLocalAuth] Error: No user');
       return res.status(404).send(info);
     }
     if (info && info.message) {
-      log({
-        title: '(requireLocalAuth) Error: ' + info.message,
-      });
+      logger.debug('[requireLocalAuth] Error: ' + info.message);
       return res.status(422).send({ message: info.message });
     }
     req.user = user;


@@ -47,7 +47,7 @@ async function loadConfigModels(req) {
   );

   /**
-   * @type {Record<string, string[]>}
+   * @type {Record<string, Promise<string[]>>}
    * Map for promises keyed by unique combination of baseURL and apiKey */
   const fetchPromisesMap = {};

   /**
@@ -102,7 +102,7 @@ async function loadConfigModels(req) {
     for (const name of associatedNames) {
       const endpoint = endpointsMap[name];
-      modelsConfig[name] = !modelData?.length ? endpoint.models.default ?? [] : modelData;
+      modelsConfig[name] = !modelData?.length ? (endpoint.models.default ?? []) : modelData;
     }
   }
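The JSDoc fix above matters because `fetchPromisesMap` stores in-flight promises, not resolved arrays: custom endpoints sharing a baseURL/apiKey pair await one request instead of each firing their own. A minimal sketch of that deduplication pattern, with hypothetical helper and key names:

// Sketch of promise-map deduplication (hypothetical names, not the project's exact code).
/** @type {Record<string, Promise<string[]>>} */
const fetchPromisesMap = {};

function fetchModelsOnce(baseURL, apiKey, fetchFn) {
  const key = `${baseURL}__${apiKey}`;
  // Cache the promise itself so concurrent callers share a single request.
  fetchPromisesMap[key] = fetchPromisesMap[key] ?? fetchFn({ baseURL, apiKey });
  return fetchPromisesMap[key];
}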


@@ -5,8 +5,8 @@ const {
   getGoogleModels,
   getBedrockModels,
   getAnthropicModels,
-  getChatGPTBrowserModels,
 } = require('~/server/services/ModelService');
+const { logger } = require('~/config');

 /**
  * Loads the default models for the application.
@@ -15,31 +15,68 @@ const {
  * @param {Express.Request} req - The Express request object.
  */
 async function loadDefaultModels(req) {
-  const google = getGoogleModels();
-  const openAI = await getOpenAIModels({ user: req.user.id });
-  const anthropic = getAnthropicModels();
-  const chatGPTBrowser = getChatGPTBrowserModels();
-  const azureOpenAI = await getOpenAIModels({ user: req.user.id, azure: true });
-  const gptPlugins = await getOpenAIModels({
-    user: req.user.id,
-    azure: useAzurePlugins,
-    plugins: true,
-  });
-  const assistants = await getOpenAIModels({ assistants: true });
-  const azureAssistants = await getOpenAIModels({ azureAssistants: true });
-
-  return {
-    [EModelEndpoint.openAI]: openAI,
-    [EModelEndpoint.agents]: openAI,
-    [EModelEndpoint.google]: google,
-    [EModelEndpoint.anthropic]: anthropic,
-    [EModelEndpoint.gptPlugins]: gptPlugins,
-    [EModelEndpoint.azureOpenAI]: azureOpenAI,
-    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
-    [EModelEndpoint.assistants]: assistants,
-    [EModelEndpoint.azureAssistants]: azureAssistants,
-    [EModelEndpoint.bedrock]: getBedrockModels(),
-  };
+  try {
+    const [
+      openAI,
+      anthropic,
+      azureOpenAI,
+      gptPlugins,
+      assistants,
+      azureAssistants,
+      google,
+      bedrock,
+    ] = await Promise.all([
+      getOpenAIModels({ user: req.user.id }).catch((error) => {
+        logger.error('Error fetching OpenAI models:', error);
+        return [];
+      }),
+      getAnthropicModels({ user: req.user.id }).catch((error) => {
+        logger.error('Error fetching Anthropic models:', error);
+        return [];
+      }),
+      getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
+        logger.error('Error fetching Azure OpenAI models:', error);
+        return [];
+      }),
+      getOpenAIModels({ user: req.user.id, azure: useAzurePlugins, plugins: true }).catch(
+        (error) => {
+          logger.error('Error fetching Plugin models:', error);
+          return [];
+        },
+      ),
+      getOpenAIModels({ assistants: true }).catch((error) => {
+        logger.error('Error fetching OpenAI Assistants API models:', error);
+        return [];
+      }),
+      getOpenAIModels({ azureAssistants: true }).catch((error) => {
+        logger.error('Error fetching Azure OpenAI Assistants API models:', error);
+        return [];
+      }),
+      Promise.resolve(getGoogleModels()).catch((error) => {
+        logger.error('Error getting Google models:', error);
+        return [];
+      }),
+      Promise.resolve(getBedrockModels()).catch((error) => {
+        logger.error('Error getting Bedrock models:', error);
+        return [];
+      }),
+    ]);

+    return {
+      [EModelEndpoint.openAI]: openAI,
+      [EModelEndpoint.agents]: openAI,
+      [EModelEndpoint.google]: google,
+      [EModelEndpoint.anthropic]: anthropic,
+      [EModelEndpoint.gptPlugins]: gptPlugins,
+      [EModelEndpoint.azureOpenAI]: azureOpenAI,
+      [EModelEndpoint.assistants]: assistants,
+      [EModelEndpoint.azureAssistants]: azureAssistants,
+      [EModelEndpoint.bedrock]: bedrock,
+    };
+  } catch (error) {
+    logger.error('Error fetching default models:', error);
+    throw new Error(`Failed to load default models: ${error.message}`);
+  }
 }

 module.exports = loadDefaultModels;
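The rewrite above fans the provider fetches out through `Promise.all`, attaching a `.catch` to each entry so a single failing provider degrades to an empty model list instead of rejecting the whole batch. A minimal standalone sketch of the pattern (illustrative names only):

// Sketch: Promise.all where each entry falls back to [] on failure.
async function loadAllModels(fetchers) {
  return Promise.all(
    Object.entries(fetchers).map(([name, fetcher]) =>
      Promise.resolve()
        .then(fetcher)
        .catch((error) => {
          console.error(`Error fetching ${name} models:`, error);
          return []; // one failure no longer rejects the whole Promise.all
        }),
    ),
  );
}

Deferring the call into `.then(fetcher)` also routes synchronous throws from non-async getters (like `getGoogleModels`) into the same `.catch` fallback.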


@@ -22,6 +22,7 @@ const { getAgent } = require('~/models/Agent');
 const { logger } = require('~/config');

 const providerConfigMap = {
+  [Providers.XAI]: initCustom,
   [Providers.OLLAMA]: initCustom,
   [Providers.DEEPSEEK]: initCustom,
   [Providers.OPENROUTER]: initCustom,


@@ -141,6 +141,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
     },
     clientOptions,
   );
+  clientOptions.modelOptions.user = req.user.id;
   const options = getLLMConfig(apiKey, clientOptions, endpoint);
   if (!customOptions.streamRate) {
     return options;


@@ -141,6 +141,7 @@ const initializeClient = async ({
     },
     clientOptions,
   );
+  clientOptions.modelOptions.user = req.user.id;
   const options = getLLMConfig(apiKey, clientOptions);
   if (!clientOptions.streamRate) {
     return options;


@@ -9,6 +9,7 @@ const { isEnabled } = require('~/server/utils');
  * @param {Object} options - Additional options for configuring the LLM.
  * @param {Object} [options.modelOptions] - Model-specific options.
  * @param {string} [options.modelOptions.model] - The name of the model to use.
+ * @param {string} [options.modelOptions.user] - The user ID
  * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2).
  * @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1).
  * @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2).


@@ -4,7 +4,9 @@ const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
 const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
 const { OllamaClient } = require('~/app/clients/OllamaClient');
+const { isUserProvided } = require('~/server/utils');
 const getLogStores = require('~/cache/getLogStores');
+const { logger } = require('~/config');

 /**
  * Splits a string by commas and trims each resulting value.
@@ -42,7 +44,7 @@ const fetchModels = async ({
   user,
   apiKey,
   baseURL,
-  name = 'OpenAI',
+  name = EModelEndpoint.openAI,
   azure = false,
   userIdQuery = false,
   createTokenConfig = true,
@@ -64,12 +66,19 @@ const fetchModels = async ({
   try {
     const options = {
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-      },
+      headers: {},
       timeout: 5000,
     };

+    if (name === EModelEndpoint.anthropic) {
+      options.headers = {
+        'x-api-key': apiKey,
+        'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
+      };
+    } else {
+      options.headers.Authorization = `Bearer ${apiKey}`;
+    }
+
     if (process.env.PROXY) {
       options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
     }
@@ -148,7 +157,7 @@ const fetchOpenAIModels = async (opts, _models = []) => {
       baseURL,
       azure: opts.azure,
       user: opts.user,
-      name: baseURL,
+      name: EModelEndpoint.openAI,
     });
   }
@@ -231,13 +240,71 @@ const getChatGPTBrowserModels = () => {
   return models;
 };

+/**
+ * Fetches models from the Anthropic API.
+ * @async
+ * @function
+ * @param {object} opts - The options for fetching the models.
+ * @param {string} opts.user - The user ID to send to the API.
+ * @param {string[]} [_models=[]] - The models to use as a fallback.
+ */
+const fetchAnthropicModels = async (opts, _models = []) => {
+  let models = _models.slice() ?? [];
+  let apiKey = process.env.ANTHROPIC_API_KEY;
+  const anthropicBaseURL = 'https://api.anthropic.com/v1';
+  let baseURL = anthropicBaseURL;
+  let reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;
+  if (reverseProxyUrl) {
+    baseURL = extractBaseURL(reverseProxyUrl);
+  }
+  if (!apiKey) {
+    return models;
+  }
+
+  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
+  const cachedModels = await modelsCache.get(baseURL);
+  if (cachedModels) {
+    return cachedModels;
+  }
+
+  if (baseURL) {
+    models = await fetchModels({
+      apiKey,
+      baseURL,
+      user: opts.user,
+      name: EModelEndpoint.anthropic,
+      tokenKey: EModelEndpoint.anthropic,
+    });
+  }
+
+  if (models.length === 0) {
+    return _models;
+  }
+
+  await modelsCache.set(baseURL, models);
+  return models;
+};
+
-const getAnthropicModels = () => {
+const getAnthropicModels = async (opts = {}) => {
   let models = defaultModels[EModelEndpoint.anthropic];
   if (process.env.ANTHROPIC_MODELS) {
     models = splitAndTrim(process.env.ANTHROPIC_MODELS);
+    return models;
   }
-  return models;
+
+  if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
+    return models;
+  }
+
+  try {
+    return await fetchAnthropicModels(opts, models);
+  } catch (error) {
+    logger.error('Error fetching Anthropic models:', error);
+    return models;
+  }
 };

 const getGoogleModels = () => {
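As the hunks above show, Anthropic requests authenticate with an `x-api-key` header plus an `anthropic-version` header rather than `Authorization: Bearer`. A hedged sketch of the resulting model-list request, assuming `fetchModels` targets a `/models` path under the base URL and a `{ data: [{ id }] }` response shape (neither is shown in this diff):

const axios = require('axios');

// Sketch only: the /models path and the response shape are assumptions here.
async function listAnthropicModels(apiKey) {
  const { data } = await axios.get('https://api.anthropic.com/v1/models', {
    headers: {
      'x-api-key': apiKey, // Anthropic-style auth, not Authorization: Bearer
      'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
    },
    timeout: 5000,
  });
  return data.data.map((model) => model.id);
}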


@@ -352,15 +352,15 @@ describe('splitAndTrim', () => {
 });

 describe('getAnthropicModels', () => {
-  it('returns default models when ANTHROPIC_MODELS is not set', () => {
+  it('returns default models when ANTHROPIC_MODELS is not set', async () => {
     delete process.env.ANTHROPIC_MODELS;
-    const models = getAnthropicModels();
+    const models = await getAnthropicModels();
     expect(models).toEqual(defaultModels[EModelEndpoint.anthropic]);
   });

-  it('returns models from ANTHROPIC_MODELS when set', () => {
+  it('returns models from ANTHROPIC_MODELS when set', async () => {
     process.env.ANTHROPIC_MODELS = 'claude-1, claude-2 ';
-    const models = getAnthropicModels();
+    const models = await getAnthropicModels();
     expect(models).toEqual(['claude-1', 'claude-2']);
   });
 });


@@ -1,56 +0,0 @@
-const levels = {
-  NONE: 0,
-  LOW: 1,
-  MEDIUM: 2,
-  HIGH: 3,
-};
-
-let level = levels.HIGH;
-
-module.exports = {
-  levels,
-  setLevel: (l) => (level = l),
-  log: {
-    parameters: (parameters) => {
-      if (levels.HIGH > level) {
-        return;
-      }
-      console.group();
-      parameters.forEach((p) => console.log(`${p.name}:`, p.value));
-      console.groupEnd();
-    },
-    functionName: (name) => {
-      if (levels.MEDIUM > level) {
-        return;
-      }
-      console.log(`\nEXECUTING: ${name}\n`);
-    },
-    flow: (flow) => {
-      if (levels.LOW > level) {
-        return;
-      }
-      console.log(`\n\n\nBEGIN FLOW: ${flow}\n\n\n`);
-    },
-    variable: ({ name, value }) => {
-      if (levels.HIGH > level) {
-        return;
-      }
-      console.group();
-      console.group();
-      console.log(`VARIABLE ${name}:`, value);
-      console.groupEnd();
-      console.groupEnd();
-    },
-    request: () => (req, res, next) => {
-      if (levels.HIGH > level) {
-        return next();
-      }
-      console.log('Hit URL', req.url, 'with following:');
-      console.group();
-      console.log('Query:', req.query);
-      console.log('Body:', req.body);
-      console.groupEnd();
-      return next();
-    },
-  },
-};


@@ -1,7 +1,7 @@
-import { dataService, QueryKeys } from 'librechat-data-provider';
 import { useMutation, useQueryClient } from '@tanstack/react-query';
-import type * as t from 'librechat-data-provider';
+import { dataService, QueryKeys, Constants } from 'librechat-data-provider';
 import type { UseMutationResult } from '@tanstack/react-query';
+import type * as t from 'librechat-data-provider';

 export const useEditArtifact = (
   _options?: t.EditArtifactOptions,
@@ -11,33 +11,47 @@ export const useEditArtifact = (
   return useMutation({
     mutationFn: (variables: t.TEditArtifactRequest) => dataService.editArtifact(variables),
     onSuccess: (data, vars, context) => {
-      queryClient.setQueryData<t.TMessage[]>([QueryKeys.messages, data.conversationId], (prev) => {
-        if (!prev) {
-          return prev;
-        }
-
-        const newArray = [...prev];
-        let targetIndex: number | undefined;
-
-        for (let i = newArray.length - 1; i >= 0; i--) {
-          if (newArray[i].messageId === vars.messageId) {
-            targetIndex = i;
-            break;
-          }
-        }
-
-        if (targetIndex == null) {
-          return prev;
-        }
-
-        newArray[targetIndex] = {
-          ...newArray[targetIndex],
-          content: data.content,
-          text: data.text,
-        };
-
-        return newArray;
-      });
+      let targetNotFound = true;
+      const setMessageData = (conversationId?: string | null) => {
+        if (!conversationId) {
+          return;
+        }
+        queryClient.setQueryData<t.TMessage[]>([QueryKeys.messages, conversationId], (prev) => {
+          if (!prev) {
+            return prev;
+          }
+
+          const newArray = [...prev];
+          let targetIndex: number | undefined;
+
+          for (let i = newArray.length - 1; i >= 0; i--) {
+            if (newArray[i].messageId === vars.messageId) {
+              targetIndex = i;
+              targetNotFound = false;
+              break;
+            }
+          }
+
+          if (targetIndex == null) {
+            return prev;
+          }
+
+          newArray[targetIndex] = {
+            ...newArray[targetIndex],
+            content: data.content,
+            text: data.text,
+          };
+
+          return newArray;
+        });
+      };
+
+      setMessageData(data.conversationId);
+      if (targetNotFound) {
+        console.warn(
+          'Edited Artifact Message not found in cache, trying `new` as `conversationId`',
+        );
+        setMessageData(Constants.NEW_CONVO);
+      }

       onSuccess?.(data, vars, context);
     },
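The hook above first updates the message cache under the real conversation ID and, if the edited message was not found there, retries under the placeholder `new` conversation key that drafts are stored against. A minimal sketch of that fallback pattern against a plain Map-backed cache (hypothetical helper, not the react-query code):

// Sketch: patch a message in cache, falling back to the `new` draft key.
// Array.prototype.findLastIndex requires Node 18+ / ES2023.
function patchMessage(cache, conversationId, messageId, patch) {
  const messages = cache.get(conversationId);
  if (!messages) {
    return false;
  }
  const index = messages.findLastIndex((m) => m.messageId === messageId);
  if (index === -1) {
    return false;
  }
  messages[index] = { ...messages[index], ...patch };
  return true;
}

if (!patchMessage(cache, conversationId, messageId, { text })) {
  patchMessage(cache, 'new', messageId, { text }); // draft not yet re-keyed
}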


@@ -1,7 +1,7 @@
 const path = require('path');
 require('module-alias')({ base: path.resolve(__dirname, '..', 'api') });
-const connect = require('./connect');
 const User = require('../api/models/User');
+const connect = require('./connect');

 const listUsers = async () => {
   try {
@@ -11,6 +11,7 @@ const listUsers = async () => {
     console.log('\nUser List:');
     console.log('----------------------------------------');
     users.forEach((user) => {
+      console.log(`ID: ${user._id.toString()}`);
       console.log(`Email: ${user.email}`);
       console.log(`Username: ${user.username || 'N/A'}`);
       console.log(`Name: ${user.name || 'N/A'}`);

package-lock.json (generated): 1700 changes
File diff suppressed because it is too large


@@ -23,8 +23,8 @@
     "update:sudo": "node config/update.js --sudo",
     "update:deployed": "node config/deployed-update.js",
     "rebase:deployed": "node config/deployed-update.js --rebase",
-    "start:deployed": "docker-compose -f ./deploy-compose.yml up -d",
-    "stop:deployed": "docker-compose -f ./deploy-compose.yml down",
+    "start:deployed": "docker compose -f ./deploy-compose.yml up -d || docker-compose -f ./deploy-compose.yml up -d",
+    "stop:deployed": "docker compose -f ./deploy-compose.yml down || docker-compose -f ./deploy-compose.yml down",
     "upgrade": "node config/upgrade.js",
     "create-user": "node config/create-user.js",
     "invite-user": "node config/invite-user.js",


@@ -1,6 +1,6 @@
 {
   "name": "librechat-data-provider",
-  "version": "0.7.6996",
+  "version": "0.7.6997",
   "description": "data services for librechat apps",
   "main": "dist/index.js",
   "module": "dist/index.es.js",


@@ -811,6 +811,9 @@ export const supportsBalanceCheck = {
 };

 export const visionModels = [
+  'grok-3',
+  'grok-2-vision',
+  'grok-vision',
   'gpt-4.5',
   'gpt-4o',
   'gpt-4o-mini',