Merge branch 'dev' into feat/context-window-ui

Marco Beretta 2025-12-29 02:07:54 +01:00
commit cb8322ca85
No known key found for this signature in database
GPG key ID: D918033D8E74CC11
407 changed files with 25479 additions and 19894 deletions


@@ -1,8 +1,18 @@
 const mongoose = require('mongoose');
 const crypto = require('node:crypto');
 const { logger } = require('@librechat/data-schemas');
-const { ResourceType, SystemRoles, Tools, actionDelimiter } = require('librechat-data-provider');
-const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_all, mcp_delimiter } =
+const { getCustomEndpointConfig } = require('@librechat/api');
+const {
+  Tools,
+  SystemRoles,
+  ResourceType,
+  actionDelimiter,
+  isAgentsEndpoint,
+  getResponseSender,
+  isEphemeralAgentId,
+  encodeEphemeralAgentId,
+} = require('librechat-data-provider');
+const { GLOBAL_PROJECT_NAME, mcp_all, mcp_delimiter } =
   require('librechat-data-provider').Constants;
 const {
   removeAgentFromAllProjects,
@@ -92,7 +102,7 @@ const getAgents = async (searchParameter) => await Agent.find(searchParameter).l
  * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
  * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
  */
-const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_parameters: _m }) => {
+const loadEphemeralAgent = async ({ req, spec, endpoint, model_parameters: _m }) => {
   const { model, ...model_parameters } = _m;
   const modelSpecs = req.config?.modelSpecs?.list;
   /** @type {TModelSpec | null} */
@@ -139,8 +149,28 @@ const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_paramet
   }
   const instructions = req.body.promptPrefix;
+  // Compute display name using getResponseSender (same logic used for addedConvo agents)
+  const appConfig = req.config;
+  let endpointConfig = appConfig?.endpoints?.[endpoint];
+  if (!isAgentsEndpoint(endpoint) && !endpointConfig) {
+    try {
+      endpointConfig = getCustomEndpointConfig({ endpoint, appConfig });
+    } catch (err) {
+      logger.error('[loadEphemeralAgent] Error getting custom endpoint config', err);
+    }
+  }
+  const sender = getResponseSender({
+    modelLabel: model_parameters?.modelLabel,
+    modelDisplayLabel: endpointConfig?.modelDisplayLabel,
+  });
+  // Encode ephemeral agent ID with endpoint, model, and computed sender for display
+  const ephemeralId = encodeEphemeralAgentId({ endpoint, model, sender });
   const result = {
-    id: agent_id,
+    id: ephemeralId,
     instructions,
     provider: endpoint,
     model_parameters,
@@ -169,8 +199,8 @@ const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) =>
   if (!agent_id) {
     return null;
   }
-  if (agent_id === EPHEMERAL_AGENT_ID) {
-    return await loadEphemeralAgent({ req, spec, agent_id, endpoint, model_parameters });
+  if (isEphemeralAgentId(agent_id)) {
+    return await loadEphemeralAgent({ req, spec, endpoint, model_parameters });
   }
   const agent = await getAgent({
     id: agent_id,
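
The display-name logic that loadEphemeralAgent now performs reduces to a label fallback. A minimal sketch of that fallback, assuming getResponseSender prefers an explicit modelLabel and otherwise uses the endpoint's modelDisplayLabel (the real helper in librechat-data-provider accepts more options than shown here):

// Hypothetical stand-in for illustration only; not the actual getResponseSender.
const resolveSenderSketch = ({ modelLabel, modelDisplayLabel }) => {
  if (modelLabel) {
    return modelLabel; // an explicit per-request label wins
  }
  return modelDisplayLabel || ''; // otherwise fall back to the endpoint's display label
};

// Example: a custom endpoint configured with modelDisplayLabel 'My Proxy' and no
// modelLabel in model_parameters would yield sender === 'My Proxy'.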


@@ -1960,7 +1960,8 @@ describe('models/Agent', () => {
       });
       if (result) {
-        expect(result.id).toBe(EPHEMERAL_AGENT_ID);
+        // Ephemeral agent ID is encoded with endpoint and model
+        expect(result.id).toBe('openai__gpt-4');
         expect(result.instructions).toBe('Test instructions');
         expect(result.provider).toBe('openai');
         expect(result.model).toBe('gpt-4');
@@ -1978,7 +1979,7 @@ describe('models/Agent', () => {
       const mockReq = { user: { id: 'user123' } };
       const result = await loadAgent({
         req: mockReq,
-        agent_id: 'non_existent_agent',
+        agent_id: 'agent_non_existent',
         endpoint: 'openai',
         model_parameters: { model: 'gpt-4' },
       });
@@ -2105,7 +2106,7 @@ describe('models/Agent', () => {
     test('should handle loadAgent with malformed req object', async () => {
       const result = await loadAgent({
         req: null,
-        agent_id: 'test',
+        agent_id: 'agent_test',
         endpoint: 'openai',
         model_parameters: { model: 'gpt-4' },
       });
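
The updated expectation expect(result.id).toBe('openai__gpt-4') suggests the encoded ephemeral ID joins endpoint and model with a double underscore, and the renamed fixtures ('agent_test', 'agent_non_existent') suggest persistent agent IDs carry an 'agent_' prefix. A hypothetical encoder/guard pair consistent with these specs, for illustration only (the real encodeEphemeralAgentId and isEphemeralAgentId live in librechat-data-provider and also carry the sender and an optional index):

// Illustration only; delimiter and prefix are inferred from the spec above.
const EPHEMERAL_DELIMITER = '__';

const encodeEphemeralAgentIdSketch = ({ endpoint, model }) =>
  `${endpoint}${EPHEMERAL_DELIMITER}${model}`;

const isEphemeralAgentIdSketch = (id) => typeof id === 'string' && !id.startsWith('agent_');

// encodeEphemeralAgentIdSketch({ endpoint: 'openai', model: 'gpt-4' }) === 'openai__gpt-4'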


@@ -0,0 +1,218 @@
+const { logger } = require('@librechat/data-schemas');
+const { getCustomEndpointConfig } = require('@librechat/api');
+const {
+  Tools,
+  Constants,
+  isAgentsEndpoint,
+  getResponseSender,
+  isEphemeralAgentId,
+  appendAgentIdSuffix,
+  encodeEphemeralAgentId,
+} = require('librechat-data-provider');
+const { getMCPServerTools } = require('~/server/services/Config');
+
+const { mcp_all, mcp_delimiter } = Constants;
+
+/**
+ * Constant for added conversation agent ID
+ */
+const ADDED_AGENT_ID = 'added_agent';
+
+/**
+ * Get an agent document based on the provided ID.
+ * @param {Object} searchParameter - The search parameters to find the agent.
+ * @param {string} searchParameter.id - The ID of the agent.
+ * @returns {Promise<import('librechat-data-provider').Agent|null>}
+ */
+let getAgent;
+
+/**
+ * Set the getAgent function (dependency injection to avoid circular imports)
+ * @param {Function} fn
+ */
+const setGetAgent = (fn) => {
+  getAgent = fn;
+};
+
+/**
+ * Load an agent from an added conversation (TConversation).
+ * Used for multi-convo parallel agent execution.
+ *
+ * @param {Object} params
+ * @param {import('express').Request} params.req
+ * @param {import('librechat-data-provider').TConversation} params.conversation - The added conversation
+ * @param {import('librechat-data-provider').Agent} [params.primaryAgent] - The primary agent (used to duplicate tools when both are ephemeral)
+ * @returns {Promise<import('librechat-data-provider').Agent|null>} The agent config as a plain object, or null if invalid.
+ */
+const loadAddedAgent = async ({ req, conversation, primaryAgent }) => {
+  if (!conversation) {
+    return null;
+  }
+
+  // If there's an agent_id, load the existing agent
+  if (conversation.agent_id && !isEphemeralAgentId(conversation.agent_id)) {
+    if (!getAgent) {
+      throw new Error('getAgent not initialized - call setGetAgent first');
+    }
+    const agent = await getAgent({
+      id: conversation.agent_id,
+    });
+    if (!agent) {
+      logger.warn(`[loadAddedAgent] Agent ${conversation.agent_id} not found`);
+      return null;
+    }
+    agent.version = agent.versions ? agent.versions.length : 0;
+    // Append suffix to distinguish from primary agent (matches ephemeral format)
+    // This is needed when both agents have the same ID or for consistent parallel content attribution
+    agent.id = appendAgentIdSuffix(agent.id, 1);
+    return agent;
+  }
+
+  // Otherwise, create an ephemeral agent config from the conversation
+  const { model, endpoint, promptPrefix, spec, ...rest } = conversation;
+  if (!endpoint || !model) {
+    logger.warn('[loadAddedAgent] Missing required endpoint or model for ephemeral agent');
+    return null;
+  }
+
+  // If both primary and added agents are ephemeral, duplicate tools from primary agent
+  const primaryIsEphemeral = primaryAgent && isEphemeralAgentId(primaryAgent.id);
+  if (primaryIsEphemeral && Array.isArray(primaryAgent.tools)) {
+    // Get display name using getResponseSender
+    const appConfig = req.config;
+    let endpointConfig = appConfig?.endpoints?.[endpoint];
+    if (!isAgentsEndpoint(endpoint) && !endpointConfig) {
+      try {
+        endpointConfig = getCustomEndpointConfig({ endpoint, appConfig });
+      } catch (err) {
+        logger.error('[loadAddedAgent] Error getting custom endpoint config', err);
+      }
+    }
+    const sender = getResponseSender({
+      modelLabel: rest.modelLabel,
+      modelDisplayLabel: endpointConfig?.modelDisplayLabel,
+    });
+    const ephemeralId = encodeEphemeralAgentId({ endpoint, model, sender, index: 1 });
+    return {
+      id: ephemeralId,
+      instructions: promptPrefix || '',
+      provider: endpoint,
+      model_parameters: {},
+      model,
+      tools: [...primaryAgent.tools],
+    };
+  }
+
+  // Extract ephemeral agent options from conversation if present
+  const ephemeralAgent = rest.ephemeralAgent;
+  const mcpServers = new Set(ephemeralAgent?.mcp);
+  const userId = req.user?.id;
+
+  // Check model spec for MCP servers
+  const modelSpecs = req.config?.modelSpecs?.list;
+  let modelSpec = null;
+  if (spec != null && spec !== '') {
+    modelSpec = modelSpecs?.find((s) => s.name === spec) || null;
+  }
+  if (modelSpec?.mcpServers) {
+    for (const mcpServer of modelSpec.mcpServers) {
+      mcpServers.add(mcpServer);
+    }
+  }
+
+  /** @type {string[]} */
+  const tools = [];
+  if (ephemeralAgent?.execute_code === true || modelSpec?.executeCode === true) {
+    tools.push(Tools.execute_code);
+  }
+  if (ephemeralAgent?.file_search === true || modelSpec?.fileSearch === true) {
+    tools.push(Tools.file_search);
+  }
+  if (ephemeralAgent?.web_search === true || modelSpec?.webSearch === true) {
+    tools.push(Tools.web_search);
+  }
+
+  const addedServers = new Set();
+  if (mcpServers.size > 0) {
+    for (const mcpServer of mcpServers) {
+      if (addedServers.has(mcpServer)) {
+        continue;
+      }
+      const serverTools = await getMCPServerTools(userId, mcpServer);
+      if (!serverTools) {
+        tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
+        addedServers.add(mcpServer);
+        continue;
+      }
+      tools.push(...Object.keys(serverTools));
+      addedServers.add(mcpServer);
+    }
+  }
+
+  // Build model_parameters from conversation fields
+  const model_parameters = {};
+  const paramKeys = [
+    'temperature',
+    'top_p',
+    'topP',
+    'topK',
+    'presence_penalty',
+    'frequency_penalty',
+    'maxOutputTokens',
+    'maxTokens',
+    'max_tokens',
+  ];
+  for (const key of paramKeys) {
+    if (rest[key] != null) {
+      model_parameters[key] = rest[key];
+    }
+  }
+
+  // Get endpoint config for modelDisplayLabel (same pattern as initialize.js)
+  const appConfig = req.config;
+  let endpointConfig = appConfig?.endpoints?.[endpoint];
+  if (!isAgentsEndpoint(endpoint) && !endpointConfig) {
+    try {
+      endpointConfig = getCustomEndpointConfig({ endpoint, appConfig });
+    } catch (err) {
+      logger.error('[loadAddedAgent] Error getting custom endpoint config', err);
+    }
+  }
+
+  // Compute display name using getResponseSender (same logic used for main agent)
+  const sender = getResponseSender({
+    modelLabel: rest.modelLabel,
+    modelDisplayLabel: endpointConfig?.modelDisplayLabel,
+  });
+
+  /** Encoded ephemeral agent ID with endpoint, model, sender, and index=1 to distinguish from primary */
+  const ephemeralId = encodeEphemeralAgentId({ endpoint, model, sender, index: 1 });
+
+  const result = {
+    id: ephemeralId,
+    instructions: promptPrefix || '',
+    provider: endpoint,
+    model_parameters,
+    model,
+    tools,
+  };
+
+  if (ephemeralAgent?.artifacts != null && ephemeralAgent.artifacts) {
+    result.artifacts = ephemeralAgent.artifacts;
+  }
+
+  return result;
+};
+
+module.exports = {
+  ADDED_AGENT_ID,
+  loadAddedAgent,
+  setGetAgent,
+};
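
A minimal usage sketch for the new module, assuming the require paths and the getAgent export shown below (both are illustrative, not part of this diff):

// Illustrative wiring and call; paths, req, and primaryAgent are stand-ins.
const { loadAddedAgent, setGetAgent } = require('./loadAddedAgent');
const { getAgent } = require('~/models/Agent'); // assumed export, matching the lookup used above

// Wire the dependency-injected lookup once at startup (avoids circular imports).
setGetAgent(getAgent);

async function loadSecondAgent(req, primaryAgent) {
  return loadAddedAgent({
    req, // Express request with req.user and req.config already populated
    conversation: {
      endpoint: 'openai',
      model: 'gpt-4o-mini',
      promptPrefix: 'You are the second, parallel agent.',
      temperature: 0.2,
      ephemeralAgent: { web_search: true, mcp: [] },
    },
    primaryAgent, // optional; its tools are copied when both agents are ephemeral
  });
}
// The returned id is encoded with index: 1 so parallel output can be attributed
// separately from the primary agent in multi-convo runs.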


@@ -113,6 +113,8 @@ const tokenValues = Object.assign(
   'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
   'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
   'gpt-5': { prompt: 1.25, completion: 10 },
+  'gpt-5.1': { prompt: 1.25, completion: 10 },
+  'gpt-5.2': { prompt: 1.75, completion: 14 },
   'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
   'gpt-5-mini': { prompt: 0.25, completion: 2 },
   'gpt-5-pro': { prompt: 15, completion: 120 },
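
Assuming these values follow the same convention as the surrounding entries (USD per one million tokens), gpt-5.1 is priced identically to gpt-5 and gpt-5.2 at a premium. A worked example with the gpt-5.2 rates:

// Cost sketch; the per-1M-token unit is an assumption about this table's convention.
const rate = { prompt: 1.75, completion: 14 }; // 'gpt-5.2' entry above
const costUSD = (2000 / 1e6) * rate.prompt + (500 / 1e6) * rate.completion;
// 0.0035 + 0.007 = 0.0105 USD for 2,000 prompt + 500 completion tokens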


@@ -35,6 +35,19 @@ describe('getValueKey', () => {
     expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
   });
+  it('should return "gpt-5.1" for model name containing "gpt-5.1"', () => {
+    expect(getValueKey('gpt-5.1')).toBe('gpt-5.1');
+    expect(getValueKey('gpt-5.1-chat')).toBe('gpt-5.1');
+    expect(getValueKey('gpt-5.1-codex')).toBe('gpt-5.1');
+    expect(getValueKey('openai/gpt-5.1')).toBe('gpt-5.1');
+  });
+  it('should return "gpt-5.2" for model name containing "gpt-5.2"', () => {
+    expect(getValueKey('gpt-5.2')).toBe('gpt-5.2');
+    expect(getValueKey('gpt-5.2-chat')).toBe('gpt-5.2');
+    expect(getValueKey('openai/gpt-5.2')).toBe('gpt-5.2');
+  });
   it('should return "gpt-3.5-turbo-1106" for model name containing "gpt-3.5-turbo-1106"', () => {
     expect(getValueKey('gpt-3.5-turbo-1106-some-other-info')).toBe('gpt-3.5-turbo-1106');
     expect(getValueKey('openai/gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
@@ -310,6 +323,34 @@ describe('getMultiplier', () => {
     );
   });
+  it('should return the correct multiplier for gpt-5.1', () => {
+    expect(getMultiplier({ model: 'gpt-5.1', tokenType: 'prompt' })).toBe(
+      tokenValues['gpt-5.1'].prompt,
+    );
+    expect(getMultiplier({ model: 'gpt-5.1', tokenType: 'completion' })).toBe(
+      tokenValues['gpt-5.1'].completion,
+    );
+    expect(getMultiplier({ model: 'openai/gpt-5.1', tokenType: 'prompt' })).toBe(
+      tokenValues['gpt-5.1'].prompt,
+    );
+    expect(tokenValues['gpt-5.1'].prompt).toBe(1.25);
+    expect(tokenValues['gpt-5.1'].completion).toBe(10);
+  });
+  it('should return the correct multiplier for gpt-5.2', () => {
+    expect(getMultiplier({ model: 'gpt-5.2', tokenType: 'prompt' })).toBe(
+      tokenValues['gpt-5.2'].prompt,
+    );
+    expect(getMultiplier({ model: 'gpt-5.2', tokenType: 'completion' })).toBe(
+      tokenValues['gpt-5.2'].completion,
+    );
+    expect(getMultiplier({ model: 'openai/gpt-5.2', tokenType: 'prompt' })).toBe(
+      tokenValues['gpt-5.2'].prompt,
+    );
+    expect(tokenValues['gpt-5.2'].prompt).toBe(1.75);
+    expect(tokenValues['gpt-5.2'].completion).toBe(14);
+  });
   it('should return the correct multiplier for gpt-4o', () => {
     const valueKey = getValueKey('gpt-4o-2024-08-06');
     expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
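
The new cases exercise two lookup paths: resolving a valueKey first, or passing a raw (possibly provider-prefixed) model name straight to getMultiplier. A short usage sketch based on the behavior these specs assert (the require path is a placeholder):

// Usage sketch; './tx' stands in for the module under test.
const { getValueKey, getMultiplier } = require('./tx');

const key = getValueKey('openai/gpt-5.1'); // 'gpt-5.1', per the spec above
const promptRate = getMultiplier({ valueKey: key, tokenType: 'prompt' }); // 1.25
const completionRate = getMultiplier({ model: 'gpt-5.2', tokenType: 'completion' }); // 14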