Merge branch 'main' into refactor/package-auth

Committed by Cha on 2025-06-17 18:26:25 +08:00
commit 02b9c9d447
340 changed files with 18559 additions and 14872 deletions

View file

@@ -163,7 +163,11 @@ const deleteUserController = async (req, res) => {
await Balance.deleteMany({ user: user._id }); // delete user balances
await deletePresets(user.id); // delete user presets
/* TODO: Delete Assistant Threads */
await deleteConvos(user.id); // delete user convos
try {
await deleteConvos(user.id); // delete user convos
} catch (error) {
logger.error('[deleteUserController] Error deleting user convos, likely no convos', error);
}
await deleteUserPluginAuth(user.id, null, true); // delete user plugin auth
await deleteUserById(user.id); // delete user
await deleteAllSharedLinks(user.id); // delete user shared links
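
// A minimal sketch (illustrative, not part of the diff) of the same best-effort
// cleanup pattern, generalized so no single missing collection aborts account
// deletion. `safeDelete` is a hypothetical helper; `logger` and the delete
// functions are the ones this controller already imports.
const safeDelete = async (label, fn) => {
  try {
    await fn();
  } catch (error) {
    logger.error(`[deleteUserController] Error during ${label}`, error);
  }
};
// await safeDelete('deleteConvos', () => deleteConvos(user.id));
// await safeDelete('deletePresets', () => deletePresets(user.id));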

View file

@@ -1,4 +1,6 @@
const { nanoid } = require('nanoid');
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Tools, StepTypes, FileContext } = require('librechat-data-provider');
const {
EnvVar,
@@ -12,7 +14,6 @@ const {
const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { saveBase64Image } = require('~/server/services/Files/process');
const { logger, sendEvent } = require('~/config');
class ModelEndHandler {
/**
@@ -240,9 +241,7 @@ function createToolEndCallback({ req, res, artifactPromises }) {
if (output.artifact[Tools.web_search]) {
artifactPromises.push(
(async () => {
const name = `${output.name}_${output.tool_call_id}_${nanoid()}`;
const attachment = {
name,
type: Tools.web_search,
messageId: metadata.run_id,
toolCallId: output.tool_call_id,

View file

@@ -1,13 +1,12 @@
// const { HttpsProxyAgent } = require('https-proxy-agent');
// const {
// Constants,
// ImageDetail,
// EModelEndpoint,
// resolveHeaders,
// validateVisionModel,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
require('events').EventEmitter.defaultMaxListeners = 100;
const { logger } = require('@librechat/data-schemas');
const {
sendEvent,
createRun,
Tokenizer,
memoryInstructions,
createMemoryProcessor,
} = require('@librechat/api');
const {
Callback,
GraphEvents,
@@ -19,25 +18,30 @@ const {
} = require('@librechat/agents');
const {
Constants,
Permissions,
VisionModes,
ContentTypes,
EModelEndpoint,
KnownEndpoints,
PermissionTypes,
isAgentsEndpoint,
AgentCapabilities,
bedrockInputSchema,
removeNullishValues,
} = require('librechat-data-provider');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const { getCustomEndpointConfig, checkCapability } = require('~/server/services/Config');
const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const { setMemory, deleteMemory, getFormattedMemories } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const Tokenizer = require('~/server/services/Tokenizer');
const { checkAccess } = require('~/server/middleware/roles/access');
const BaseClient = require('~/app/clients/BaseClient');
const { logger, sendEvent } = require('~/config');
const { createRun } = require('./run');
const { loadAgent } = require('~/models/Agent');
const { getMCPManager } = require('~/config');
/**
* @param {ServerRequest} req
@@ -57,12 +61,8 @@ const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deep
const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
// const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory');
// const { getFormattedMemories } = require('~/models/Memory');
// const { getCurrentDateTime } = require('~/utils');
function createTokenCounter(encoding) {
return (message) => {
return function (message) {
const countTokens = (text) => Tokenizer.getTokenCount(text, encoding);
return getTokenCountForMessage(message, countTokens);
};
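
// Usage sketch for the factory above (encoding and message values are hypothetical):
// the curried form lets the stream processor count tokens per message without
// re-threading the encoding through every call.
// const tokenCounter = createTokenCounter('o200k_base');
// const promptTokens = tokenCounter({ role: 'user', content: 'Hello there' });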
@@ -123,6 +123,8 @@ class AgentClient extends BaseClient {
this.usage;
/** @type {Record<string, number>} */
this.indexTokenCountMap = {};
/** @type {(messages: BaseMessage[]) => Promise<void>} */
this.processMemory;
}
/**
@@ -137,55 +139,10 @@ }
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* `AgentClient` is not opinionated about vision requests, so we don't do anything here
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
// if (!attachments) {
// return;
// }
// const availableModels = this.options.modelsConfig?.[this.options.endpoint];
// if (!availableModels) {
// return;
// }
// let visionRequestDetected = false;
// for (const file of attachments) {
// if (file?.type?.includes('image')) {
// visionRequestDetected = true;
// break;
// }
// }
// if (!visionRequestDetected) {
// return;
// }
// this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
// if (this.isVisionModel) {
// delete this.modelOptions.stop;
// return;
// }
// for (const model of availableModels) {
// if (!validateVisionModel({ model, availableModels })) {
// continue;
// }
// this.modelOptions.model = model;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
// return;
// }
// if (!availableModels.includes(this.defaultVisionModel)) {
// return;
// }
// if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
// return;
// }
// this.modelOptions.model = this.defaultVisionModel;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
}
checkVisionRequest() {}
getSaveOptions() {
// TODO:
@@ -269,24 +226,6 @@ class AgentClient extends BaseClient {
.filter(Boolean)
.join('\n')
.trim();
// this.systemMessage = getCurrentDateTime();
// const { withKeys, withoutKeys } = await getFormattedMemories({
// userId: this.options.req.user.id,
// });
// processMemory({
// userId: this.options.req.user.id,
// message: this.options.req.body.text,
// parentMessageId,
// memory: withKeys,
// thread_id: this.conversationId,
// }).catch((error) => {
// logger.error('Memory Agent failed to process memory', error);
// });
// this.systemMessage += '\n\n' + memoryInstructions;
// if (withoutKeys) {
// this.systemMessage += `\n\n# Existing memory about the user:\n${withoutKeys}`;
// }
if (this.options.attachments) {
const attachments = await this.options.attachments;
@@ -370,6 +309,37 @@
systemContent = this.augmentedPrompt + systemContent;
}
// Inject MCP server instructions if available
const ephemeralAgent = this.options.req.body.ephemeralAgent;
let mcpServers = [];
// Check for ephemeral agent MCP servers
if (ephemeralAgent && ephemeralAgent.mcp && ephemeralAgent.mcp.length > 0) {
mcpServers = ephemeralAgent.mcp;
}
// Check for regular agent MCP tools
else if (this.options.agent && this.options.agent.tools) {
mcpServers = this.options.agent.tools
.filter(
(tool) =>
tool instanceof DynamicStructuredTool && tool.name.includes(Constants.mcp_delimiter),
)
.map((tool) => tool.name.split(Constants.mcp_delimiter).pop())
.filter(Boolean);
}
if (mcpServers.length > 0) {
try {
const mcpInstructions = getMCPManager().formatInstructionsForContext(mcpServers);
if (mcpInstructions) {
systemContent = [systemContent, mcpInstructions].filter(Boolean).join('\n\n');
logger.debug('[AgentClient] Injected MCP instructions for servers:', mcpServers);
}
} catch (error) {
logger.error('[AgentClient] Failed to inject MCP instructions:', error);
}
}
if (systemContent) {
this.options.agent.instructions = systemContent;
}
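
// Illustrative example (delimiter value assumed, not taken from this diff): MCP tool
// names embed their server name after Constants.mcp_delimiter, so if the delimiter
// were '_mcp_', a tool named 'search_mcp_brave' resolves to the server 'brave':
// 'search_mcp_brave'.split('_mcp_').pop(); // -> 'brave'
// Each detected server's instructions are then appended to systemContent above.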
@@ -399,9 +369,150 @@
opts.getReqData({ promptTokens });
}
const withoutKeys = await this.useMemory();
if (withoutKeys) {
systemContent += `${memoryInstructions}\n\n# Existing memory about the user:\n${withoutKeys}`;
}
if (systemContent) {
this.options.agent.instructions = systemContent;
}
return result;
}
/**
* @returns {Promise<string | undefined>}
*/
async useMemory() {
const user = this.options.req.user;
if (user.personalization?.memories === false) {
return;
}
const hasAccess = await checkAccess(user, PermissionTypes.MEMORIES, [Permissions.USE]);
if (!hasAccess) {
logger.debug(
`[api/server/controllers/agents/client.js #useMemory] User ${user.id} does not have USE permission for memories`,
);
return;
}
/** @type {TCustomConfig['memory']} */
const memoryConfig = this.options.req?.app?.locals?.memory;
if (!memoryConfig || memoryConfig.disabled === true) {
return;
}
/** @type {Agent} */
let prelimAgent;
const allowedProviders = new Set(
this.options.req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders,
);
try {
if (memoryConfig.agent?.id != null && memoryConfig.agent.id !== this.options.agent.id) {
prelimAgent = await loadAgent({
req: this.options.req,
agent_id: memoryConfig.agent.id,
endpoint: EModelEndpoint.agents,
});
} else if (
memoryConfig.agent?.id == null &&
memoryConfig.agent?.model != null &&
memoryConfig.agent?.provider != null
) {
prelimAgent = { id: Constants.EPHEMERAL_AGENT_ID, ...memoryConfig.agent };
}
} catch (error) {
logger.error(
'[api/server/controllers/agents/client.js #useMemory] Error loading agent for memory',
error,
);
}
const agent = await initializeAgent({
req: this.options.req,
res: this.options.res,
agent: prelimAgent,
allowedProviders,
});
if (!agent) {
logger.warn(
'[api/server/controllers/agents/client.js #useMemory] No agent found for memory',
memoryConfig,
);
return;
}
const llmConfig = Object.assign(
{
provider: agent.provider,
model: agent.model,
},
agent.model_parameters,
);
/** @type {import('@librechat/api').MemoryConfig} */
const config = {
validKeys: memoryConfig.validKeys,
instructions: agent.instructions,
llmConfig,
tokenLimit: memoryConfig.tokenLimit,
};
const userId = this.options.req.user.id + '';
const messageId = this.responseMessageId + '';
const conversationId = this.conversationId + '';
const [withoutKeys, processMemory] = await createMemoryProcessor({
userId,
config,
messageId,
conversationId,
memoryMethods: {
setMemory,
deleteMemory,
getFormattedMemories,
},
res: this.options.res,
});
this.processMemory = processMemory;
return withoutKeys;
}
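
// Assumed contract of createMemoryProcessor (from @librechat/api), inferred from its
// use above: it resolves to a [withoutKeys, processMemory] pair, where `withoutKeys`
// is a plain-text rendering of the user's stored memories (later appended to the
// system prompt under "# Existing memory about the user") and `processMemory(messages)`
// asynchronously extracts and persists memory updates through the provided
// setMemory / deleteMemory / getFormattedMemories methods.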
/**
* @param {BaseMessage[]} messages
* @returns {Promise<void | (TAttachment | null)[]>}
*/
async runMemory(messages) {
try {
if (this.processMemory == null) {
return;
}
/** @type {TCustomConfig['memory']} */
const memoryConfig = this.options.req?.app?.locals?.memory;
const messageWindowSize = memoryConfig?.messageWindowSize ?? 5;
let messagesToProcess = [...messages];
if (messages.length > messageWindowSize) {
for (let i = messages.length - messageWindowSize; i >= 0; i--) {
const potentialWindow = messages.slice(i, i + messageWindowSize);
if (potentialWindow[0]?.role === 'user') {
messagesToProcess = [...potentialWindow];
break;
}
}
if (messagesToProcess.length === messages.length) {
messagesToProcess = [...messages.slice(-messageWindowSize)];
}
}
return await this.processMemory(messagesToProcess);
} catch (error) {
logger.error('Memory Agent failed to process memory', error);
}
}
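
// Worked example of the window selection above (hypothetical roles), with
// messageWindowSize = 3 and message roles [user, ai, user, ai, ai]: the backward scan
// starts at i = 2, finds that messages.slice(2, 5) begins with a 'user' turn, and
// processes that slice; if no window starting with a user turn existed, the last
// 3 messages would be used as the fallback.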
/** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
await this.chatCompletion({
@@ -544,100 +655,13 @@ class AgentClient extends BaseClient {
let config;
/** @type {ReturnType<createRun>} */
let run;
/** @type {Promise<(TAttachment | null)[] | undefined>} */
let memoryPromise;
try {
if (!abortController) {
abortController = new AbortController();
}
// if (this.options.headers) {
// opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
// }
// if (this.options.proxy) {
// opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
// }
// if (this.isVisionModel) {
// modelOptions.max_tokens = 4000;
// }
// /** @type {TAzureConfig | undefined} */
// const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
// if (
// (this.azure && this.isVisionModel && azureConfig) ||
// (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
// ) {
// const { modelGroupMap, groupMap } = azureConfig;
// const {
// azureOptions,
// baseURL,
// headers = {},
// serverless,
// } = mapModelToAzureConfig({
// modelName: modelOptions.model,
// modelGroupMap,
// groupMap,
// });
// opts.defaultHeaders = resolveHeaders(headers);
// this.langchainProxy = extractBaseURL(baseURL);
// this.apiKey = azureOptions.azureOpenAIApiKey;
// const groupName = modelGroupMap[modelOptions.model].group;
// this.options.addParams = azureConfig.groupMap[groupName].addParams;
// this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// // Note: `forcePrompt` not re-assigned as only chat models are vision models
// this.azure = !serverless && azureOptions;
// this.azureEndpoint =
// !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
// }
// if (this.azure || this.options.azure) {
// /* Azure Bug, extremely short default `max_tokens` response */
// if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') {
// modelOptions.max_tokens = 4000;
// }
// /* Azure does not accept `model` in the body, so we need to remove it. */
// delete modelOptions.model;
// opts.baseURL = this.langchainProxy
// ? constructAzureURL({
// baseURL: this.langchainProxy,
// azureOptions: this.azure,
// })
// : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
// opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
// opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
// }
// if (process.env.OPENAI_ORGANIZATION) {
// opts.organization = process.env.OPENAI_ORGANIZATION;
// }
// if (this.options.addParams && typeof this.options.addParams === 'object') {
// modelOptions = {
// ...modelOptions,
// ...this.options.addParams,
// };
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] added params', {
// addParams: this.options.addParams,
// modelOptions,
// });
// }
// if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
// this.options.dropParams.forEach((param) => {
// delete modelOptions[param];
// });
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] dropped params', {
// dropParams: this.options.dropParams,
// modelOptions,
// });
// }
/** @type {TCustomConfig['endpoints']['agents']} */
const agentsEConfig = this.options.req.app.locals[EModelEndpoint.agents];
@@ -647,6 +671,7 @@
last_agent_index: this.agentConfigs?.size ?? 0,
user_id: this.user ?? this.options.req.user?.id,
hide_sequential_outputs: this.options.agent.hide_sequential_outputs,
user: this.options.req.user,
},
recursionLimit: agentsEConfig?.recursionLimit,
signal: abortController.signal,
@@ -734,6 +759,10 @@
messages = addCacheControl(messages);
}
if (i === 0) {
memoryPromise = this.runMemory(messages);
}
run = await createRun({
agent,
req: this.options.req,
@@ -769,10 +798,9 @@
run.Graph.contentData = contentData;
}
const encoding = this.getEncoding();
await run.processStream({ messages }, config, {
keepContent: i !== 0,
tokenCounter: createTokenCounter(encoding),
tokenCounter: createTokenCounter(this.getEncoding()),
indexTokenCountMap: currentIndexCountMap,
maxContextTokens: agent.maxContextTokens,
callbacks: {
@@ -887,6 +915,12 @@
});
try {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
await this.recordCollectedUsage({ context: 'message' });
} catch (err) {
logger.error(
@@ -895,6 +929,12 @@
);
}
} catch (err) {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
err,
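
// Condensed view of the control flow added in the two hunks above (sketch only):
// if (i === 0) { memoryPromise = this.runMemory(messages); }
// ...
// const attachments = await memoryPromise; // awaited on success and on abort
// if (attachments?.length) { this.artifactPromises.push(...attachments); }
// so memory attachments still reach the client even when the run is aborted.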

View file

@@ -1,94 +0,0 @@
const { Run, Providers } = require('@librechat/agents');
const { providerEndpointMap, KnownEndpoints } = require('librechat-data-provider');
/**
* @typedef {import('@librechat/agents').t} t
* @typedef {import('@librechat/agents').StandardGraphConfig} StandardGraphConfig
* @typedef {import('@librechat/agents').StreamEventData} StreamEventData
* @typedef {import('@librechat/agents').EventHandler} EventHandler
* @typedef {import('@librechat/agents').GraphEvents} GraphEvents
* @typedef {import('@librechat/agents').LLMConfig} LLMConfig
* @typedef {import('@librechat/agents').IState} IState
*/
const customProviders = new Set([
Providers.XAI,
Providers.OLLAMA,
Providers.DEEPSEEK,
Providers.OPENROUTER,
]);
/**
* Creates a new Run instance with custom handlers and configuration.
*
* @param {Object} options - The options for creating the Run instance.
* @param {ServerRequest} [options.req] - The server request.
* @param {string | undefined} [options.runId] - Optional run ID; otherwise, a new run ID will be generated.
* @param {Agent} options.agent - The agent for this run.
* @param {AbortSignal} options.signal - The signal for this run.
* @param {Record<GraphEvents, EventHandler> | undefined} [options.customHandlers] - Custom event handlers.
* @param {boolean} [options.streaming=true] - Whether to use streaming.
* @param {boolean} [options.streamUsage=true] - Whether to stream usage information.
* @returns {Promise<Run<IState>>} A promise that resolves to a new Run instance.
*/
async function createRun({
runId,
agent,
signal,
customHandlers,
streaming = true,
streamUsage = true,
}) {
const provider = providerEndpointMap[agent.provider] ?? agent.provider;
/** @type {LLMConfig} */
const llmConfig = Object.assign(
{
provider,
streaming,
streamUsage,
},
agent.model_parameters,
);
/** Resolves issues with new OpenAI usage field */
if (
customProviders.has(agent.provider) ||
(agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
) {
llmConfig.streamUsage = false;
llmConfig.usage = true;
}
/** @type {'reasoning_content' | 'reasoning'} */
let reasoningKey;
if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
}
/** @type {StandardGraphConfig} */
const graphConfig = {
signal,
llmConfig,
reasoningKey,
tools: agent.tools,
instructions: agent.instructions,
additional_instructions: agent.additional_instructions,
// toolEnd: agent.end_after_tools,
};
// TEMPORARY FOR TESTING
if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
graphConfig.streamBuffer = 2000;
}
return Run.create({
runId,
graphConfig,
customHandlers,
});
}
module.exports = { createRun };
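
// The local `createRun` deleted here corresponds to the one now imported from
// '@librechat/api' earlier in this diff. A minimal call-site sketch, assuming the
// options shape matches the deleted module's JSDoc (runId, agent, signal,
// customHandlers, streaming, streamUsage):
// const { createRun } = require('@librechat/api');
// const run = await createRun({
//   agent,
//   signal,
//   customHandlers,
//   streaming: true,
//   streamUsage: true,
// });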

View file

@@ -18,6 +18,7 @@ const {
} = require('~/models/Agent');
const { uploadImageBuffer, filterFile } = require('~/server/services/Files/process');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { resizeAvatar } = require('@librechat/auth');
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
const { updateAction, getActions } = require('~/models/Action');
const { updateAgentProjects } = require('~/models/Agent');
@@ -168,12 +169,18 @@ const updateAgentHandler = async (req, res) => {
});
}
/** @type {boolean} */
const isProjectUpdate = (projectIds?.length ?? 0) > 0 || (removeProjectIds?.length ?? 0) > 0;
let updatedAgent =
Object.keys(updateData).length > 0
? await updateAgent({ id }, updateData, { updatingUserId: req.user.id })
? await updateAgent({ id }, updateData, {
updatingUserId: req.user.id,
skipVersioning: isProjectUpdate,
})
: existingAgent;
if (projectIds || removeProjectIds) {
if (isProjectUpdate) {
updatedAgent = await updateAgentProjects({
user: req.user,
agentId: id,
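
// Illustrative request (route and ids are hypothetical): sharing or unsharing an agent
// only changes project membership, so it should not create a new agent version.
// PATCH /api/agents/agent_abc   body: { "projectIds": ["proj_123"] }
//   -> isProjectUpdate === true
//   -> updateAgent({ id }, updateData, { updatingUserId: req.user.id, skipVersioning: true })
//   -> updatedAgent = await updateAgentProjects({ user: req.user, agentId: id, ... })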
@@ -373,12 +380,27 @@ const uploadAgentAvatarHandler = async (req, res) => {
}
const buffer = await fs.readFile(req.file.path);
const image = await uploadImageBuffer({
req,
context: FileContext.avatar,
metadata: { buffer },
const fileStrategy = req.app.locals.fileStrategy;
const resizedBuffer = await resizeAvatar({
userId: req.user.id,
input: buffer,
});
const { processAvatar } = getStrategyFunctions(fileStrategy);
const avatarUrl = await processAvatar({
buffer: resizedBuffer,
userId: req.user.id,
manual: 'false',
agentId: agent_id,
});
const image = {
filepath: avatarUrl,
source: fileStrategy,
};
let _avatar;
try {
const agent = await getAgent({ id: agent_id });
@@ -403,7 +425,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
const data = {
avatar: {
filepath: image.filepath,
source: req.app.locals.fileStrategy,
source: image.source,
},
};
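
// Sketch of the strategy dispatch assumed by the avatar changes above:
// getStrategyFunctions(fileStrategy) returns the handler set for the configured
// storage backend, so the same controller code works for any store (value hypothetical).
// const { processAvatar } = getStrategyFunctions('s3');
// const avatarUrl = await processAvatar({
//   buffer: resizedBuffer, // produced by resizeAvatar({ userId: req.user.id, input: buffer })
//   userId: req.user.id,
//   manual: 'false',
//   agentId: agent_id,
// });
// The agent document then stores { filepath: avatarUrl, source: fileStrategy }.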