Merge branch 'main' into feature/cohere-base-url

This commit is contained in:
Jón Levy 2026-02-05 16:21:22 +00:00 committed by GitHub
commit d4edaafcf7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
83 changed files with 5324 additions and 634 deletions

View file

@ -1,4 +1,4 @@
# v0.8.2-rc3
# v0.8.2
# Base node image
FROM node:20-alpine AS node

View file

@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.2-rc3
# v0.8.2
# Set configurable max-old-space-size with default
ARG NODE_MAX_OLD_SPACE_SIZE=6144

View file

@ -109,6 +109,11 @@
- 🎨 **Customizable Interface**:
- Customizable Dropdown & Interface that adapts to both power users and newcomers
- 🌊 **[Resumable Streams](https://www.librechat.ai/docs/features/resumable_streams)**:
- Never lose a response: AI responses automatically reconnect and resume if your connection drops
- Multi-Tab & Multi-Device Sync: Open the same chat in multiple tabs or pick up on another device
- Production-Ready: Works from single-server setups to horizontally scaled deployments with Redis
- 🗣️ **Speech & Audio**:
- Chat hands-free with Speech-to-Text and Text-to-Speech
- Automatically send and play Audio
@ -137,13 +142,11 @@
## 🪶 All-In-One AI Conversations with LibreChat
LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
LibreChat is a self-hosted AI chat platform that unifies all major AI providers in a single, privacy-focused interface.
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
Beyond chat, LibreChat provides AI Agents, Model Context Protocol (MCP) support, Artifacts, Code Interpreter, custom actions, conversation search, and enterprise-ready multi-user authentication.
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI)
Click on the thumbnail to open the video☝
Open source, actively developed, and built for anyone who values control over their AI infrastructure.
---

View file

@ -11,17 +11,15 @@ const {
isEphemeralAgentId,
encodeEphemeralAgentId,
} = require('librechat-data-provider');
const { GLOBAL_PROJECT_NAME, mcp_all, mcp_delimiter } =
require('librechat-data-provider').Constants;
const { mcp_all, mcp_delimiter } = require('librechat-data-provider').Constants;
const {
removeAgentFromAllProjects,
removeAgentIdsFromProject,
addAgentIdsToProject,
getProjectByName,
} = require('./Project');
const { removeAllPermissions } = require('~/server/services/PermissionService');
const { getMCPServerTools } = require('~/server/services/Config');
const { Agent, AclEntry } = require('~/db/models');
const { Agent, AclEntry, User } = require('~/db/models');
const { getActions } = require('./Action');
/**
@ -600,6 +598,14 @@ const deleteAgent = async (searchParameter) => {
} catch (error) {
logger.error('[deleteAgent] Error removing agent from handoff edges', error);
}
try {
await User.updateMany(
{ 'favorites.agentId': agent.id },
{ $pull: { favorites: { agentId: agent.id } } },
);
} catch (error) {
logger.error('[deleteAgent] Error removing agent from user favorites', error);
}
}
return agent;
};
@ -629,6 +635,15 @@ const deleteUserAgents = async (userId) => {
resourceId: { $in: agentObjectIds },
});
try {
await User.updateMany(
{ 'favorites.agentId': { $in: agentIds } },
{ $pull: { favorites: { agentId: { $in: agentIds } } } },
);
} catch (error) {
logger.error('[deleteUserAgents] Error removing agents from user favorites', error);
}
await Agent.deleteMany({ author: userId });
} catch (error) {
logger.error('[deleteUserAgents] General error:', error);
@ -735,59 +750,6 @@ const getListAgentsByAccess = async ({
};
};
/**
 * Get all agents.
 * @deprecated Use getListAgentsByAccess for ACL-aware agent listing
 * @param {Object} searchParameter - The search parameters to find matching agents.
 * @param {string} searchParameter.author - The user ID of the agent's author.
 * @returns {Promise<Object>} A promise that resolves to an object containing the agents data and pagination info.
 */
const getListAgents = async (searchParameter) => {
  const { author, ...filters } = searchParameter;

  /** Author-scoped query; widened below when globally-shared agents exist. */
  let query = { author, ...filters };

  const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, ['agentIds']);
  const globalAgentIds = globalProject?.agentIds ?? [];
  if (globalAgentIds.length > 0) {
    // Globally-shared agents are listed regardless of author, so the author
    // filter is intentionally not applied to this branch of the $or.
    query = {
      $or: [{ id: { $in: globalAgentIds }, ...filters }, query],
    };
  }

  // Lean projection: only the fields the agent list UI needs.
  const projection = {
    id: 1,
    _id: 1,
    name: 1,
    avatar: 1,
    author: 1,
    projectIds: 1,
    description: 1,
    // @deprecated - isCollaborative replaced by ACL permissions
    isCollaborative: 1,
    category: 1,
  };

  const found = await Agent.find(query, projection).lean();
  const agents = found.map((agent) => {
    // Hide the author field on agents the requester does not own.
    if (agent.author?.toString() !== author) {
      delete agent.author;
    }
    if (agent.author) {
      agent.author = agent.author.toString();
    }
    return agent;
  });

  return {
    data: agents,
    has_more: agents.length > 0,
    first_id: agents.length ? agents[0].id : null,
    last_id: agents.length ? agents[agents.length - 1].id : null,
  };
};
/**
* Updates the projects associated with an agent, adding and removing project IDs as specified.
* This function also updates the corresponding projects to include or exclude the agent ID.
@ -953,12 +915,11 @@ module.exports = {
updateAgent,
deleteAgent,
deleteUserAgents,
getListAgents,
revertAgentVersion,
updateAgentProjects,
countPromotedAgents,
addAgentResourceFile,
getListAgentsByAccess,
removeAgentResourceFiles,
generateActionMetadataHash,
countPromotedAgents,
};

View file

@ -22,17 +22,17 @@ const {
createAgent,
updateAgent,
deleteAgent,
getListAgents,
getListAgentsByAccess,
deleteUserAgents,
revertAgentVersion,
updateAgentProjects,
addAgentResourceFile,
getListAgentsByAccess,
removeAgentResourceFiles,
generateActionMetadataHash,
} = require('./Agent');
const permissionService = require('~/server/services/PermissionService');
const { getCachedTools, getMCPServerTools } = require('~/server/services/Config');
const { AclEntry } = require('~/db/models');
const { AclEntry, User } = require('~/db/models');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
@ -59,6 +59,7 @@ describe('models/Agent', () => {
beforeEach(async () => {
await Agent.deleteMany({});
await User.deleteMany({});
});
test('should add tool_resource to tools if missing', async () => {
@ -575,43 +576,488 @@ describe('models/Agent', () => {
expect(sourceAgentAfter.edges).toHaveLength(0);
});
test('should list agents by author', async () => {
test('should remove agent from user favorites when agent is deleted', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
// Create agent
await createAgent({
id: agentId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create user with the agent in favorites
await User.create({
_id: userId,
name: 'Test User',
email: `test-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agentId }, { model: 'gpt-4', endpoint: 'openAI' }],
});
// Verify user has agent in favorites
const userBefore = await User.findById(userId);
expect(userBefore.favorites).toHaveLength(2);
expect(userBefore.favorites.some((f) => f.agentId === agentId)).toBe(true);
// Delete the agent
await deleteAgent({ id: agentId });
// Verify agent is deleted
const agentAfterDelete = await getAgent({ id: agentId });
expect(agentAfterDelete).toBeNull();
// Verify agent is removed from user favorites
const userAfter = await User.findById(userId);
expect(userAfter.favorites).toHaveLength(1);
expect(userAfter.favorites.some((f) => f.agentId === agentId)).toBe(false);
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
});
test('should remove agent from multiple users favorites when agent is deleted', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const user1Id = new mongoose.Types.ObjectId();
const user2Id = new mongoose.Types.ObjectId();
// Create agent
await createAgent({
id: agentId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create two users with the agent in favorites
await User.create({
_id: user1Id,
name: 'Test User 1',
email: `test1-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agentId }],
});
await User.create({
_id: user2Id,
name: 'Test User 2',
email: `test2-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agentId }, { agentId: `agent_${uuidv4()}` }],
});
// Delete the agent
await deleteAgent({ id: agentId });
// Verify agent is removed from both users' favorites
const user1After = await User.findById(user1Id);
const user2After = await User.findById(user2Id);
expect(user1After.favorites).toHaveLength(0);
expect(user2After.favorites).toHaveLength(1);
expect(user2After.favorites.some((f) => f.agentId === agentId)).toBe(false);
});
test('should preserve other agents in database when one agent is deleted', async () => {
const agentToDeleteId = `agent_${uuidv4()}`;
const agentToKeep1Id = `agent_${uuidv4()}`;
const agentToKeep2Id = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
// Create multiple agents
await createAgent({
id: agentToDeleteId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agentToKeep1Id,
name: 'Agent To Keep 1',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agentToKeep2Id,
name: 'Agent To Keep 2',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Verify all agents exist
expect(await getAgent({ id: agentToDeleteId })).not.toBeNull();
expect(await getAgent({ id: agentToKeep1Id })).not.toBeNull();
expect(await getAgent({ id: agentToKeep2Id })).not.toBeNull();
// Delete one agent
await deleteAgent({ id: agentToDeleteId });
// Verify only the deleted agent is removed, others remain intact
expect(await getAgent({ id: agentToDeleteId })).toBeNull();
const keptAgent1 = await getAgent({ id: agentToKeep1Id });
const keptAgent2 = await getAgent({ id: agentToKeep2Id });
expect(keptAgent1).not.toBeNull();
expect(keptAgent1.name).toBe('Agent To Keep 1');
expect(keptAgent2).not.toBeNull();
expect(keptAgent2.name).toBe('Agent To Keep 2');
});
test('should preserve other agents in user favorites when one agent is deleted', async () => {
const agentToDeleteId = `agent_${uuidv4()}`;
const agentToKeep1Id = `agent_${uuidv4()}`;
const agentToKeep2Id = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
// Create multiple agents
await createAgent({
id: agentToDeleteId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agentToKeep1Id,
name: 'Agent To Keep 1',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agentToKeep2Id,
name: 'Agent To Keep 2',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create user with all three agents in favorites
await User.create({
_id: userId,
name: 'Test User',
email: `test-${uuidv4()}@example.com`,
provider: 'local',
favorites: [
{ agentId: agentToDeleteId },
{ agentId: agentToKeep1Id },
{ agentId: agentToKeep2Id },
],
});
// Verify user has all three agents in favorites
const userBefore = await User.findById(userId);
expect(userBefore.favorites).toHaveLength(3);
// Delete one agent
await deleteAgent({ id: agentToDeleteId });
// Verify only the deleted agent is removed from favorites
const userAfter = await User.findById(userId);
expect(userAfter.favorites).toHaveLength(2);
expect(userAfter.favorites.some((f) => f.agentId === agentToDeleteId)).toBe(false);
expect(userAfter.favorites.some((f) => f.agentId === agentToKeep1Id)).toBe(true);
expect(userAfter.favorites.some((f) => f.agentId === agentToKeep2Id)).toBe(true);
});
test('should not affect users who do not have deleted agent in favorites', async () => {
const agentToDeleteId = `agent_${uuidv4()}`;
const otherAgentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const userWithDeletedAgentId = new mongoose.Types.ObjectId();
const userWithoutDeletedAgentId = new mongoose.Types.ObjectId();
// Create agents
await createAgent({
id: agentToDeleteId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: otherAgentId,
name: 'Other Agent',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create user with the agent to be deleted
await User.create({
_id: userWithDeletedAgentId,
name: 'User With Deleted Agent',
email: `user1-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agentToDeleteId }, { model: 'gpt-4', endpoint: 'openAI' }],
});
// Create user without the agent to be deleted
await User.create({
_id: userWithoutDeletedAgentId,
name: 'User Without Deleted Agent',
email: `user2-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: otherAgentId }, { model: 'claude-3', endpoint: 'anthropic' }],
});
// Delete the agent
await deleteAgent({ id: agentToDeleteId });
// Verify user with deleted agent has it removed
const userWithDeleted = await User.findById(userWithDeletedAgentId);
expect(userWithDeleted.favorites).toHaveLength(1);
expect(userWithDeleted.favorites.some((f) => f.agentId === agentToDeleteId)).toBe(false);
expect(userWithDeleted.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
// Verify user without deleted agent is completely unaffected
const userWithoutDeleted = await User.findById(userWithoutDeletedAgentId);
expect(userWithoutDeleted.favorites).toHaveLength(2);
expect(userWithoutDeleted.favorites.some((f) => f.agentId === otherAgentId)).toBe(true);
expect(userWithoutDeleted.favorites.some((f) => f.model === 'claude-3')).toBe(true);
});
test('should remove all user agents from favorites when deleteUserAgents is called', async () => {
const authorId = new mongoose.Types.ObjectId();
const otherAuthorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
const agentIds = [];
for (let i = 0; i < 5; i++) {
const id = `agent_${uuidv4()}`;
agentIds.push(id);
await createAgent({
id,
name: `Agent ${i}`,
provider: 'test',
model: 'test-model',
author: authorId,
});
}
const agent1Id = `agent_${uuidv4()}`;
const agent2Id = `agent_${uuidv4()}`;
const otherAuthorAgentId = `agent_${uuidv4()}`;
for (let i = 0; i < 3; i++) {
await createAgent({
id: `other_agent_${uuidv4()}`,
name: `Other Agent ${i}`,
provider: 'test',
model: 'test-model',
author: otherAuthorId,
});
}
// Create agents by the author to be deleted
await createAgent({
id: agent1Id,
name: 'Author Agent 1',
provider: 'test',
model: 'test-model',
author: authorId,
});
const result = await getListAgents({ author: authorId.toString() });
await createAgent({
id: agent2Id,
name: 'Author Agent 2',
provider: 'test',
model: 'test-model',
author: authorId,
});
expect(result).toBeDefined();
expect(result.data).toBeDefined();
expect(result.data).toHaveLength(5);
expect(result.has_more).toBe(true);
// Create agent by different author (should not be deleted)
await createAgent({
id: otherAuthorAgentId,
name: 'Other Author Agent',
provider: 'test',
model: 'test-model',
author: otherAuthorId,
});
for (const agent of result.data) {
expect(agent.author).toBe(authorId.toString());
}
// Create user with all agents in favorites
await User.create({
_id: userId,
name: 'Test User',
email: `test-${uuidv4()}@example.com`,
provider: 'local',
favorites: [
{ agentId: agent1Id },
{ agentId: agent2Id },
{ agentId: otherAuthorAgentId },
{ model: 'gpt-4', endpoint: 'openAI' },
],
});
// Verify user has all favorites
const userBefore = await User.findById(userId);
expect(userBefore.favorites).toHaveLength(4);
// Delete all agents by the author
await deleteUserAgents(authorId.toString());
// Verify author's agents are deleted from database
expect(await getAgent({ id: agent1Id })).toBeNull();
expect(await getAgent({ id: agent2Id })).toBeNull();
// Verify other author's agent still exists
expect(await getAgent({ id: otherAuthorAgentId })).not.toBeNull();
// Verify user favorites: author's agents removed, others remain
const userAfter = await User.findById(userId);
expect(userAfter.favorites).toHaveLength(2);
expect(userAfter.favorites.some((f) => f.agentId === agent1Id)).toBe(false);
expect(userAfter.favorites.some((f) => f.agentId === agent2Id)).toBe(false);
expect(userAfter.favorites.some((f) => f.agentId === otherAuthorAgentId)).toBe(true);
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
});
test('should handle deleteUserAgents when agents are in multiple users favorites', async () => {
const authorId = new mongoose.Types.ObjectId();
const user1Id = new mongoose.Types.ObjectId();
const user2Id = new mongoose.Types.ObjectId();
const user3Id = new mongoose.Types.ObjectId();
const agent1Id = `agent_${uuidv4()}`;
const agent2Id = `agent_${uuidv4()}`;
const unrelatedAgentId = `agent_${uuidv4()}`;
// Create agents by the author
await createAgent({
id: agent1Id,
name: 'Author Agent 1',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agent2Id,
name: 'Author Agent 2',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create users with various favorites configurations
await User.create({
_id: user1Id,
name: 'User 1',
email: `user1-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agent1Id }, { agentId: agent2Id }],
});
await User.create({
_id: user2Id,
name: 'User 2',
email: `user2-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: agent1Id }, { model: 'claude-3', endpoint: 'anthropic' }],
});
await User.create({
_id: user3Id,
name: 'User 3',
email: `user3-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: unrelatedAgentId }, { model: 'gpt-4', endpoint: 'openAI' }],
});
// Delete all agents by the author
await deleteUserAgents(authorId.toString());
// Verify all users' favorites are correctly updated
const user1After = await User.findById(user1Id);
expect(user1After.favorites).toHaveLength(0);
const user2After = await User.findById(user2Id);
expect(user2After.favorites).toHaveLength(1);
expect(user2After.favorites.some((f) => f.agentId === agent1Id)).toBe(false);
expect(user2After.favorites.some((f) => f.model === 'claude-3')).toBe(true);
// User 3 should be completely unaffected
const user3After = await User.findById(user3Id);
expect(user3After.favorites).toHaveLength(2);
expect(user3After.favorites.some((f) => f.agentId === unrelatedAgentId)).toBe(true);
expect(user3After.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
});
test('should handle deleteUserAgents when user has no agents', async () => {
const authorWithNoAgentsId = new mongoose.Types.ObjectId();
const otherAuthorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
const existingAgentId = `agent_${uuidv4()}`;
// Create agent by different author
await createAgent({
id: existingAgentId,
name: 'Existing Agent',
provider: 'test',
model: 'test-model',
author: otherAuthorId,
});
// Create user with favorites
await User.create({
_id: userId,
name: 'Test User',
email: `test-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ agentId: existingAgentId }, { model: 'gpt-4', endpoint: 'openAI' }],
});
// Delete agents for user with no agents (should be a no-op)
await deleteUserAgents(authorWithNoAgentsId.toString());
// Verify existing agent still exists
expect(await getAgent({ id: existingAgentId })).not.toBeNull();
// Verify user favorites are unchanged
const userAfter = await User.findById(userId);
expect(userAfter.favorites).toHaveLength(2);
expect(userAfter.favorites.some((f) => f.agentId === existingAgentId)).toBe(true);
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
});
test('should handle deleteUserAgents when agents are not in any favorites', async () => {
const authorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
const agent1Id = `agent_${uuidv4()}`;
const agent2Id = `agent_${uuidv4()}`;
// Create agents by the author
await createAgent({
id: agent1Id,
name: 'Agent 1',
provider: 'test',
model: 'test-model',
author: authorId,
});
await createAgent({
id: agent2Id,
name: 'Agent 2',
provider: 'test',
model: 'test-model',
author: authorId,
});
// Create user with favorites that don't include these agents
await User.create({
_id: userId,
name: 'Test User',
email: `test-${uuidv4()}@example.com`,
provider: 'local',
favorites: [{ model: 'gpt-4', endpoint: 'openAI' }],
});
// Verify agents exist
expect(await getAgent({ id: agent1Id })).not.toBeNull();
expect(await getAgent({ id: agent2Id })).not.toBeNull();
// Delete all agents by the author
await deleteUserAgents(authorId.toString());
// Verify agents are deleted
expect(await getAgent({ id: agent1Id })).toBeNull();
expect(await getAgent({ id: agent2Id })).toBeNull();
// Verify user favorites are unchanged
const userAfter = await User.findById(userId);
expect(userAfter.favorites).toHaveLength(1);
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
});
test('should update agent projects', async () => {
@ -733,26 +1179,6 @@ describe('models/Agent', () => {
expect(result).toBe(expected);
});
test('should handle getListAgents with invalid author format', async () => {
try {
const result = await getListAgents({ author: 'invalid-object-id' });
expect(result.data).toEqual([]);
} catch (error) {
expect(error).toBeDefined();
}
});
test('should handle getListAgents with no agents', async () => {
const authorId = new mongoose.Types.ObjectId();
const result = await getListAgents({ author: authorId.toString() });
expect(result).toBeDefined();
expect(result.data).toEqual([]);
expect(result.has_more).toBe(false);
expect(result.first_id).toBeNull();
expect(result.last_id).toBeNull();
});
test('should handle updateAgentProjects with non-existent agent', async () => {
const nonExistentId = `agent_${uuidv4()}`;
const userId = new mongoose.Types.ObjectId();
@ -2366,17 +2792,6 @@ describe('models/Agent', () => {
expect(result).toBeNull();
});
test('should handle getListAgents with no agents', async () => {
const authorId = new mongoose.Types.ObjectId();
const result = await getListAgents({ author: authorId.toString() });
expect(result).toBeDefined();
expect(result.data).toEqual([]);
expect(result.has_more).toBe(false);
expect(result.first_id).toBeNull();
expect(result.last_id).toBeNull();
});
test('should handle updateAgent with MongoDB operators mixed with direct updates', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@ -49,7 +49,7 @@
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
"@modelcontextprotocol/sdk": "^1.25.2",
"@modelcontextprotocol/sdk": "^1.25.3",
"@node-saml/passport-saml": "^5.1.0",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
@ -80,7 +80,7 @@
"keyv-file": "^5.1.2",
"klona": "^2.0.6",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"mathjs": "^15.1.0",
"meilisearch": "^0.38.0",
"memorystore": "^1.6.7",

View file

@ -0,0 +1,281 @@
/**
 * Tests for job replacement detection in ResumableAgentController
 *
 * Tests the following fixes from PR #11462:
 * 1. Job creation timestamp tracking
 * 2. Stale job detection and event skipping
 * 3. Response message saving before final event emission
 */

// NOTE: these module-level mocks are named with a `mock` prefix on purpose —
// Jest hoists the `jest.mock(...)` calls below above these declarations, and
// only variables whose names start with `mock` may be referenced from inside
// a hoisted mock factory.
const mockLogger = {
  debug: jest.fn(),
  warn: jest.fn(),
  error: jest.fn(),
  info: jest.fn(),
};

// Stand-in for GenerationJobManager; individual tests configure
// `createJob`/`getJob` return values per scenario.
const mockGenerationJobManager = {
  createJob: jest.fn(),
  getJob: jest.fn(),
  emitDone: jest.fn(),
  emitChunk: jest.fn(),
  completeJob: jest.fn(),
  updateMetadata: jest.fn(),
  setContentParts: jest.fn(),
  subscribe: jest.fn(),
};

const mockSaveMessage = jest.fn();
const mockDecrementPendingRequest = jest.fn();

jest.mock('@librechat/data-schemas', () => ({
  logger: mockLogger,
}));

jest.mock('@librechat/api', () => ({
  isEnabled: jest.fn().mockReturnValue(false),
  GenerationJobManager: mockGenerationJobManager,
  checkAndIncrementPendingRequest: jest.fn().mockResolvedValue({ allowed: true }),
  // Wrapped in arrow functions (rather than passing the jest.fn directly) so
  // the factory closes over the variable, not a stale reference.
  decrementPendingRequest: (...args) => mockDecrementPendingRequest(...args),
  getViolationInfo: jest.fn(),
  sanitizeMessageForTransmit: jest.fn((msg) => msg),
  sanitizeFileForTransmit: jest.fn((file) => file),
  Constants: { NO_PARENT: '00000000-0000-0000-0000-000000000000' },
}));

jest.mock('~/models', () => ({
  saveMessage: (...args) => mockSaveMessage(...args),
}));
// NOTE(review): these tests exercise logic *extracted* from request.js
// (see the inline comments below) rather than the real controller — they
// verify the algorithm, not the wiring. Confirm request.js stays in sync.
describe('Job Replacement Detection', () => {
  beforeEach(() => {
    // Reset call history on the shared module-level mocks so each test
    // asserts only its own interactions.
    jest.clearAllMocks();
  });

  describe('Job Creation Timestamp Tracking', () => {
    it('should capture createdAt when job is created', async () => {
      const streamId = 'test-stream-123';
      const createdAt = Date.now();
      mockGenerationJobManager.createJob.mockResolvedValue({
        createdAt,
        readyPromise: Promise.resolve(),
        abortController: new AbortController(),
        emitter: { on: jest.fn() },
      });
      const job = await mockGenerationJobManager.createJob(streamId, 'user-123', streamId);
      expect(job.createdAt).toBe(createdAt);
    });
  });

  describe('Job Replacement Detection Logic', () => {
    /**
     * Simulates the job replacement detection logic from request.js
     * This is extracted for unit testing since the full controller is complex
     */
    const detectJobReplacement = async (streamId, originalCreatedAt) => {
      // A job is considered replaced if it no longer exists, or if the stored
      // job's createdAt differs from the timestamp captured at creation time.
      const currentJob = await mockGenerationJobManager.getJob(streamId);
      return !currentJob || currentJob.createdAt !== originalCreatedAt;
    };

    it('should detect when job was replaced (different createdAt)', async () => {
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      const newCreatedAt = 2000;
      mockGenerationJobManager.getJob.mockResolvedValue({
        createdAt: newCreatedAt,
      });
      const wasReplaced = await detectJobReplacement(streamId, originalCreatedAt);
      expect(wasReplaced).toBe(true);
    });

    it('should detect when job was deleted', async () => {
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      // A missing job (null) must also count as "replaced".
      mockGenerationJobManager.getJob.mockResolvedValue(null);
      const wasReplaced = await detectJobReplacement(streamId, originalCreatedAt);
      expect(wasReplaced).toBe(true);
    });

    it('should not detect replacement when same job (same createdAt)', async () => {
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      mockGenerationJobManager.getJob.mockResolvedValue({
        createdAt: originalCreatedAt,
      });
      const wasReplaced = await detectJobReplacement(streamId, originalCreatedAt);
      expect(wasReplaced).toBe(false);
    });
  });

  describe('Event Emission Behavior', () => {
    /**
     * Simulates the final event emission logic from request.js
     */
    const emitFinalEventIfNotReplaced = async ({
      streamId,
      originalCreatedAt,
      finalEvent,
      userId,
    }) => {
      const currentJob = await mockGenerationJobManager.getJob(streamId);
      const jobWasReplaced = !currentJob || currentJob.createdAt !== originalCreatedAt;
      if (jobWasReplaced) {
        // Skip the FINAL emit for a stale job, but still release the user's
        // pending-request slot so they are not blocked from new requests.
        mockLogger.debug('Skipping FINAL emit - job was replaced', {
          streamId,
          originalCreatedAt,
          currentCreatedAt: currentJob?.createdAt,
        });
        await mockDecrementPendingRequest(userId);
        return false;
      }
      mockGenerationJobManager.emitDone(streamId, finalEvent);
      mockGenerationJobManager.completeJob(streamId);
      await mockDecrementPendingRequest(userId);
      return true;
    };

    it('should skip emitting when job was replaced', async () => {
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      const newCreatedAt = 2000;
      const userId = 'user-123';
      mockGenerationJobManager.getJob.mockResolvedValue({
        createdAt: newCreatedAt,
      });
      const emitted = await emitFinalEventIfNotReplaced({
        streamId,
        originalCreatedAt,
        finalEvent: { final: true },
        userId,
      });
      expect(emitted).toBe(false);
      expect(mockGenerationJobManager.emitDone).not.toHaveBeenCalled();
      expect(mockGenerationJobManager.completeJob).not.toHaveBeenCalled();
      // The pending-request counter must be decremented even on the skip path.
      expect(mockDecrementPendingRequest).toHaveBeenCalledWith(userId);
      expect(mockLogger.debug).toHaveBeenCalledWith(
        'Skipping FINAL emit - job was replaced',
        expect.objectContaining({
          streamId,
          originalCreatedAt,
          currentCreatedAt: newCreatedAt,
        }),
      );
    });

    it('should emit when job was not replaced', async () => {
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      const userId = 'user-123';
      const finalEvent = { final: true, conversation: { conversationId: streamId } };
      mockGenerationJobManager.getJob.mockResolvedValue({
        createdAt: originalCreatedAt,
      });
      const emitted = await emitFinalEventIfNotReplaced({
        streamId,
        originalCreatedAt,
        finalEvent,
        userId,
      });
      expect(emitted).toBe(true);
      expect(mockGenerationJobManager.emitDone).toHaveBeenCalledWith(streamId, finalEvent);
      expect(mockGenerationJobManager.completeJob).toHaveBeenCalledWith(streamId);
      expect(mockDecrementPendingRequest).toHaveBeenCalledWith(userId);
    });
  });

  describe('Response Message Saving Order', () => {
    /**
     * Tests that response messages are saved BEFORE final events are emitted
     * This prevents race conditions where clients send follow-up messages
     * before the response is in the database
     */
    it('should save message before emitting final event', async () => {
      // Record invocation order via side effects on the shared mocks.
      const callOrder = [];
      mockSaveMessage.mockImplementation(async () => {
        callOrder.push('saveMessage');
      });
      mockGenerationJobManager.emitDone.mockImplementation(() => {
        callOrder.push('emitDone');
      });
      mockGenerationJobManager.getJob.mockResolvedValue({
        createdAt: 1000,
      });
      // Simulate the order of operations from request.js
      const streamId = 'test-stream-123';
      const originalCreatedAt = 1000;
      const response = { messageId: 'response-123' };
      const userId = 'user-123';
      // Step 1: Save message
      await mockSaveMessage({}, { ...response, user: userId }, { context: 'test' });
      // Step 2: Check for replacement
      const currentJob = await mockGenerationJobManager.getJob(streamId);
      const jobWasReplaced = !currentJob || currentJob.createdAt !== originalCreatedAt;
      // Step 3: Emit if not replaced
      if (!jobWasReplaced) {
        mockGenerationJobManager.emitDone(streamId, { final: true });
      }
      expect(callOrder).toEqual(['saveMessage', 'emitDone']);
    });
  });

  describe('Aborted Request Handling', () => {
    it('should use unfinished: true instead of error: true for aborted requests', () => {
      const response = { messageId: 'response-123', content: [] };
      // The new format for aborted responses
      const abortedResponse = { ...response, unfinished: true };
      expect(abortedResponse.unfinished).toBe(true);
      expect(abortedResponse.error).toBeUndefined();
    });

    it('should include unfinished flag in final event for aborted requests', () => {
      const response = { messageId: 'response-123', content: [] };
      // Old format (deprecated)
      const _oldFinalEvent = {
        final: true,
        responseMessage: { ...response, error: true },
        error: { message: 'Request was aborted' },
      };
      // New format (PR #11462)
      const newFinalEvent = {
        final: true,
        responseMessage: { ...response, unfinished: true },
      };
      expect(newFinalEvent.responseMessage.unfinished).toBe(true);
      expect(newFinalEvent.error).toBeUndefined();
      expect(newFinalEvent.responseMessage.error).toBeUndefined();
    });
  });
});

View file

@ -1,6 +1,5 @@
require('events').EventEmitter.defaultMaxListeners = 100;
const { logger } = require('@librechat/data-schemas');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const {
createRun,
@ -14,6 +13,7 @@ const {
getBalanceConfig,
getProviderConfig,
memoryInstructions,
applyContextToAgent,
GenerationJobManager,
getTransactionsConfig,
createMemoryProcessor,
@ -328,11 +328,13 @@ class AgentClient extends BaseClient {
);
}
/**
* Returns build message options. For AgentClient, agent-specific instructions
* are retrieved directly from agent objects in buildMessages, so this returns empty.
* @returns {Object} Empty options object
*/
getBuildMessagesOptions() {
return {
instructions: this.options.agent.instructions,
additional_instructions: this.options.agent.additional_instructions,
};
return {};
}
/**
@ -355,12 +357,7 @@ class AgentClient extends BaseClient {
return files;
}
async buildMessages(
messages,
parentMessageId,
{ instructions = null, additional_instructions = null },
opts,
) {
async buildMessages(messages, parentMessageId, _buildOptions, opts) {
/** Always pass mapMethod; getMessagesForConversation applies it only to messages with addedConvo flag */
const orderedMessages = this.constructor.getMessagesForConversation({
messages,
@ -374,11 +371,29 @@ class AgentClient extends BaseClient {
/** @type {number | undefined} */
let promptTokens;
/** @type {string} */
let systemContent = [instructions ?? '', additional_instructions ?? '']
.filter(Boolean)
.join('\n')
.trim();
/**
* Extract base instructions for all agents (combines instructions + additional_instructions).
* This must be done before applying context to preserve the original agent configuration.
*/
const extractBaseInstructions = (agent) => {
const baseInstructions = [agent.instructions ?? '', agent.additional_instructions ?? '']
.filter(Boolean)
.join('\n')
.trim();
agent.instructions = baseInstructions;
return agent;
};
/** Collect all agents for unified processing, extracting base instructions during collection */
const allAgents = [
{ agent: extractBaseInstructions(this.options.agent), agentId: this.options.agent.id },
...(this.agentConfigs?.size > 0
? Array.from(this.agentConfigs.entries()).map(([agentId, agent]) => ({
agent: extractBaseInstructions(agent),
agentId,
}))
: []),
];
if (this.options.attachments) {
const attachments = await this.options.attachments;
@ -413,6 +428,7 @@ class AgentClient extends BaseClient {
assistantName: this.options?.modelLabel,
});
/** For non-latest messages, prepend file context directly to message content */
if (message.fileContext && i !== orderedMessages.length - 1) {
if (typeof formattedMessage.content === 'string') {
formattedMessage.content = message.fileContext + '\n' + formattedMessage.content;
@ -422,8 +438,6 @@ class AgentClient extends BaseClient {
? (textPart.text = message.fileContext + '\n' + textPart.text)
: formattedMessage.content.unshift({ type: 'text', text: message.fileContext });
}
} else if (message.fileContext && i === orderedMessages.length - 1) {
systemContent = [systemContent, message.fileContext].join('\n');
}
const needsTokenCount =
@ -456,46 +470,35 @@ class AgentClient extends BaseClient {
return formattedMessage;
});
/**
* Build shared run context - applies to ALL agents in the run.
* This includes: file context (latest message), augmented prompt (RAG), memory context.
*/
const sharedRunContextParts = [];
/** File context from the latest message (attachments) */
const latestMessage = orderedMessages[orderedMessages.length - 1];
if (latestMessage?.fileContext) {
sharedRunContextParts.push(latestMessage.fileContext);
}
/** Augmented prompt from RAG/context handlers */
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
systemContent = this.augmentedPrompt + systemContent;
}
// Inject MCP server instructions if available
const ephemeralAgent = this.options.req.body.ephemeralAgent;
let mcpServers = [];
// Check for ephemeral agent MCP servers
if (ephemeralAgent && ephemeralAgent.mcp && ephemeralAgent.mcp.length > 0) {
mcpServers = ephemeralAgent.mcp;
}
// Check for regular agent MCP tools
else if (this.options.agent && this.options.agent.tools) {
mcpServers = this.options.agent.tools
.filter(
(tool) =>
tool instanceof DynamicStructuredTool && tool.name.includes(Constants.mcp_delimiter),
)
.map((tool) => tool.name.split(Constants.mcp_delimiter).pop())
.filter(Boolean);
}
if (mcpServers.length > 0) {
try {
const mcpInstructions = await getMCPManager().formatInstructionsForContext(mcpServers);
if (mcpInstructions) {
systemContent = [systemContent, mcpInstructions].filter(Boolean).join('\n\n');
logger.debug('[AgentClient] Injected MCP instructions for servers:', mcpServers);
}
} catch (error) {
logger.error('[AgentClient] Failed to inject MCP instructions:', error);
if (this.augmentedPrompt) {
sharedRunContextParts.push(this.augmentedPrompt);
}
}
if (systemContent) {
this.options.agent.instructions = systemContent;
/** Memory context (user preferences/memories) */
const withoutKeys = await this.useMemory();
if (withoutKeys) {
const memoryContext = `${memoryInstructions}\n\n# Existing memory about the user:\n${withoutKeys}`;
sharedRunContextParts.push(memoryContext);
}
const sharedRunContext = sharedRunContextParts.join('\n\n');
/** @type {Record<string, number> | undefined} */
let tokenCountMap;
@ -521,14 +524,27 @@ class AgentClient extends BaseClient {
opts.getReqData({ promptTokens });
}
const withoutKeys = await this.useMemory();
if (withoutKeys) {
systemContent += `${memoryInstructions}\n\n# Existing memory about the user:\n${withoutKeys}`;
}
if (systemContent) {
this.options.agent.instructions = systemContent;
}
/**
* Apply context to all agents.
* Each agent gets: shared run context + their own base instructions + their own MCP instructions.
*
* NOTE: This intentionally mutates agent objects in place. The agentConfigs Map
* holds references to config objects that will be passed to the graph runtime.
*/
const ephemeralAgent = this.options.req.body.ephemeralAgent;
const mcpManager = getMCPManager();
await Promise.all(
allAgents.map(({ agent, agentId }) =>
applyContextToAgent({
agent,
agentId,
logger,
mcpManager,
sharedRunContext,
ephemeralAgent: agentId === this.options.agent.id ? ephemeralAgent : undefined,
}),
),
);
return result;
}
@ -600,6 +616,8 @@ class AgentClient extends BaseClient {
agent_id: memoryConfig.agent.id,
endpoint: EModelEndpoint.agents,
});
} else if (memoryConfig.agent?.id != null) {
prelimAgent = this.options.agent;
} else if (
memoryConfig.agent?.id == null &&
memoryConfig.agent?.model != null &&
@ -614,6 +632,10 @@ class AgentClient extends BaseClient {
);
}
if (!prelimAgent) {
return;
}
const agent = await initializeAgent(
{
req: this.options.req,
@ -1084,11 +1106,20 @@ class AgentClient extends BaseClient {
this.artifactPromises.push(...attachments);
}
await this.recordCollectedUsage({
context: 'message',
balance: balanceConfig,
transactions: transactionsConfig,
});
/** Skip token spending if aborted - the abort handler (abortMiddleware.js) handles it
This prevents double-spending when user aborts via `/api/agents/chat/abort` */
const wasAborted = abortController?.signal?.aborted;
if (!wasAborted) {
await this.recordCollectedUsage({
context: 'message',
balance: balanceConfig,
transactions: transactionsConfig,
});
} else {
logger.debug(
'[api/server/controllers/agents/client.js #chatCompletion] Skipping token spending - handled by abort middleware',
);
}
} catch (err) {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Error in cleanup phase',

View file

@ -12,6 +12,17 @@ jest.mock('@librechat/agents', () => ({
jest.mock('@librechat/api', () => ({
...jest.requireActual('@librechat/api'),
checkAccess: jest.fn(),
initializeAgent: jest.fn(),
createMemoryProcessor: jest.fn(),
}));
jest.mock('~/models/Agent', () => ({
loadAgent: jest.fn(),
}));
jest.mock('~/models/Role', () => ({
getRoleByName: jest.fn(),
}));
// Mock getMCPManager
@ -1310,8 +1321,8 @@ describe('AgentClient - titleConvo', () => {
expect(client.options.agent.instructions).toContain('# MCP Server Instructions');
expect(client.options.agent.instructions).toContain('Use these tools carefully');
// Verify the base instructions are also included
expect(client.options.agent.instructions).toContain('Base instructions');
// Verify the base instructions are also included (from agent config, not buildOptions)
expect(client.options.agent.instructions).toContain('Base agent instructions');
});
it('should handle MCP instructions with ephemeral agent', async () => {
@ -1373,8 +1384,8 @@ describe('AgentClient - titleConvo', () => {
additional_instructions: null,
});
// Verify the instructions still work without MCP content
expect(client.options.agent.instructions).toBe('Base instructions only');
// Verify the instructions still work without MCP content (from agent config, not buildOptions)
expect(client.options.agent.instructions).toBe('Base agent instructions');
expect(client.options.agent.instructions).not.toContain('[object Promise]');
});
@ -1398,8 +1409,8 @@ describe('AgentClient - titleConvo', () => {
additional_instructions: null,
});
// Should still have base instructions without MCP content
expect(client.options.agent.instructions).toContain('Base instructions');
// Should still have base instructions without MCP content (from agent config, not buildOptions)
expect(client.options.agent.instructions).toContain('Base agent instructions');
expect(client.options.agent.instructions).not.toContain('[object Promise]');
});
});
@ -1849,4 +1860,400 @@ describe('AgentClient - titleConvo', () => {
});
});
});
// Verifies that memory context (user memories from useMemory) is injected into
// the primary agent AND every parallel (addedConvo) agent held in
// client.agentConfigs, since buildMessages now applies context to all agents.
describe('buildMessages - memory context for parallel agents', () => {
  let client;
  let mockReq;
  let mockRes;
  let mockAgent;
  let mockOptions;
  beforeEach(() => {
    jest.clearAllMocks();
    // Primary agent with its own configured instructions; buildOptions
    // instructions are expected to be ignored in favor of these.
    mockAgent = {
      id: 'primary-agent',
      name: 'Primary Agent',
      endpoint: EModelEndpoint.openAI,
      provider: EModelEndpoint.openAI,
      instructions: 'Primary agent instructions',
      model_parameters: {
        model: 'gpt-4',
      },
      tools: [],
    };
    // Request with memories enabled for the user and memory not disabled in config.
    mockReq = {
      user: {
        id: 'user-123',
        personalization: {
          memories: true,
        },
      },
      body: {
        endpoint: EModelEndpoint.openAI,
      },
      config: {
        memory: {
          disabled: false,
        },
      },
    };
    mockRes = {};
    mockOptions = {
      req: mockReq,
      res: mockRes,
      agent: mockAgent,
      endpoint: EModelEndpoint.agents,
    };
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.responseMessageId = 'response-123';
    client.shouldSummarize = false;
    client.maxContextTokens = 4096;
  });
  // Memory content should be appended to every agent's instructions, not just the primary's.
  it('should pass memory context to parallel agents (addedConvo)', async () => {
    const memoryContent = 'User prefers dark mode. User is a software developer.';
    client.useMemory = jest.fn().mockResolvedValue(memoryContent);
    const parallelAgent1 = {
      id: 'parallel-agent-1',
      name: 'Parallel Agent 1',
      instructions: 'Parallel agent 1 instructions',
      provider: EModelEndpoint.openAI,
    };
    const parallelAgent2 = {
      id: 'parallel-agent-2',
      name: 'Parallel Agent 2',
      instructions: 'Parallel agent 2 instructions',
      provider: EModelEndpoint.anthropic,
    };
    client.agentConfigs = new Map([
      ['parallel-agent-1', parallelAgent1],
      ['parallel-agent-2', parallelAgent2],
    ]);
    const messages = [
      {
        messageId: 'msg-1',
        parentMessageId: null,
        sender: 'User',
        text: 'Hello',
        isCreatedByUser: true,
      },
    ];
    await client.buildMessages(messages, null, {
      instructions: 'Base instructions',
      additional_instructions: null,
    });
    expect(client.useMemory).toHaveBeenCalled();
    // Verify primary agent has its configured instructions (not from buildOptions) and memory context
    expect(client.options.agent.instructions).toContain('Primary agent instructions');
    expect(client.options.agent.instructions).toContain(memoryContent);
    // Each parallel agent keeps its own base instructions plus the shared memory context.
    expect(parallelAgent1.instructions).toContain('Parallel agent 1 instructions');
    expect(parallelAgent1.instructions).toContain(memoryContent);
    expect(parallelAgent2.instructions).toContain('Parallel agent 2 instructions');
    expect(parallelAgent2.instructions).toContain(memoryContent);
  });
  // When useMemory yields nothing, parallel agents' instructions must stay untouched.
  it('should not modify parallel agents when no memory context is available', async () => {
    client.useMemory = jest.fn().mockResolvedValue(undefined);
    const parallelAgent = {
      id: 'parallel-agent-1',
      name: 'Parallel Agent 1',
      instructions: 'Original parallel instructions',
      provider: EModelEndpoint.openAI,
    };
    client.agentConfigs = new Map([['parallel-agent-1', parallelAgent]]);
    const messages = [
      {
        messageId: 'msg-1',
        parentMessageId: null,
        sender: 'User',
        text: 'Hello',
        isCreatedByUser: true,
      },
    ];
    await client.buildMessages(messages, null, {
      instructions: 'Base instructions',
      additional_instructions: null,
    });
    expect(parallelAgent.instructions).toBe('Original parallel instructions');
  });
  // An agent with no base instructions should still receive the memory context.
  it('should handle parallel agents without existing instructions', async () => {
    const memoryContent = 'User is a data scientist.';
    client.useMemory = jest.fn().mockResolvedValue(memoryContent);
    const parallelAgentNoInstructions = {
      id: 'parallel-agent-no-instructions',
      name: 'Parallel Agent No Instructions',
      provider: EModelEndpoint.openAI,
    };
    client.agentConfigs = new Map([
      ['parallel-agent-no-instructions', parallelAgentNoInstructions],
    ]);
    const messages = [
      {
        messageId: 'msg-1',
        parentMessageId: null,
        sender: 'User',
        text: 'Hello',
        isCreatedByUser: true,
      },
    ];
    await client.buildMessages(messages, null, {
      instructions: null,
      additional_instructions: null,
    });
    expect(parallelAgentNoInstructions.instructions).toContain(memoryContent);
  });
  // buildMessages must not throw when agentConfigs is null (no parallel agents configured).
  it('should not modify agentConfigs when none exist', async () => {
    const memoryContent = 'User prefers concise responses.';
    client.useMemory = jest.fn().mockResolvedValue(memoryContent);
    client.agentConfigs = null;
    const messages = [
      {
        messageId: 'msg-1',
        parentMessageId: null,
        sender: 'User',
        text: 'Hello',
        isCreatedByUser: true,
      },
    ];
    await expect(
      client.buildMessages(messages, null, {
        instructions: 'Base instructions',
        additional_instructions: null,
      }),
    ).resolves.not.toThrow();
    expect(client.options.agent.instructions).toContain(memoryContent);
  });
  // An empty (but present) agentConfigs map should behave the same as null.
  it('should handle empty agentConfigs map', async () => {
    const memoryContent = 'User likes detailed explanations.';
    client.useMemory = jest.fn().mockResolvedValue(memoryContent);
    client.agentConfigs = new Map();
    const messages = [
      {
        messageId: 'msg-1',
        parentMessageId: null,
        sender: 'User',
        text: 'Hello',
        isCreatedByUser: true,
      },
    ];
    await expect(
      client.buildMessages(messages, null, {
        instructions: 'Base instructions',
        additional_instructions: null,
      }),
    ).resolves.not.toThrow();
    expect(client.options.agent.instructions).toContain(memoryContent);
  });
});
// Verifies how useMemory selects the preliminary memory agent (prelimAgent):
// the current agent when ids match, a loaded agent when ids differ, an
// ephemeral agent when only model+provider are configured, and an early
// return when no valid memory-agent config exists.
describe('useMemory method - prelimAgent assignment', () => {
  let client;
  let mockReq;
  let mockRes;
  let mockAgent;
  let mockOptions;
  let mockCheckAccess;
  let mockLoadAgent;
  let mockInitializeAgent;
  let mockCreateMemoryProcessor;
  beforeEach(() => {
    jest.clearAllMocks();
    mockAgent = {
      id: 'agent-123',
      endpoint: EModelEndpoint.openAI,
      provider: EModelEndpoint.openAI,
      instructions: 'Test instructions',
      model: 'gpt-4',
      model_parameters: {
        model: 'gpt-4',
      },
    };
    // Memory config points at the same agent id as the current agent by default.
    mockReq = {
      user: {
        id: 'user-123',
        personalization: {
          memories: true,
        },
      },
      config: {
        memory: {
          agent: {
            id: 'agent-123',
          },
        },
        endpoints: {
          [EModelEndpoint.agents]: {
            allowedProviders: [EModelEndpoint.openAI],
          },
        },
      },
    };
    mockRes = {};
    mockOptions = {
      req: mockReq,
      res: mockRes,
      agent: mockAgent,
    };
    // Grab the mocked module functions (mocked at the top of this file).
    mockCheckAccess = require('@librechat/api').checkAccess;
    mockLoadAgent = require('~/models/Agent').loadAgent;
    mockInitializeAgent = require('@librechat/api').initializeAgent;
    mockCreateMemoryProcessor = require('@librechat/api').createMemoryProcessor;
  });
  // Matching ids: current agent should be reused without calling loadAgent.
  it('should use current agent when memory config agent.id matches current agent id', async () => {
    mockCheckAccess.mockResolvedValue(true);
    mockInitializeAgent.mockResolvedValue({
      ...mockAgent,
      provider: EModelEndpoint.openAI,
    });
    mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.responseMessageId = 'response-123';
    await client.useMemory();
    expect(mockLoadAgent).not.toHaveBeenCalled();
    expect(mockInitializeAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        agent: mockAgent,
      }),
      expect.any(Object),
    );
  });
  // Differing ids: the configured memory agent must be loaded via loadAgent.
  it('should load different agent when memory config agent.id differs from current agent id', async () => {
    const differentAgentId = 'different-agent-456';
    const differentAgent = {
      id: differentAgentId,
      provider: EModelEndpoint.openAI,
      model: 'gpt-4',
      instructions: 'Different agent instructions',
    };
    mockReq.config.memory.agent.id = differentAgentId;
    mockCheckAccess.mockResolvedValue(true);
    mockLoadAgent.mockResolvedValue(differentAgent);
    mockInitializeAgent.mockResolvedValue({
      ...differentAgent,
      provider: EModelEndpoint.openAI,
    });
    mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.responseMessageId = 'response-123';
    await client.useMemory();
    expect(mockLoadAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        agent_id: differentAgentId,
      }),
    );
    expect(mockInitializeAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        agent: differentAgent,
      }),
      expect.any(Object),
    );
  });
  // Empty agent config (no id, no model/provider): useMemory must bail out
  // before initializing an agent or creating a memory processor.
  it('should return early when prelimAgent is undefined (no valid memory agent config)', async () => {
    mockReq.config.memory = {
      agent: {},
    };
    mockCheckAccess.mockResolvedValue(true);
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.responseMessageId = 'response-123';
    const result = await client.useMemory();
    expect(result).toBeUndefined();
    expect(mockInitializeAgent).not.toHaveBeenCalled();
    expect(mockCreateMemoryProcessor).not.toHaveBeenCalled();
  });
  // No id but model+provider present: an ephemeral agent should be constructed.
  it('should create ephemeral agent when no id but model and provider are specified', async () => {
    mockReq.config.memory = {
      agent: {
        model: 'gpt-4',
        provider: EModelEndpoint.openAI,
      },
    };
    mockCheckAccess.mockResolvedValue(true);
    mockInitializeAgent.mockResolvedValue({
      id: Constants.EPHEMERAL_AGENT_ID,
      model: 'gpt-4',
      provider: EModelEndpoint.openAI,
    });
    mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.responseMessageId = 'response-123';
    await client.useMemory();
    expect(mockLoadAgent).not.toHaveBeenCalled();
    expect(mockInitializeAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        agent: expect.objectContaining({
          id: Constants.EPHEMERAL_AGENT_ID,
          model: 'gpt-4',
          provider: EModelEndpoint.openAI,
        }),
      }),
      expect.any(Object),
    );
  });
});
});

View file

@ -67,7 +67,15 @@ const ResumableAgentController = async (req, res, next, initializeClient, addTit
let client = null;
try {
logger.debug(`[ResumableAgentController] Creating job`, {
streamId,
conversationId,
reqConversationId,
userId,
});
const job = await GenerationJobManager.createJob(streamId, userId, conversationId);
const jobCreatedAt = job.createdAt; // Capture creation time to detect job replacement
req._resumableStreamId = streamId;
// Send JSON response IMMEDIATELY so client can connect to SSE stream
@ -272,6 +280,33 @@ const ResumableAgentController = async (req, res, next, initializeClient, addTit
});
}
// CRITICAL: Save response message BEFORE emitting final event.
// This prevents race conditions where the client sends a follow-up message
// before the response is saved to the database, causing orphaned parentMessageIds.
if (client.savedMessageIds && !client.savedMessageIds.has(messageId)) {
await saveMessage(
req,
{ ...response, user: userId, unfinished: wasAbortedBeforeComplete },
{ context: 'api/server/controllers/agents/request.js - resumable response end' },
);
}
// Check if our job was replaced by a new request before emitting
// This prevents stale requests from emitting events to newer jobs
const currentJob = await GenerationJobManager.getJob(streamId);
const jobWasReplaced = !currentJob || currentJob.createdAt !== jobCreatedAt;
if (jobWasReplaced) {
logger.debug(`[ResumableAgentController] Skipping FINAL emit - job was replaced`, {
streamId,
originalCreatedAt: jobCreatedAt,
currentCreatedAt: currentJob?.createdAt,
});
// Still decrement pending request since we incremented at start
await decrementPendingRequest(userId);
return;
}
if (!wasAbortedBeforeComplete) {
const finalEvent = {
final: true,
@ -281,26 +316,34 @@ const ResumableAgentController = async (req, res, next, initializeClient, addTit
responseMessage: { ...response },
};
logger.debug(`[ResumableAgentController] Emitting FINAL event`, {
streamId,
wasAbortedBeforeComplete,
userMessageId: userMessage?.messageId,
responseMessageId: response?.messageId,
conversationId: conversation?.conversationId,
});
GenerationJobManager.emitDone(streamId, finalEvent);
GenerationJobManager.completeJob(streamId);
await decrementPendingRequest(userId);
if (client.savedMessageIds && !client.savedMessageIds.has(messageId)) {
await saveMessage(
req,
{ ...response, user: userId },
{ context: 'api/server/controllers/agents/request.js - resumable response end' },
);
}
} else {
const finalEvent = {
final: true,
conversation,
title: conversation.title,
requestMessage: sanitizeMessageForTransmit(userMessage),
responseMessage: { ...response, error: true },
error: { message: 'Request was aborted' },
responseMessage: { ...response, unfinished: true },
};
logger.debug(`[ResumableAgentController] Emitting ABORTED FINAL event`, {
streamId,
wasAbortedBeforeComplete,
userMessageId: userMessage?.messageId,
responseMessageId: response?.messageId,
conversationId: conversation?.conversationId,
});
GenerationJobManager.emitDone(streamId, finalEvent);
GenerationJobManager.completeJob(streamId, 'Request aborted');
await decrementPendingRequest(userId);

View file

@ -7,13 +7,89 @@ const {
sanitizeMessageForTransmit,
} = require('@librechat/api');
const { isAssistantsEndpoint, ErrorTypes } = require('librechat-data-provider');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
const clearPendingReq = require('~/cache/clearPendingReq');
const { sendError } = require('~/server/middleware/error');
const { spendTokens } = require('~/models/spendTokens');
const { saveMessage, getConvo } = require('~/models');
const { abortRun } = require('./abortRun');
/**
 * Spend tokens for every model represented in the collected usage metadata.
 * Covers both sequential and parallel agent execution.
 *
 * IMPORTANT: after spending, this function empties the collectedUsage array
 * in place. The array is shared by reference with AgentClient.collectedUsage,
 * so clearing it ensures the client's finally-block spend path sees an empty
 * array and returns early, preventing double-spending.
 *
 * @param {Object} params
 * @param {string} params.userId - User ID
 * @param {string} params.conversationId - Conversation ID
 * @param {Array<Object>} params.collectedUsage - Usage metadata from all models
 * @param {string} [params.fallbackModel] - Fallback model name if not in usage
 */
async function spendCollectedUsage({ userId, conversationId, collectedUsage, fallbackModel }) {
  if (!collectedUsage || collectedUsage.length === 0) {
    return;
  }
  // Build one spend promise per usage entry; each promise catches and logs
  // its own failure so one bad entry cannot block the others.
  const pending = collectedUsage.filter(Boolean).map((usage) => {
    // Cache token counts may arrive in OpenAI form (input_token_details)
    // or Anthropic form (cache_*_input_tokens); accept either.
    const cacheWrite =
      Number(usage.input_token_details?.cache_creation) ||
      Number(usage.cache_creation_input_tokens) ||
      0;
    const cacheRead =
      Number(usage.input_token_details?.cache_read) || Number(usage.cache_read_input_tokens) || 0;
    const txMetadata = {
      context: 'abort',
      conversationId,
      user: userId,
      model: usage.model ?? fallbackModel,
    };
    if (cacheWrite > 0 || cacheRead > 0) {
      // Cache tokens present: use the structured spend path.
      return spendStructuredTokens(txMetadata, {
        promptTokens: {
          input: usage.input_tokens,
          write: cacheWrite,
          read: cacheRead,
        },
        completionTokens: usage.output_tokens,
      }).catch((err) => {
        logger.error('[abortMiddleware] Error spending structured tokens for abort', err);
      });
    }
    // No cache tokens: plain prompt/completion spend.
    return spendTokens(txMetadata, {
      promptTokens: usage.input_tokens,
      completionTokens: usage.output_tokens,
    }).catch((err) => {
      logger.error('[abortMiddleware] Error spending tokens for abort', err);
    });
  });
  // Wait for all token spending to complete.
  await Promise.all(pending);
  // Empty the shared array in place so recordCollectedUsage() sees no entries
  // and returns early (prevents double-spending from AgentClient's finally block).
  collectedUsage.length = 0;
}
/**
* Abort an active message generation.
* Uses GenerationJobManager for all agent requests.
@ -39,9 +115,8 @@ async function abortMessage(req, res) {
return;
}
const { jobData, content, text } = abortResult;
const { jobData, content, text, collectedUsage } = abortResult;
// Count tokens and spend them
const completionTokens = await countTokens(text);
const promptTokens = jobData?.promptTokens ?? 0;
@ -62,10 +137,21 @@ async function abortMessage(req, res) {
tokenCount: completionTokens,
};
await spendTokens(
{ ...responseMessage, context: 'incomplete', user: userId },
{ promptTokens, completionTokens },
);
// Spend tokens for ALL models from collectedUsage (handles parallel agents/addedConvo)
if (collectedUsage && collectedUsage.length > 0) {
await spendCollectedUsage({
userId,
conversationId: jobData?.conversationId,
collectedUsage,
fallbackModel: jobData?.model,
});
} else {
// Fallback: no collected usage, use text-based token counting for primary model only
await spendTokens(
{ ...responseMessage, context: 'incomplete', user: userId },
{ promptTokens, completionTokens },
);
}
await saveMessage(
req,

View file

@ -0,0 +1,428 @@
/**
* Tests for abortMiddleware - spendCollectedUsage function
*
* This tests the token spending logic for abort scenarios,
* particularly for parallel agents (addedConvo) where multiple
* models need their tokens spent.
*/
// Spend-token spies are declared first so the jest.mock factories below can
// close over them; both resolve immediately (no real DB writes).
const mockSpendTokens = jest.fn().mockResolvedValue();
const mockSpendStructuredTokens = jest.fn().mockResolvedValue();
jest.mock('~/models/spendTokens', () => ({
  spendTokens: (...args) => mockSpendTokens(...args),
  spendStructuredTokens: (...args) => mockSpendStructuredTokens(...args),
}));
// Silence all logger output during tests.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    error: jest.fn(),
    warn: jest.fn(),
    info: jest.fn(),
  },
}));
// Stub the @librechat/api surface used by abortMiddleware.
jest.mock('@librechat/api', () => ({
  countTokens: jest.fn().mockResolvedValue(100),
  isEnabled: jest.fn().mockReturnValue(false),
  sendEvent: jest.fn(),
  GenerationJobManager: {
    abortJob: jest.fn(),
  },
  sanitizeMessageForTransmit: jest.fn((msg) => msg),
}));
jest.mock('librechat-data-provider', () => ({
  isAssistantsEndpoint: jest.fn().mockReturnValue(false),
  ErrorTypes: { INVALID_REQUEST: 'INVALID_REQUEST', NO_SYSTEM_MESSAGES: 'NO_SYSTEM_MESSAGES' },
}));
// Text helpers pass input through unchanged so assertions stay simple.
jest.mock('~/app/clients/prompts', () => ({
  truncateText: jest.fn((text) => text),
  smartTruncateText: jest.fn((text) => text),
}));
jest.mock('~/cache/clearPendingReq', () => jest.fn().mockResolvedValue());
jest.mock('~/server/middleware/error', () => ({
  sendError: jest.fn(),
}));
jest.mock('~/models', () => ({
  saveMessage: jest.fn().mockResolvedValue(),
  getConvo: jest.fn().mockResolvedValue({ title: 'Test Chat' }),
}));
jest.mock('./abortRun', () => ({
  abortRun: jest.fn(),
}));
// Mocks above must be registered before any module imports.
// `spendCollectedUsage` is not exported from abortMiddleware, so these tests
// exercise an inline replica of its logic (see the describe block below).
describe('abortMiddleware - spendCollectedUsage', () => {
beforeEach(() => {
jest.clearAllMocks();
});
describe('spendCollectedUsage logic', () => {
// Since spendCollectedUsage is not exported, we test the logic directly
// by replicating the function here for unit testing
// Inline replica of abortMiddleware's spendCollectedUsage, wired to the mock
// spend functions instead of the real ones.
// NOTE(review): this duplicates production logic and can silently drift from
// the real implementation — keep it in sync (or export the real function).
const spendCollectedUsage = async ({
  userId,
  conversationId,
  collectedUsage,
  fallbackModel,
}) => {
  // Nothing collected: nothing to spend.
  if (!collectedUsage || collectedUsage.length === 0) {
    return;
  }
  const spendPromises = [];
  for (const usage of collectedUsage) {
    // Skip null/undefined entries.
    if (!usage) {
      continue;
    }
    // Cache counts come in OpenAI form (input_token_details) or
    // Anthropic form (cache_*_input_tokens); first non-zero wins.
    const cache_creation =
      Number(usage.input_token_details?.cache_creation) ||
      Number(usage.cache_creation_input_tokens) ||
      0;
    const cache_read =
      Number(usage.input_token_details?.cache_read) ||
      Number(usage.cache_read_input_tokens) ||
      0;
    const txMetadata = {
      context: 'abort',
      conversationId,
      user: userId,
      model: usage.model ?? fallbackModel,
    };
    // Cache tokens present: structured spend path.
    if (cache_creation > 0 || cache_read > 0) {
      spendPromises.push(
        mockSpendStructuredTokens(txMetadata, {
          promptTokens: {
            input: usage.input_tokens,
            write: cache_creation,
            read: cache_read,
          },
          completionTokens: usage.output_tokens,
        }).catch(() => {
          // Log error but don't throw
        }),
      );
      continue;
    }
    // Plain prompt/completion spend.
    spendPromises.push(
      mockSpendTokens(txMetadata, {
        promptTokens: usage.input_tokens,
        completionTokens: usage.output_tokens,
      }).catch(() => {
        // Log error but don't throw
      }),
    );
  }
  // Wait for all token spending to complete
  await Promise.all(spendPromises);
  // Clear the array to prevent double-spending
  collectedUsage.length = 0;
};
// Empty usage array: no spend functions should run.
it('should return early if collectedUsage is empty', async () => {
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage: [],
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).not.toHaveBeenCalled();
  expect(mockSpendStructuredTokens).not.toHaveBeenCalled();
});
// Null usage array: same early return as empty.
it('should return early if collectedUsage is null', async () => {
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage: null,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).not.toHaveBeenCalled();
  expect(mockSpendStructuredTokens).not.toHaveBeenCalled();
});
// Null entries inside the array are skipped; only the two real entries spend.
it('should skip null entries in collectedUsage', async () => {
  const collectedUsage = [
    { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    null,
    { input_tokens: 200, output_tokens: 60, model: 'gpt-4' },
  ];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).toHaveBeenCalledTimes(2);
});
// Single entry without cache tokens uses the plain spend path with 'abort' context.
it('should spend tokens for single model', async () => {
  const collectedUsage = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' }];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).toHaveBeenCalledTimes(1);
  expect(mockSpendTokens).toHaveBeenCalledWith(
    expect.objectContaining({
      context: 'abort',
      conversationId: 'convo-123',
      user: 'user-123',
      model: 'gpt-4',
    }),
    { promptTokens: 100, completionTokens: 50 },
  );
});
// One spend call per model, in array order.
it('should spend tokens for multiple models (parallel agents)', async () => {
  const collectedUsage = [
    { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
    { input_tokens: 120, output_tokens: 60, model: 'gemini-pro' },
  ];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).toHaveBeenCalledTimes(3);
  // Verify each model was called
  expect(mockSpendTokens).toHaveBeenNthCalledWith(
    1,
    expect.objectContaining({ model: 'gpt-4' }),
    { promptTokens: 100, completionTokens: 50 },
  );
  expect(mockSpendTokens).toHaveBeenNthCalledWith(
    2,
    expect.objectContaining({ model: 'claude-3' }),
    { promptTokens: 80, completionTokens: 40 },
  );
  expect(mockSpendTokens).toHaveBeenNthCalledWith(
    3,
    expect.objectContaining({ model: 'gemini-pro' }),
    { promptTokens: 120, completionTokens: 60 },
  );
});
// Missing usage.model falls back to the fallbackModel argument (?? fallback).
it('should use fallbackModel when usage.model is missing', async () => {
  const collectedUsage = [{ input_tokens: 100, output_tokens: 50 }];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'fallback-model',
  });
  expect(mockSpendTokens).toHaveBeenCalledWith(
    expect.objectContaining({ model: 'fallback-model' }),
    expect.any(Object),
  );
});
// OpenAI-style cache fields (input_token_details) route to spendStructuredTokens.
it('should use spendStructuredTokens for OpenAI format cache tokens', async () => {
  const collectedUsage = [
    {
      input_tokens: 100,
      output_tokens: 50,
      model: 'gpt-4',
      input_token_details: {
        cache_creation: 20,
        cache_read: 10,
      },
    },
  ];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
  expect(mockSpendTokens).not.toHaveBeenCalled();
  expect(mockSpendStructuredTokens).toHaveBeenCalledWith(
    expect.objectContaining({ model: 'gpt-4', context: 'abort' }),
    {
      promptTokens: {
        input: 100,
        write: 20,
        read: 10,
      },
      completionTokens: 50,
    },
  );
});
// Anthropic-style cache fields (cache_*_input_tokens) also route to the structured path.
it('should use spendStructuredTokens for Anthropic format cache tokens', async () => {
  const collectedUsage = [
    {
      input_tokens: 100,
      output_tokens: 50,
      model: 'claude-3',
      cache_creation_input_tokens: 25,
      cache_read_input_tokens: 15,
    },
  ];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'claude-3',
  });
  expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
  expect(mockSpendTokens).not.toHaveBeenCalled();
  expect(mockSpendStructuredTokens).toHaveBeenCalledWith(
    expect.objectContaining({ model: 'claude-3' }),
    {
      promptTokens: {
        input: 100,
        write: 25,
        read: 15,
      },
      completionTokens: 50,
    },
  );
});
// Cache and non-cache entries in the same batch split across both spend paths.
it('should handle mixed cache and non-cache entries', async () => {
  const collectedUsage = [
    { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    {
      input_tokens: 150,
      output_tokens: 30,
      model: 'claude-3',
      cache_creation_input_tokens: 20,
      cache_read_input_tokens: 10,
    },
    { input_tokens: 200, output_tokens: 20, model: 'gemini-pro' },
  ];
  await spendCollectedUsage({
    userId: 'user-123',
    conversationId: 'convo-123',
    collectedUsage,
    fallbackModel: 'gpt-4',
  });
  expect(mockSpendTokens).toHaveBeenCalledTimes(2);
  expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
});
it('should handle real-world parallel agent abort scenario', async () => {
// Simulates: Primary agent (gemini) + addedConvo agent (gpt-5) aborted mid-stream
const collectedUsage = [
{ input_tokens: 31596, output_tokens: 151, model: 'gemini-3-flash-preview' },
{ input_tokens: 28000, output_tokens: 120, model: 'gpt-5.2' },
];
await spendCollectedUsage({
userId: 'user-123',
conversationId: 'convo-123',
collectedUsage,
fallbackModel: 'gemini-3-flash-preview',
});
expect(mockSpendTokens).toHaveBeenCalledTimes(2);
// Primary model
expect(mockSpendTokens).toHaveBeenNthCalledWith(
1,
expect.objectContaining({ model: 'gemini-3-flash-preview' }),
{ promptTokens: 31596, completionTokens: 151 },
);
// Parallel model (addedConvo)
expect(mockSpendTokens).toHaveBeenNthCalledWith(
2,
expect.objectContaining({ model: 'gpt-5.2' }),
{ promptTokens: 28000, completionTokens: 120 },
);
});
it('should clear collectedUsage array after spending to prevent double-spending', async () => {
// This tests the race condition fix: after abort middleware spends tokens,
// the collectedUsage array is cleared so AgentClient.recordCollectedUsage()
// (which shares the same array reference) sees an empty array and returns early.
const collectedUsage = [
{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
{ input_tokens: 80, output_tokens: 40, model: 'claude-3' },
];
expect(collectedUsage.length).toBe(2);
await spendCollectedUsage({
userId: 'user-123',
conversationId: 'convo-123',
collectedUsage,
fallbackModel: 'gpt-4',
});
expect(mockSpendTokens).toHaveBeenCalledTimes(2);
// The array should be cleared after spending
expect(collectedUsage.length).toBe(0);
});
it('should await all token spending operations before clearing array', async () => {
// Ensure we don't clear the array before spending completes
let spendCallCount = 0;
mockSpendTokens.mockImplementation(async () => {
spendCallCount++;
// Simulate async delay
await new Promise((resolve) => setTimeout(resolve, 10));
});
const collectedUsage = [
{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
{ input_tokens: 80, output_tokens: 40, model: 'claude-3' },
];
await spendCollectedUsage({
userId: 'user-123',
conversationId: 'convo-123',
collectedUsage,
fallbackModel: 'gpt-4',
});
// Both spend calls should have completed
expect(spendCallCount).toBe(2);
// Array should be cleared after awaiting
expect(collectedUsage.length).toBe(0);
});
});
});

View file

@ -0,0 +1,301 @@
/**
* Tests for the agent abort endpoint
*
* Tests the following fixes from PR #11462:
* 1. Authorization check - only job owner can abort
* 2. Early abort handling - skip save when no responseMessageId
* 3. Partial response saving - save message before returning
*/
// Express app + supertest drive the route in isolation (no real server).
const express = require('express');
const request = require('supertest');
// Stubbed logger so tests can assert on warn/error output without real logging.
const mockLogger = {
debug: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
info: jest.fn(),
};
// Stubbed job manager backing the abort endpoint: getJob drives the
// authorization check, abortJob supplies the abort result payload.
const mockGenerationJobManager = {
getJob: jest.fn(),
abortJob: jest.fn(),
getActiveJobIdsForUser: jest.fn(),
};
const mockSaveMessage = jest.fn();
jest.mock('@librechat/data-schemas', () => ({
logger: mockLogger,
}));
jest.mock('@librechat/api', () => ({
isEnabled: jest.fn().mockReturnValue(false),
GenerationJobManager: mockGenerationJobManager,
}));
jest.mock('~/models', () => ({
// Indirection keeps the route's reference pointing at the current jest.fn.
saveMessage: (...args) => mockSaveMessage(...args),
}));
// All middleware passes straight through; requireJwtAuth injects a fixed
// authenticated user so ownership checks can be exercised deterministically.
jest.mock('~/server/middleware', () => ({
uaParser: (req, res, next) => next(),
checkBan: (req, res, next) => next(),
requireJwtAuth: (req, res, next) => {
req.user = { id: 'test-user-123' };
next();
},
messageIpLimiter: (req, res, next) => next(),
configMiddleware: (req, res, next) => next(),
messageUserLimiter: (req, res, next) => next(),
}));
// Mock the chat module - needs to be a router
jest.mock('~/server/routes/agents/chat', () => require('express').Router());
// Mock the v1 module - v1 is directly used as middleware
jest.mock('~/server/routes/agents/v1', () => ({
v1: require('express').Router(),
}));
// Import after mocks
const agentRoutes = require('~/server/routes/agents/index');
describe('Agent Abort Endpoint', () => {
let app;
// Build a minimal app once; the mocked middleware authenticates 'test-user-123'.
beforeAll(() => {
app = express();
app.use(express.json());
app.use('/api/agents', agentRoutes);
});
beforeEach(() => {
jest.clearAllMocks();
});
describe('POST /chat/abort', () => {
// Ownership enforcement: only the user recorded in job metadata may abort.
describe('Authorization', () => {
it("should return 403 when user tries to abort another user's job", async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'other-user-456' },
});
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(403);
expect(response.body).toEqual({ error: 'Unauthorized' });
expect(mockLogger.warn).toHaveBeenCalledWith(
expect.stringContaining('Unauthorized abort attempt'),
);
expect(mockGenerationJobManager.abortJob).not.toHaveBeenCalled();
});
it('should allow abort when user owns the job', async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'test-user-123' },
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: null,
content: [],
text: '',
});
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(200);
expect(response.body).toEqual({ success: true, aborted: jobStreamId });
expect(mockGenerationJobManager.abortJob).toHaveBeenCalledWith(jobStreamId);
});
it('should allow abort when job has no userId metadata (backwards compatibility)', async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: {},
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: null,
content: [],
text: '',
});
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(200);
expect(response.body).toEqual({ success: true, aborted: jobStreamId });
});
});
// Aborts that happen before generation produced a response skip the save path.
describe('Early Abort Handling', () => {
it('should skip message saving when responseMessageId is missing (early abort)', async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'test-user-123' },
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: {
userMessage: { messageId: 'user-msg-123' },
// No responseMessageId - early abort before generation started
conversationId: jobStreamId,
},
content: [],
text: '',
});
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(200);
expect(mockSaveMessage).not.toHaveBeenCalled();
});
it('should skip message saving when userMessage is missing', async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'test-user-123' },
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: {
// No userMessage
responseMessageId: 'response-msg-123',
conversationId: jobStreamId,
},
content: [],
text: '',
});
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(200);
expect(mockSaveMessage).not.toHaveBeenCalled();
});
});
// When a partial response exists, it must be persisted before the endpoint replies.
describe('Partial Response Saving', () => {
it('should save partial response when both userMessage and responseMessageId exist', async () => {
const jobStreamId = 'test-stream-123';
const userMessageId = 'user-msg-123';
const responseMessageId = 'response-msg-456';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'test-user-123' },
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: {
userMessage: { messageId: userMessageId },
responseMessageId,
conversationId: jobStreamId,
sender: 'TestAgent',
endpoint: 'anthropic',
model: 'claude-3',
},
content: [{ type: 'text', text: 'Partial response...' }],
text: 'Partial response...',
});
mockSaveMessage.mockResolvedValue();
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
expect(response.status).toBe(200);
expect(mockSaveMessage).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
messageId: responseMessageId,
parentMessageId: userMessageId,
conversationId: jobStreamId,
content: [{ type: 'text', text: 'Partial response...' }],
text: 'Partial response...',
sender: 'TestAgent',
endpoint: 'anthropic',
model: 'claude-3',
unfinished: true,
error: false,
isCreatedByUser: false,
user: 'test-user-123',
}),
expect.objectContaining({
context: 'api/server/routes/agents/index.js - abort endpoint',
}),
);
});
it('should handle saveMessage errors gracefully', async () => {
const jobStreamId = 'test-stream-123';
mockGenerationJobManager.getJob.mockResolvedValue({
metadata: { userId: 'test-user-123' },
});
mockGenerationJobManager.abortJob.mockResolvedValue({
success: true,
jobData: {
userMessage: { messageId: 'user-msg-123' },
responseMessageId: 'response-msg-456',
conversationId: jobStreamId,
},
content: [],
text: '',
});
mockSaveMessage.mockRejectedValue(new Error('Database error'));
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: jobStreamId });
// Should still return success even if save fails
expect(response.status).toBe(200);
expect(response.body).toEqual({ success: true, aborted: jobStreamId });
expect(mockLogger.error).toHaveBeenCalledWith(
expect.stringContaining('Failed to save partial response'),
);
});
});
describe('Job Not Found', () => {
it('should return 404 when job is not found', async () => {
mockGenerationJobManager.getJob.mockResolvedValue(null);
mockGenerationJobManager.getActiveJobIdsForUser.mockResolvedValue([]);
const response = await request(app)
.post('/api/agents/chat/abort')
.send({ conversationId: 'non-existent-job' });
expect(response.status).toBe(404);
expect(response.body).toEqual({
error: 'Job not found',
streamId: 'non-existent-job',
});
});
});
});
});

View file

@ -9,6 +9,7 @@ const {
configMiddleware,
messageUserLimiter,
} = require('~/server/middleware');
const { saveMessage } = require('~/models');
const { v1 } = require('./v1');
const chat = require('./chat');
@ -46,6 +47,10 @@ router.get('/chat/stream/:streamId', async (req, res) => {
});
}
if (job.metadata?.userId && job.metadata.userId !== req.user.id) {
return res.status(403).json({ error: 'Unauthorized' });
}
res.setHeader('Content-Encoding', 'identity');
res.setHeader('Content-Type', 'text/event-stream');
res.setHeader('Cache-Control', 'no-cache, no-transform');
@ -194,9 +199,53 @@ router.post('/chat/abort', async (req, res) => {
logger.debug(`[AgentStream] Computed jobStreamId: ${jobStreamId}`);
if (job && jobStreamId) {
if (job.metadata?.userId && job.metadata.userId !== userId) {
logger.warn(`[AgentStream] Unauthorized abort attempt for ${jobStreamId} by user ${userId}`);
return res.status(403).json({ error: 'Unauthorized' });
}
logger.debug(`[AgentStream] Job found, aborting: ${jobStreamId}`);
await GenerationJobManager.abortJob(jobStreamId);
logger.debug(`[AgentStream] Job aborted successfully: ${jobStreamId}`);
const abortResult = await GenerationJobManager.abortJob(jobStreamId);
logger.debug(`[AgentStream] Job aborted successfully: ${jobStreamId}`, {
abortResultSuccess: abortResult.success,
abortResultUserMessageId: abortResult.jobData?.userMessage?.messageId,
abortResultResponseMessageId: abortResult.jobData?.responseMessageId,
});
// CRITICAL: Save partial response BEFORE returning to prevent race condition.
// If user sends a follow-up immediately after abort, the parentMessageId must exist in DB.
// Only save if we have a valid responseMessageId (skip early aborts before generation started)
if (
abortResult.success &&
abortResult.jobData?.userMessage?.messageId &&
abortResult.jobData?.responseMessageId
) {
const { jobData, content, text } = abortResult;
const responseMessage = {
messageId: jobData.responseMessageId,
parentMessageId: jobData.userMessage.messageId,
conversationId: jobData.conversationId,
content: content || [],
text: text || '',
sender: jobData.sender || 'AI',
endpoint: jobData.endpoint,
model: jobData.model,
unfinished: true,
error: false,
isCreatedByUser: false,
user: userId,
};
try {
await saveMessage(req, responseMessage, {
context: 'api/server/routes/agents/index.js - abort endpoint',
});
logger.debug(`[AgentStream] Saved partial response for: ${jobStreamId}`);
} catch (saveError) {
logger.error(`[AgentStream] Failed to save partial response: ${saveError.message}`);
}
}
return res.json({ success: true, aborted: jobStreamId });
}

View file

@ -73,15 +73,25 @@ const replaceArtifactContent = (originalText, artifact, original, updated) => {
return null;
}
// Check if there are code blocks
const codeBlockStart = artifactContent.indexOf('```\n', contentStart);
// Check if there are code blocks - handle both ```\n and ```lang\n formats
let codeBlockStart = artifactContent.indexOf('```', contentStart);
const codeBlockEnd = artifactContent.lastIndexOf('\n```', contentEnd);
// If we found opening backticks, find the actual newline (skipping any language identifier)
if (codeBlockStart !== -1) {
const newlineAfterBackticks = artifactContent.indexOf('\n', codeBlockStart);
if (newlineAfterBackticks !== -1 && newlineAfterBackticks < contentEnd) {
codeBlockStart = newlineAfterBackticks;
} else {
codeBlockStart = -1;
}
}
// Determine where to look for the original content
let searchStart, searchEnd;
if (codeBlockStart !== -1) {
// Code block starts
searchStart = codeBlockStart + 4; // after ```\n
// Code block starts - searchStart is right after the newline following ```[lang]
searchStart = codeBlockStart + 1; // after the newline
if (codeBlockEnd !== -1 && codeBlockEnd > codeBlockStart) {
// Code block has proper ending

View file

@ -494,5 +494,268 @@ ${original}`;
/```\n {2}function test\(\) \{\n {4}return \{\n {6}value: 100\n {4}\};\n {2}\}\n```/,
);
});
// Regression test: fences with a language tag (```svg) must still be located
// so in-fence replacement works, and the tag must survive the rewrite.
test('should handle code blocks with language identifiers (```svg, ```html, etc.)', () => {
const svgContent = `<svg viewBox="0 0 200 200" xmlns="http://www.w3.org/2000/svg">
<rect width="200" height="200" fill="#4A90A4"/>
<rect x="50" y="50" width="100" height="100" fill="#FFFFFF"/>
</svg>`;
/** Artifact with language identifier in code block */
const artifactText = `${ARTIFACT_START}{identifier="test-svg" type="image/svg+xml" title="Test SVG"}
\`\`\`svg
${svgContent}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updatedSvg = svgContent.replace('#FFFFFF', '#131313');
const result = replaceArtifactContent(artifactText, artifacts[0], svgContent, updatedSvg);
expect(result).not.toBeNull();
expect(result).toContain('#131313');
expect(result).not.toContain('#FFFFFF');
expect(result).toMatch(/```svg\n/);
});
// Same path for an ```html fence wrapping a multi-line document.
test('should handle code blocks with complex language identifiers', () => {
const htmlContent = `<!DOCTYPE html>
<html>
<head><title>Test</title></head>
<body>Hello</body>
</html>`;
const artifactText = `${ARTIFACT_START}{identifier="test-html" type="text/html" title="Test HTML"}
\`\`\`html
${htmlContent}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const updatedHtml = htmlContent.replace('Hello', 'Updated');
const result = replaceArtifactContent(artifactText, artifacts[0], htmlContent, updatedHtml);
expect(result).not.toBeNull();
expect(result).toContain('Updated');
expect(result).toMatch(/```html\n/);
});
});
// Edge cases for fence detection in replaceArtifactContent: bare fences,
// assorted language tags, fence-like content, and artifacts with no fence.
describe('code block edge cases', () => {
test('should handle code block without language identifier (```\\n)', () => {
const content = 'const x = 1;\nconst y = 2;';
const artifactText = `${ARTIFACT_START}{identifier="test" type="text/plain" title="Test"}
\`\`\`
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const result = replaceArtifactContent(artifactText, artifacts[0], content, 'updated');
expect(result).not.toBeNull();
expect(result).toContain('updated');
expect(result).toMatch(/```\nupdated\n```/);
});
// Sweep of common language tags; each must be preserved after replacement.
test('should handle various language identifiers', () => {
const languages = [
'javascript',
'typescript',
'python',
'jsx',
'tsx',
'css',
'json',
'xml',
'markdown',
'md',
];
for (const lang of languages) {
const content = `test content for ${lang}`;
const artifactText = `${ARTIFACT_START}{identifier="test-${lang}" type="text/plain" title="Test"}
\`\`\`${lang}
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const result = replaceArtifactContent(artifactText, artifacts[0], content, 'updated');
expect(result).not.toBeNull();
expect(result).toContain('updated');
expect(result).toMatch(new RegExp(`\`\`\`${lang}\\n`));
}
});
test('should handle single character language identifier', () => {
const content = 'single char lang';
const artifactText = `${ARTIFACT_START}{identifier="test" type="text/plain" title="Test"}
\`\`\`r
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const result = replaceArtifactContent(artifactText, artifacts[0], content, 'updated');
expect(result).not.toBeNull();
expect(result).toContain('updated');
expect(result).toMatch(/```r\n/);
});
// Backticks inside the fenced body must not be mistaken for a closing fence.
test('should handle code block with content that looks like code fence', () => {
const content = 'Line 1\nSome text with ``` backticks in middle\nLine 3';
const artifactText = `${ARTIFACT_START}{identifier="test" type="text/plain" title="Test"}
\`\`\`text
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const result = replaceArtifactContent(artifactText, artifacts[0], content, 'updated');
expect(result).not.toBeNull();
expect(result).toContain('updated');
});
test('should handle code block with trailing whitespace in language line', () => {
const content = 'whitespace test';
/** Note: trailing spaces after 'python' */
const artifactText = `${ARTIFACT_START}{identifier="test" type="text/plain" title="Test"}
\`\`\`python
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const result = replaceArtifactContent(artifactText, artifacts[0], content, 'updated');
expect(result).not.toBeNull();
expect(result).toContain('updated');
});
test('should handle react/jsx content with complex syntax', () => {
const jsxContent = `function App() {
const [count, setCount] = useState(0);
return (
<div className="app">
<h1>Count: {count}</h1>
<button onClick={() => setCount(c => c + 1)}>
Increment
</button>
</div>
);
}`;
const artifactText = `${ARTIFACT_START}{identifier="react-app" type="application/vnd.react" title="React App"}
\`\`\`jsx
${jsxContent}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const updatedJsx = jsxContent.replace('Increment', 'Click me');
const result = replaceArtifactContent(artifactText, artifacts[0], jsxContent, updatedJsx);
expect(result).not.toBeNull();
expect(result).toContain('Click me');
expect(result).not.toContain('Increment');
expect(result).toMatch(/```jsx\n/);
});
test('should handle mermaid diagram content', () => {
const mermaidContent = `graph TD
A[Start] --> B{Is it?}
B -->|Yes| C[OK]
B -->|No| D[End]`;
const artifactText = `${ARTIFACT_START}{identifier="diagram" type="application/vnd.mermaid" title="Flow"}
\`\`\`mermaid
${mermaidContent}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const updatedMermaid = mermaidContent.replace('Start', 'Begin');
const result = replaceArtifactContent(
artifactText,
artifacts[0],
mermaidContent,
updatedMermaid,
);
expect(result).not.toBeNull();
expect(result).toContain('Begin');
expect(result).toMatch(/```mermaid\n/);
});
// No fence at all: replacement happens on the raw artifact body.
test('should handle artifact without code block (plain text)', () => {
const content = 'Just plain text without code fences';
const artifactText = `${ARTIFACT_START}{identifier="plain" type="text/plain" title="Plain"}
${content}
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const result = replaceArtifactContent(
artifactText,
artifacts[0],
content,
'updated plain text',
);
expect(result).not.toBeNull();
expect(result).toContain('updated plain text');
expect(result).not.toContain('```');
});
test('should handle multiline content with various newline patterns', () => {
const content = `Line 1
Line 2
Line 4 after empty line
Indented line
Double indented`;
const artifactText = `${ARTIFACT_START}{identifier="test" type="text/plain" title="Test"}
\`\`\`
${content}
\`\`\`
${ARTIFACT_END}`;
const message = { text: artifactText };
const artifacts = findAllArtifacts(message);
const updated = content.replace('Line 1', 'First Line');
const result = replaceArtifactContent(artifactText, artifacts[0], content, updated);
expect(result).not.toBeNull();
expect(result).toContain('First Line');
expect(result).toContain(' Indented line');
expect(result).toContain(' Double indented');
});
});
});

View file

@ -3,10 +3,11 @@ const { createContentAggregator } = require('@librechat/agents');
const {
initializeAgent,
validateAgentModel,
getCustomEndpointConfig,
createSequentialChainEdges,
createEdgeCollector,
filterOrphanedEdges,
GenerationJobManager,
getCustomEndpointConfig,
createSequentialChainEdges,
} = require('@librechat/api');
const {
EModelEndpoint,
@ -314,6 +315,10 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => {
endpoint: isEphemeralAgentId(primaryConfig.id) ? primaryConfig.endpoint : EModelEndpoint.agents,
});
if (streamId) {
GenerationJobManager.setCollectedUsage(streamId, collectedUsage);
}
return { client, userMCPAuthMap };
};

View file

@ -15,6 +15,38 @@ const {
} = require('~/strategies');
const { getLogStores } = require('~/cache');
/**
 * Determines if secure cookies should be used.
 * Only use secure cookies in production when the configured server domain
 * is not a localhost/loopback address, so OIDC/SAML sessions still work on
 * plain-HTTP local production deployments.
 * @returns {boolean} `true` when NODE_ENV is 'production' and DOMAIN_SERVER
 *   does not point at localhost, 127.0.0.1, ::1, or a *.localhost subdomain.
 */
function shouldUseSecureCookie() {
  const isProduction = process.env.NODE_ENV === 'production';
  const domainServer = process.env.DOMAIN_SERVER || '';
  let hostname = '';
  if (domainServer) {
    try {
      const normalized = /^https?:\/\//i.test(domainServer)
        ? domainServer
        : `http://${domainServer}`;
      const url = new URL(normalized);
      hostname = (url.hostname || '').toLowerCase();
    } catch {
      // Fallback: treat DOMAIN_SERVER directly as a hostname-like string
      hostname = domainServer.toLowerCase();
    }
  }
  // WHATWG URL serializes IPv6 hostnames with brackets (e.g. '[::1]');
  // strip them so the loopback comparison below can match.
  if (hostname.startsWith('[') && hostname.endsWith(']')) {
    hostname = hostname.slice(1, -1);
  }
  const isLocalhost =
    hostname === 'localhost' ||
    hostname === '127.0.0.1' ||
    hostname === '::1' ||
    hostname.endsWith('.localhost');
  return isProduction && !isLocalhost;
}
/**
* Configures OpenID Connect for the application.
* @param {Express.Application} app - The Express application instance.
@ -22,7 +54,6 @@ const { getLogStores } = require('~/cache');
*/
async function configureOpenId(app) {
logger.info('Configuring OpenID Connect...');
const isProduction = process.env.NODE_ENV === 'production';
const sessionExpiry = Number(process.env.SESSION_EXPIRY) || DEFAULT_SESSION_EXPIRY;
const sessionOptions = {
secret: process.env.OPENID_SESSION_SECRET,
@ -31,7 +62,7 @@ async function configureOpenId(app) {
store: getLogStores(CacheKeys.OPENID_SESSION),
cookie: {
maxAge: sessionExpiry,
secure: isProduction,
secure: shouldUseSecureCookie(),
},
};
app.use(session(sessionOptions));
@ -88,7 +119,6 @@ const configureSocialLogins = async (app) => {
process.env.SAML_SESSION_SECRET
) {
logger.info('Configuring SAML Connect...');
const isProduction = process.env.NODE_ENV === 'production';
const sessionExpiry = Number(process.env.SESSION_EXPIRY) || DEFAULT_SESSION_EXPIRY;
const sessionOptions = {
secret: process.env.SAML_SESSION_SECRET,
@ -97,7 +127,7 @@ const configureSocialLogins = async (app) => {
store: getLogStores(CacheKeys.SAML_SESSION),
cookie: {
maxAge: sessionExpiry,
secure: isProduction,
secure: shouldUseSecureCookie(),
},
};
app.use(session(sessionOptions));

View file

@ -254,7 +254,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.7.21",
"version": "1.7.22",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
"@babel/preset-react": "^7.18.6",
@ -321,7 +321,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.50",
"version": "0.4.51",
"devDependencies": {
"@babel/core": "^7.28.5",
"@babel/preset-env": "^7.28.5",
@ -409,7 +409,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.230",
"version": "0.8.231",
"dependencies": {
"axios": "^1.12.1",
"dayjs": "^1.11.13",
@ -447,7 +447,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.34",
"version": "0.0.35",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-commonjs": "^29.0.0",

View file

@ -1,4 +1,4 @@
/** v0.8.2-rc3 */
/** v0.8.2 */
module.exports = {
roots: ['<rootDir>/src'],
testEnvironment: 'jsdom',

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"description": "",
"type": "module",
"scripts": {
@ -77,7 +77,7 @@
"jotai": "^2.12.5",
"js-cookie": "^3.0.5",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"lucide-react": "^0.394.0",
"match-sorter": "^8.1.0",
"mermaid": "^11.12.2",

View file

@ -15,6 +15,8 @@ export interface MenuItemProps {
separate?: boolean;
hideOnClick?: boolean;
dialog?: React.ReactElement;
ariaLabel?: string;
ariaChecked?: boolean;
ref?: React.Ref<any>;
className?: string;
render?:

View file

@ -5,6 +5,7 @@ import { ThemeContext, Spinner, Button, isDark } from '@librechat/client';
import type { TLoginUser, TStartupConfig } from 'librechat-data-provider';
import type { TAuthContext } from '~/common';
import { useResendVerificationEmail, useGetStartupConfig } from '~/data-provider';
import { validateEmail } from '~/utils';
import { useLocalize } from '~/hooks';
type TLoginFormProps = {
@ -96,10 +97,9 @@ const LoginForm: React.FC<TLoginFormProps> = ({ onSubmit, startupConfig, error,
{...register('email', {
required: localize('com_auth_email_required'),
maxLength: { value: 120, message: localize('com_auth_email_max_length') },
pattern: {
value: useUsernameLogin ? /\S+/ : /\S+@\S+\.\S+/,
message: localize('com_auth_email_pattern'),
},
validate: useUsernameLogin
? undefined
: (value) => validateEmail(value, localize('com_auth_email_pattern')),
})}
aria-invalid={!!errors.email}
className="webkit-dark-styles transition-color peer w-full rounded-2xl border border-border-light bg-surface-primary px-3.5 pb-2.5 pt-3 text-text-primary duration-200 focus:border-green-500 focus:outline-none"

View file

@ -13,30 +13,14 @@ export default function Footer({ className }: { className?: string }) {
const termsOfService = config?.interface?.termsOfService;
const privacyPolicyRender = privacyPolicy?.externalUrl != null && (
<a
className="text-text-secondary underline"
href={privacyPolicy.externalUrl}
target={privacyPolicy.openNewTab === true ? '_blank' : undefined}
rel="noreferrer"
>
<a className="text-text-secondary underline" href={privacyPolicy.externalUrl} rel="noreferrer">
{localize('com_ui_privacy_policy')}
{privacyPolicy.openNewTab === true && (
<span className="sr-only">{' ' + localize('com_ui_opens_new_tab')}</span>
)}
</a>
);
const termsOfServiceRender = termsOfService?.externalUrl != null && (
<a
className="text-text-secondary underline"
href={termsOfService.externalUrl}
target={termsOfService.openNewTab === true ? '_blank' : undefined}
rel="noreferrer"
>
<a className="text-text-secondary underline" href={termsOfService.externalUrl} rel="noreferrer">
{localize('com_ui_terms_of_service')}
{termsOfService.openNewTab === true && (
<span className="sr-only">{' ' + localize('com_ui_opens_new_tab')}</span>
)}
</a>
);
@ -67,12 +51,10 @@ export default function Footer({ className }: { className?: string }) {
<a
className="text-text-secondary underline"
href={href}
target="_blank"
rel="noreferrer"
{...otherProps}
>
{children}
<span className="sr-only">{' ' + localize('com_ui_opens_new_tab')}</span>
</a>
);
},

View file

@ -258,7 +258,17 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
<FileFormChat conversation={conversation} />
{endpoint && (
<div className={cn('flex', isRTL ? 'flex-row-reverse' : 'flex-row')}>
<div className="relative flex-1">
<div
className="relative flex-1"
style={
isCollapsed
? {
WebkitMaskImage: 'linear-gradient(to bottom, black 60%, transparent 90%)',
maskImage: 'linear-gradient(to bottom, black 60%, transparent 90%)',
}
: undefined
}
>
<TextareaAutosize
{...registerProps}
ref={(e) => {
@ -290,16 +300,6 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
'scrollbar-hover transition-[max-height] duration-200 disabled:cursor-not-allowed',
)}
/>
{isCollapsed && (
<div
className="pointer-events-none absolute bottom-0 left-0 right-0 h-10 transition-all duration-200"
style={{
backdropFilter: 'blur(2px)',
WebkitMaskImage: 'linear-gradient(to top, black 15%, transparent 75%)',
maskImage: 'linear-gradient(to top, black 15%, transparent 75%)',
}}
/>
)}
</div>
<div className="flex flex-col items-start justify-start pr-2.5 pt-1.5">
<CollapseChat

View file

@ -99,6 +99,16 @@ const BookmarkMenu: FC = () => {
const newBookmarkRef = useRef<HTMLButtonElement>(null);
const tagsCount = tags?.length ?? 0;
const hasBookmarks = tagsCount > 0;
const buttonAriaLabel = useMemo(() => {
if (tagsCount > 0) {
return localize('com_ui_bookmarks_count_selected', { count: tagsCount });
}
return localize('com_ui_bookmarks_add');
}, [tagsCount, localize]);
const dropdownItems: t.MenuItemProps[] = useMemo(() => {
const items: t.MenuItemProps[] = [
{
@ -114,19 +124,19 @@ const BookmarkMenu: FC = () => {
if (data) {
for (const tag of data) {
const isSelected = tags?.includes(tag.tag);
const isSelected = tags?.includes(tag.tag) === true;
items.push({
id: tag.tag,
label: tag.tag,
hideOnClick: false,
icon:
isSelected === true ? (
<BookmarkFilledIcon className="size-4" />
) : (
<BookmarkIcon className="size-4" />
),
icon: isSelected ? (
<BookmarkFilledIcon className="size-4" />
) : (
<BookmarkIcon className="size-4" />
),
onClick: () => handleSubmit(tag.tag),
disabled: mutation.isLoading,
ariaChecked: isSelected,
});
}
}
@ -146,10 +156,10 @@ const BookmarkMenu: FC = () => {
if (mutation.isLoading) {
return <Spinner aria-label="Spinner" />;
}
if ((tags?.length ?? 0) > 0) {
return <BookmarkFilledIcon className="icon-lg" aria-label="Filled Bookmark" />;
if (hasBookmarks) {
return <BookmarkFilledIcon className="icon-lg" aria-hidden="true" />;
}
return <BookmarkIcon className="icon-lg" aria-label="Bookmark" />;
return <BookmarkIcon className="icon-lg" aria-hidden="true" />;
};
return (
@ -168,7 +178,8 @@ const BookmarkMenu: FC = () => {
render={
<Ariakit.MenuButton
id="bookmark-menu-button"
aria-label={localize('com_ui_bookmarks_add')}
aria-label={buttonAriaLabel}
aria-pressed={hasBookmarks}
className={cn(
'mt-text-sm flex size-10 flex-shrink-0 items-center justify-center gap-2 rounded-xl border border-border-light bg-presentation text-sm transition-colors duration-200 hover:bg-surface-hover',
isMenuOpen ? 'bg-surface-hover' : '',

View file

@ -1,4 +1,5 @@
import { useMemo } from 'react';
import { VisuallyHidden } from '@ariakit/react';
import { Spinner, TooltipAnchor } from '@librechat/client';
import { CheckCircle2, MousePointerClick, SettingsIcon } from 'lucide-react';
import { EModelEndpoint, isAgentsEndpoint, isAssistantsEndpoint } from 'librechat-data-provider';
@ -126,6 +127,8 @@ export function EndpointItem({ endpoint, endpointIndex }: EndpointItemProps) {
</div>
);
const isEndpointSelected = selectedEndpoint === endpoint.value;
if (endpoint.hasModels) {
const filteredModels = searchValue
? filterModels(
@ -153,9 +156,17 @@ export function EndpointItem({ endpoint, endpointIndex }: EndpointItemProps) {
label={
<div className="group flex w-full min-w-0 items-center justify-between gap-1.5 py-1 text-sm">
{renderIconLabel()}
{isUserProvided && (
<SettingsButton endpoint={endpoint} handleOpenKeyDialog={handleOpenKeyDialog} />
)}
<div className="flex shrink-0 items-center gap-1">
{isUserProvided && (
<SettingsButton endpoint={endpoint} handleOpenKeyDialog={handleOpenKeyDialog} />
)}
{isEndpointSelected && (
<>
<CheckCircle2 className="size-4 shrink-0 text-text-primary" aria-hidden="true" />
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</div>
</div>
}
>
@ -200,6 +211,7 @@ export function EndpointItem({ endpoint, endpointIndex }: EndpointItemProps) {
id={`endpoint-${endpoint.value}-menu`}
key={`endpoint-${endpoint.value}-item`}
onClick={() => handleSelectEndpoint(endpoint)}
aria-selected={isEndpointSelected || undefined}
className="group flex w-full cursor-pointer items-center justify-between gap-1.5 py-2 text-sm"
>
{renderIconLabel()}
@ -218,8 +230,11 @@ export function EndpointItem({ endpoint, endpointIndex }: EndpointItemProps) {
}
/>
)}
{selectedEndpoint === endpoint.value && !isAssistantsNotLoaded && (
<CheckCircle2 className="size-4 shrink-0 text-text-primary" aria-hidden="true" />
{isEndpointSelected && !isAssistantsNotLoaded && (
<>
<CheckCircle2 className="size-4 shrink-0 text-text-primary" aria-hidden="true" />
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</div>
</MenuItem>

View file

@ -1,5 +1,6 @@
import React, { useRef, useState, useEffect } from 'react';
import { EarthIcon, Pin, PinOff } from 'lucide-react';
import { VisuallyHidden } from '@ariakit/react';
import { CheckCircle2, EarthIcon, Pin, PinOff } from 'lucide-react';
import { isAgentsEndpoint, isAssistantsEndpoint } from 'librechat-data-provider';
import { useModelSelectorContext } from '../ModelSelectorContext';
import { CustomMenuItem as MenuItem } from '../CustomMenu';
@ -110,6 +111,7 @@ export function EndpointModelItem({ modelId, endpoint, isSelected }: EndpointMod
<MenuItem
ref={itemRef}
onClick={() => handleSelectModel(endpoint, modelId ?? '')}
aria-selected={isSelected || undefined}
className="group flex w-full cursor-pointer items-center justify-between rounded-lg px-2 text-sm"
>
<div className="flex w-full min-w-0 items-center gap-2 px-1 py-1">
@ -133,23 +135,10 @@ export function EndpointModelItem({ modelId, endpoint, isSelected }: EndpointMod
)}
</button>
{isSelected && (
<div className="flex-shrink-0 self-center">
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
className="block"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12ZM16.0755 7.93219C16.5272 8.25003 16.6356 8.87383 16.3178 9.32549L11.5678 16.0755C11.3931 16.3237 11.1152 16.4792 10.8123 16.4981C10.5093 16.517 10.2142 16.3973 10.0101 16.1727L7.51006 13.4227C7.13855 13.014 7.16867 12.3816 7.57733 12.0101C7.98598 11.6386 8.61843 11.6687 8.98994 12.0773L10.6504 13.9039L14.6822 8.17451C15 7.72284 15.6238 7.61436 16.0755 7.93219Z"
fill="currentColor"
/>
</svg>
</div>
<>
<CheckCircle2 className="size-4 shrink-0 text-text-primary" aria-hidden="true" />
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</MenuItem>
);

View file

@ -1,7 +1,10 @@
import React from 'react';
import { CheckCircle2 } from 'lucide-react';
import { VisuallyHidden } from '@ariakit/react';
import type { TModelSpec } from 'librechat-data-provider';
import { CustomMenuItem as MenuItem } from '../CustomMenu';
import { useModelSelectorContext } from '../ModelSelectorContext';
import { useLocalize } from '~/hooks';
import SpecIcon from './SpecIcon';
import { cn } from '~/utils';
@ -11,12 +14,14 @@ interface ModelSpecItemProps {
}
export function ModelSpecItem({ spec, isSelected }: ModelSpecItemProps) {
const localize = useLocalize();
const { handleSelectSpec, endpointsConfig } = useModelSelectorContext();
const { showIconInMenu = true } = spec;
return (
<MenuItem
key={spec.name}
onClick={() => handleSelectSpec(spec)}
aria-selected={isSelected || undefined}
className={cn(
'flex w-full cursor-pointer items-center justify-between rounded-lg px-2 text-sm',
)}
@ -40,23 +45,13 @@ export function ModelSpecItem({ spec, isSelected }: ModelSpecItemProps) {
</div>
</div>
{isSelected && (
<div className="flex-shrink-0 self-center">
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
className="block"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12ZM16.0755 7.93219C16.5272 8.25003 16.6356 8.87383 16.3178 9.32549L11.5678 16.0755C11.3931 16.3237 11.1152 16.4792 10.8123 16.4981C10.5093 16.517 10.2142 16.3973 10.0101 16.1727L7.51006 13.4227C7.13855 13.014 7.16867 12.3816 7.57733 12.0101C7.98598 11.6386 8.61843 11.6687 8.98994 12.0773L10.6504 13.9039L14.6822 8.17451C15 7.72284 15.6238 7.61436 16.0755 7.93219Z"
fill="currentColor"
/>
</svg>
</div>
<>
<CheckCircle2
className="size-4 shrink-0 self-center text-text-primary"
aria-hidden="true"
/>
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</MenuItem>
);

View file

@ -1,5 +1,6 @@
import React, { Fragment } from 'react';
import { EarthIcon } from 'lucide-react';
import { VisuallyHidden } from '@ariakit/react';
import { CheckCircle2, EarthIcon } from 'lucide-react';
import { isAgentsEndpoint, isAssistantsEndpoint } from 'librechat-data-provider';
import type { TModelSpec } from 'librechat-data-provider';
import type { Endpoint } from '~/common';
@ -60,6 +61,7 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
<MenuItem
key={spec.name}
onClick={() => handleSelectSpec(spec)}
aria-selected={selectedSpec === spec.name || undefined}
className={cn(
'flex w-full cursor-pointer justify-between rounded-lg px-2 text-sm',
spec.description ? 'items-start' : 'items-center',
@ -84,23 +86,16 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
</div>
</div>
{selectedSpec === spec.name && (
<div className={cn('flex-shrink-0', spec.description ? 'pt-1' : '')}>
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
className="block"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12ZM16.0755 7.93219C16.5272 8.25003 16.6356 8.87383 16.3178 9.32549L11.5678 16.0755C11.3931 16.3237 11.1152 16.4792 10.8123 16.4981C10.5093 16.517 10.2142 16.3973 10.0101 16.1727L7.51006 13.4227C7.13855 13.014 7.16867 12.3816 7.57733 12.0101C7.98598 11.6386 8.61843 11.6687 8.98994 12.0773L10.6504 13.9039L14.6822 8.17451C15 7.72284 15.6238 7.61436 16.0755 7.93219Z"
fill="currentColor"
/>
</svg>
</div>
<>
<CheckCircle2
className={cn(
'size-4 shrink-0 text-text-primary',
spec.description ? 'mt-1' : '',
)}
aria-hidden="true"
/>
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</MenuItem>
);
@ -164,10 +159,13 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
modelName = endpoint.assistantNames[modelId];
}
const isModelSelected =
selectedEndpoint === endpoint.value && selectedModel === modelId;
return (
<MenuItem
key={`${endpoint.value}-${modelId}-search-${i}`}
onClick={() => handleSelectModel(endpoint, modelId)}
aria-selected={isModelSelected || undefined}
className="flex w-full cursor-pointer items-center justify-start rounded-lg px-3 py-2 pl-6 text-sm"
>
<div className="flex items-center gap-2">
@ -185,22 +183,14 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
{isGlobal && (
<EarthIcon className="ml-auto size-4 text-green-400" aria-hidden="true" />
)}
{selectedEndpoint === endpoint.value && selectedModel === modelId && (
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
className="block"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12ZM16.0755 7.93219C16.5272 8.25003 16.6356 8.87383 16.3178 9.32549L11.5678 16.0755C11.3931 16.3237 11.1152 16.4792 10.8123 16.4981C10.5093 16.517 10.2142 16.3973 10.0101 16.1727L7.51006 13.4227C7.13855 13.014 7.16867 12.3816 7.57733 12.0101C7.98598 11.6386 8.61843 11.6687 8.98994 12.0773L10.6504 13.9039L14.6822 8.17451C15 7.72284 15.6238 7.61436 16.0755 7.93219Z"
fill="currentColor"
{isModelSelected && (
<>
<CheckCircle2
className="size-4 shrink-0 text-text-primary"
aria-hidden="true"
/>
</svg>
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</MenuItem>
);
@ -209,10 +199,12 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
);
} else {
// Endpoints with no models
const isEndpointSelected = selectedEndpoint === endpoint.value;
return (
<MenuItem
key={`endpoint-${endpoint.value}-search-item`}
onClick={() => handleSelectEndpoint(endpoint)}
aria-selected={isEndpointSelected || undefined}
className="flex w-full cursor-pointer items-center justify-between rounded-xl px-3 py-2 text-sm"
>
<div className="flex items-center gap-2">
@ -226,22 +218,14 @@ export function SearchResults({ results, localize, searchValue }: SearchResultsP
)}
<span>{endpoint.label}</span>
</div>
{selectedEndpoint === endpoint.value && (
<svg
width="16"
height="16"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
className="block"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M2 12C2 6.47715 6.47715 2 12 2C17.5228 2 22 6.47715 22 12C22 17.5228 17.5228 22 12 22C6.47715 22 2 17.5228 2 12ZM16.0755 7.93219C16.5272 8.25003 16.6356 8.87383 16.3178 9.32549L11.5678 16.0755C11.3931 16.3237 11.1152 16.4792 10.8123 16.4981C10.5093 16.517 10.2142 16.3973 10.0101 16.1727L7.51006 13.4227C7.13855 13.014 7.16867 12.3816 7.57733 12.0101C7.98598 11.6386 8.61843 11.6687 8.98994 12.0773L10.6504 13.9039L14.6822 8.17451C15 7.72284 15.6238 7.61436 16.0755 7.93219Z"
fill="currentColor"
{isEndpointSelected && (
<>
<CheckCircle2
className="size-4 shrink-0 text-text-primary"
aria-hidden="true"
/>
</svg>
<VisuallyHidden>{localize('com_a11y_selected')}</VisuallyHidden>
</>
)}
</MenuItem>
);

View file

@ -82,7 +82,7 @@ const ChatsHeader: FC<ChatsHeaderProps> = memo(({ isExpanded, onToggle }) => {
return (
<button
onClick={onToggle}
className="group flex w-full items-center justify-between px-1 py-2 text-xs font-bold text-text-secondary"
className="group flex w-full items-center justify-between rounded-lg px-1 py-2 text-xs font-bold text-text-secondary outline-none focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-inset focus-visible:ring-black dark:focus-visible:ring-white"
type="button"
>
<span className="select-none">{localize('com_ui_chats')}</span>

View file

@ -47,6 +47,7 @@ export default function Conversation({
const [hasInteracted, setHasInteracted] = useState(false);
const previousTitle = useRef(title);
const containerRef = useRef<HTMLDivElement>(null);
useEffect(() => {
if (title !== previousTitle.current) {
@ -109,6 +110,37 @@ export default function Conversation({
}
}, [hasInteracted]);
const handleMouseLeave = useCallback(() => {
if (!isPopoverActive) {
setHasInteracted(false);
}
}, [isPopoverActive]);
const handleBlur = useCallback(
(e: React.FocusEvent<HTMLDivElement>) => {
// Don't reset if focus is moving to a child element within this container
if (e.currentTarget.contains(e.relatedTarget as Node)) {
return;
}
if (!isPopoverActive) {
setHasInteracted(false);
}
},
[isPopoverActive],
);
const handlePopoverOpenChange = useCallback((open: boolean) => {
setIsPopoverActive(open);
if (!open) {
requestAnimationFrame(() => {
const container = containerRef.current;
if (container && !container.contains(document.activeElement)) {
setHasInteracted(false);
}
});
}
}, []);
const handleNavigation = (ctrlOrMetaKey: boolean) => {
if (ctrlOrMetaKey) {
toggleNav();
@ -141,14 +173,15 @@ export default function Conversation({
isActiveConvo,
conversationId,
isPopoverActive,
setIsPopoverActive,
setIsPopoverActive: handlePopoverOpenChange,
isShiftHeld: isActiveConvo ? isShiftHeld : false,
};
return (
<div
ref={containerRef}
className={cn(
'group relative flex h-12 w-full items-center rounded-lg md:h-9',
'group relative flex h-12 w-full items-center rounded-lg outline-none focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-inset focus-visible:ring-black dark:focus-visible:ring-white md:h-9',
isActiveConvo || isPopoverActive
? 'bg-surface-active-alt before:absolute before:bottom-1 before:left-0 before:top-1 before:w-0.5 before:rounded-full before:bg-black dark:before:bg-white'
: 'hover:bg-surface-active-alt',
@ -159,7 +192,9 @@ export default function Conversation({
title: title || localize('com_ui_untitled'),
})}
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
onFocus={handleMouseEnter}
onBlur={handleBlur}
onClick={(e) => {
if (renaming) {
return;

View file

@ -48,7 +48,7 @@ const ConvoLink: React.FC<ConvoLinkProps> = ({
</div>
<div
className={cn(
'absolute bottom-0 right-0 top-0 w-20 rounded-r-lg bg-gradient-to-l',
'pointer-events-none absolute bottom-0.5 right-0.5 top-0.5 w-20 rounded-r-md bg-gradient-to-l',
isActiveConvo || isPopoverActive
? 'from-surface-active-alt'
: 'from-surface-primary-alt from-0% to-transparent group-hover:from-surface-active-alt group-hover:from-40%',

View file

@ -35,7 +35,7 @@ function ConvoOptions({
retainView: () => void;
renameHandler: (e: MouseEvent) => void;
isPopoverActive: boolean;
setIsPopoverActive: React.Dispatch<React.SetStateAction<boolean>>;
setIsPopoverActive: (open: boolean) => void;
isActiveConvo: boolean;
isShiftHeld?: boolean;
}) {
@ -302,7 +302,7 @@ function ConvoOptions({
<Ariakit.MenuButton
id={`conversation-menu-${conversationId}`}
aria-label={localize('com_nav_convo_menu_options')}
aria-readonly={undefined}
aria-expanded={isPopoverActive}
className={cn(
'inline-flex h-7 w-7 items-center justify-center gap-2 rounded-md border-none p-0 text-sm font-medium ring-ring-primary transition-all duration-200 ease-in-out focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 disabled:opacity-50',
isActiveConvo === true || isPopoverActive

View file

@ -25,7 +25,7 @@ type DeleteButtonProps = {
showDeleteDialog?: boolean;
setShowDeleteDialog?: (value: boolean) => void;
triggerRef?: React.RefObject<HTMLButtonElement>;
setMenuOpen?: React.Dispatch<React.SetStateAction<boolean>>;
setMenuOpen?: (open: boolean) => void;
};
export function DeleteConversationDialog({
@ -35,7 +35,7 @@ export function DeleteConversationDialog({
retainView,
title,
}: {
setMenuOpen?: React.Dispatch<React.SetStateAction<boolean>>;
setMenuOpen?: (open: boolean) => void;
setShowDeleteDialog: (value: boolean) => void;
conversationId: string;
retainView: () => void;

View file

@ -40,7 +40,7 @@ function AccountSettings() {
</div>
</Select.Select>
<Select.SelectPopover
className="popover-ui z-[125] w-[305px] rounded-lg md:w-[244px]"
className="account-settings-popover popover-ui z-[125] w-[305px] rounded-lg md:w-[244px]"
style={{
transformOrigin: 'bottom',
translate: '0 -4px',

View file

@ -25,6 +25,13 @@ const BookmarkNav: FC<BookmarkNavProps> = ({ tags, setTags }: BookmarkNavProps)
[tags, localize],
);
const buttonAriaLabel = useMemo(() => {
if (tags.length === 0) {
return localize('com_ui_bookmarks');
}
return localize('com_ui_bookmarks_count_selected', { count: tags.length });
}, [tags.length, localize]);
const bookmarks = useMemo(() => data?.filter((tag) => tag.count > 0) ?? [], [data]);
const handleTagClick = useCallback(
@ -73,6 +80,7 @@ const BookmarkNav: FC<BookmarkNavProps> = ({ tags, setTags }: BookmarkNavProps)
<BookmarkIcon className="size-4" />
),
onClick: () => handleTagClick(bookmark.tag),
ariaChecked: isSelected,
});
}
}
@ -96,11 +104,13 @@ const BookmarkNav: FC<BookmarkNavProps> = ({ tags, setTags }: BookmarkNavProps)
render={
<Ariakit.MenuButton
id="bookmark-nav-menu-button"
aria-label={localize('com_ui_bookmarks')}
aria-label={buttonAriaLabel}
aria-pressed={tags.length > 0}
className={cn(
'flex items-center justify-center',
'size-10 border-none text-text-primary hover:bg-accent hover:text-accent-foreground',
'rounded-full border-none p-2 hover:bg-surface-active-alt md:rounded-xl',
'outline-none focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-inset focus-visible:ring-black dark:focus-visible:ring-white',
isMenuOpen ? 'bg-surface-hover' : '',
)}
data-testid="bookmark-menu"

View file

@ -110,7 +110,7 @@ export default function FavoriteItem({
tabIndex={0}
aria-label={ariaLabel}
className={cn(
'group relative flex w-full cursor-pointer items-center justify-between rounded-lg px-3 py-2 text-sm text-text-primary hover:bg-surface-active-alt',
'group relative flex w-full cursor-pointer items-center justify-between rounded-lg px-3 py-2 text-sm text-text-primary outline-none hover:bg-surface-active-alt focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-inset focus-visible:ring-black dark:focus-visible:ring-white',
isPopoverActive ? 'bg-surface-active-alt' : '',
)}
onClick={handleClick}

View file

@ -282,7 +282,7 @@ export default function FavoritesList({
role="button"
tabIndex={0}
aria-label={localize('com_agents_marketplace')}
className="group relative flex w-full cursor-pointer items-center justify-between rounded-lg px-3 py-2 text-sm text-text-primary hover:bg-surface-active-alt"
className="group relative flex w-full cursor-pointer items-center justify-between rounded-lg px-3 py-2 text-sm text-text-primary outline-none hover:bg-surface-active-alt focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-inset focus-visible:ring-black dark:focus-visible:ring-white"
onClick={handleAgentMarketplace}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {

View file

@ -55,7 +55,7 @@ export default function NewChat({
return (
<>
<div className="flex items-center justify-between py-[2px] md:py-2">
<div className="flex items-center justify-between px-0.5 py-[2px] md:py-2">
<TooltipAnchor
description={localize('com_nav_close_sidebar')}
render={
@ -66,7 +66,7 @@ export default function NewChat({
data-testid="close-sidebar-button"
aria-label={localize('com_nav_close_sidebar')}
aria-expanded={true}
className="rounded-full border-none bg-transparent duration-0 hover:bg-surface-active-alt md:rounded-xl"
className="rounded-full border-none bg-transparent duration-0 hover:bg-surface-active-alt focus-visible:ring-inset focus-visible:ring-black focus-visible:ring-offset-0 dark:focus-visible:ring-white md:rounded-xl"
onClick={handleToggleNav}
>
<Sidebar aria-hidden="true" className="max-md:hidden" />
@ -88,7 +88,7 @@ export default function NewChat({
variant="outline"
data-testid="nav-new-chat-button"
aria-label={localize('com_ui_new_chat')}
className="rounded-full border-none bg-transparent duration-0 hover:bg-surface-active-alt md:rounded-xl"
className="rounded-full border-none bg-transparent duration-0 hover:bg-surface-active-alt focus-visible:ring-inset focus-visible:ring-black focus-visible:ring-offset-0 dark:focus-visible:ring-white md:rounded-xl"
onClick={clickHandler}
>
<NewChatIcon className="icon-lg text-text-primary" />

View file

@ -4,33 +4,33 @@ import debounce from 'lodash/debounce';
import { useRecoilValue } from 'recoil';
import { Link } from 'react-router-dom';
import {
TrashIcon,
MessageSquare,
ArrowUpDown,
ArrowUp,
TrashIcon,
ArrowDown,
ArrowUpDown,
ExternalLink,
MessageSquare,
} from 'lucide-react';
import type { SharedLinkItem, SharedLinksListParams } from 'librechat-data-provider';
import type { TranslationKeys } from '~/hooks';
import {
Label,
Button,
Spinner,
OGDialog,
useToastContext,
OGDialogTemplate,
OGDialogTrigger,
OGDialogContent,
DataTable,
useMediaQuery,
OGDialogHeader,
OGDialogTitle,
TooltipAnchor,
DataTable,
Spinner,
Button,
Label,
OGDialogHeader,
OGDialogTrigger,
OGDialogContent,
useToastContext,
OGDialogTemplate,
} from '@librechat/client';
import type { SharedLinkItem, SharedLinksListParams } from 'librechat-data-provider';
import type { TranslationKeys } from '~/hooks';
import { useDeleteSharedLinkMutation, useSharedLinksQuery } from '~/data-provider';
import { useLocalize } from '~/hooks';
import { NotificationSeverity } from '~/common';
import { useLocalize } from '~/hooks';
import { formatDate } from '~/utils';
import store from '~/store';
@ -47,12 +47,12 @@ const DEFAULT_PARAMS: SharedLinksListParams = {
export default function SharedLinks() {
const localize = useLocalize();
const { showToast } = useToastContext();
const isSmallScreen = useMediaQuery('(max-width: 768px)');
const isSearchEnabled = useRecoilValue(store.search);
const [queryParams, setQueryParams] = useState<SharedLinksListParams>(DEFAULT_PARAMS);
const [deleteRow, setDeleteRow] = useState<SharedLinkItem | null>(null);
const [isDeleteOpen, setIsDeleteOpen] = useState(false);
const [isOpen, setIsOpen] = useState(false);
const searchStore = useRecoilValue(store.search);
const [isDeleteOpen, setIsDeleteOpen] = useState(false);
const isSmallScreen = useMediaQuery('(max-width: 768px)');
const [deleteRow, setDeleteRow] = useState<SharedLinkItem | null>(null);
const [queryParams, setQueryParams] = useState<SharedLinksListParams>(DEFAULT_PARAMS);
const { data, fetchNextPage, hasNextPage, isFetchingNextPage, refetch, isLoading } =
useSharedLinksQuery(queryParams, {
@ -173,17 +173,23 @@ export default function SharedLinks() {
ariaSort = 'ascending';
}
return (
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_name_sort')}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_ui_name')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
<TooltipAnchor
description={localize('com_ui_name_sort')}
side="top"
render={
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_name_sort')}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_ui_name')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
}
/>
);
},
cell: ({ row }) => {
@ -207,7 +213,7 @@ export default function SharedLinks() {
);
},
meta: {
size: '35%',
size: '32%',
mobileSize: '50%',
},
},
@ -225,17 +231,23 @@ export default function SharedLinks() {
ariaSort = 'ascending';
}
return (
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_creation_date_sort' as TranslationKeys)}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_ui_date')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
<TooltipAnchor
description={localize('com_ui_date_sort')}
side="top"
render={
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_date_sort')}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_ui_date')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
}
/>
);
},
cell: ({ row }) => formatDate(row.original.createdAt?.toString() ?? '', isSmallScreen),
@ -247,7 +259,7 @@ export default function SharedLinks() {
{
accessorKey: 'actions',
header: () => (
<Label className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm">
<Label className="px-2 py-0 text-xs sm:px-2 sm:py-2 sm:text-sm">
{localize('com_assistants_actions')}
</Label>
),
@ -330,7 +342,7 @@ export default function SharedLinks() {
onFilterChange={debouncedFilterChange}
filterValue={queryParams.search}
isLoading={isLoading}
enableSearch={isSearchEnabled}
enableSearch={searchStore.enabled === true}
/>
</OGDialogContent>
</OGDialog>

View file

@ -2,10 +2,18 @@ import { useState, useCallback, useMemo, useEffect } from 'react';
import { Trans } from 'react-i18next';
import debounce from 'lodash/debounce';
import { useRecoilValue } from 'recoil';
import { TrashIcon, ArchiveRestore, ArrowUp, ArrowDown, ArrowUpDown } from 'lucide-react';
import { Link } from 'react-router-dom';
import {
ArrowUp,
TrashIcon,
ArrowDown,
ArrowUpDown,
ExternalLink,
ArchiveRestore,
} from 'lucide-react';
import {
Button,
Label,
Button,
Spinner,
OGDialog,
DataTable,
@ -17,7 +25,6 @@ import {
OGDialogContent,
} from '@librechat/client';
import type { ConversationListParams, TConversation } from 'librechat-data-provider';
import type { TranslationKeys } from '~/hooks';
import {
useConversationsInfiniteQuery,
useDeleteConversationMutation,
@ -42,10 +49,10 @@ export default function ArchivedChatsTable({
onOpenChange: (isOpen: boolean) => void;
}) {
const localize = useLocalize();
const isSmallScreen = useMediaQuery('(max-width: 768px)');
const { showToast } = useToastContext();
const searchState = useRecoilValue(store.search);
const [isDeleteOpen, setIsDeleteOpen] = useState(false);
const isSmallScreen = useMediaQuery('(max-width: 768px)');
const [queryParams, setQueryParams] = useState<ConversationListParams>(DEFAULT_PARAMS);
const [deleteConversation, setDeleteConversation] = useState<TConversation | null>(null);
@ -138,35 +145,50 @@ export default function ArchivedChatsTable({
ariaSort = 'ascending';
}
return (
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_nav_archive_name_sort' as TranslationKeys)}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_nav_archive_name')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
<TooltipAnchor
description={localize('com_ui_name_sort')}
side="top"
render={
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_name_sort')}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_nav_archive_name')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
}
/>
);
},
cell: ({ row }) => {
const { conversationId, title } = row.original;
return (
<button
type="button"
className="flex items-center gap-2 truncate rounded-sm"
onClick={() => window.open(`/c/${conversationId}`, '_blank')}
>
<div className="flex items-center gap-2">
<MinimalIcon
endpoint={row.original.endpoint}
size={28}
isCreatedByUser={false}
iconClassName="size-4"
/>
<span className="underline">{title}</span>
</button>
<Link
to={`/c/${conversationId}`}
target="_blank"
rel="noopener noreferrer"
className="group flex items-center gap-1 truncate rounded-sm text-blue-600 underline decoration-1 underline-offset-2 hover:decoration-2 focus:outline-none focus:ring-2 focus:ring-ring"
title={title}
aria-label={localize('com_ui_open_archived_chat_new_tab_title', { title })}
>
<span className="truncate">{title}</span>
<ExternalLink
className="size-3 flex-shrink-0 opacity-70 group-hover:opacity-100"
aria-hidden="true"
/>
</Link>
</div>
);
},
meta: {
@ -188,17 +210,23 @@ export default function ArchivedChatsTable({
ariaSort = 'ascending';
}
return (
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_nav_archive_created_at_sort' as TranslationKeys)}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_nav_archive_created_at')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
<TooltipAnchor
description={localize('com_ui_date_sort')}
side="top"
render={
<Button
variant="ghost"
onClick={() => column.toggleSorting(column.getIsSorted() === 'asc')}
className="px-2 py-0 text-xs hover:bg-surface-hover sm:px-2 sm:py-2 sm:text-sm"
aria-sort={ariaSort}
aria-label={localize('com_ui_date_sort')}
aria-current={sortState ? 'true' : 'false'}
>
{localize('com_nav_archive_created_at')}
<SortIcon className="ml-2 h-3 w-4 sm:h-4 sm:w-4" />
</Button>
}
/>
);
},
cell: ({ row }) => formatDate(row.original.createdAt?.toString() ?? '', isSmallScreen),
@ -219,7 +247,7 @@ export default function ArchivedChatsTable({
return (
<div className="flex items-center gap-2">
<TooltipAnchor
description={localize('com_ui_unarchive')}
description={localize('com_ui_unarchive_conversation')}
render={
<Button
variant="ghost"
@ -230,8 +258,8 @@ export default function ArchivedChatsTable({
isArchived: false,
})
}
title={localize('com_ui_unarchive')}
aria-label={localize('com_ui_unarchive')}
title={localize('com_ui_unarchive_conversation')}
aria-label={localize('com_ui_unarchive_conversation')}
disabled={unarchiveMutation.isLoading}
>
{unarchiveMutation.isLoading ? (
@ -243,7 +271,7 @@ export default function ArchivedChatsTable({
}
/>
<TooltipAnchor
description={localize('com_ui_delete')}
description={localize('com_ui_delete_conversation_tooltip')}
render={
<Button
variant="ghost"
@ -252,8 +280,8 @@ export default function ArchivedChatsTable({
setDeleteConversation(row.original);
setIsDeleteOpen(true);
}}
title={localize('com_ui_delete')}
aria-label={localize('com_ui_delete')}
title={localize('com_ui_delete_conversation_tooltip')}
aria-label={localize('com_ui_delete_conversation_tooltip')}
>
<TrashIcon className="size-4" />
</Button>

View file

@ -209,6 +209,7 @@ export default function AgentConfig() {
'mt-1 w-56 text-sm text-red-500',
errors.name ? 'visible h-auto' : 'invisible h-0',
)}
role="alert"
>
{errors.name ? errors.name.message : ' '}
</div>

View file

@ -249,7 +249,7 @@ const useNewConvo = (index = 0) => {
state: disableFocus ? {} : { focusChat: true },
});
},
[endpointsConfig, defaultPreset, assistantsListMap, modelsQuery.data],
[endpointsConfig, defaultPreset, assistantsListMap, modelsQuery.data, hasAgentAccess],
);
const newConversation = useCallback(

View file

@ -1140,7 +1140,6 @@
"com_ui_open_source_chat_new_tab_title": "Quell-Chat in neuem Tab öffnen - {{title}}",
"com_ui_open_var": "{{0}} öffnen",
"com_ui_openai": "OpenAI",
"com_ui_opens_new_tab": "(öffnet in neuem Tab)",
"com_ui_optional": "(Optional)",
"com_ui_page": "Seite",
"com_ui_people": "Personen",

View file

@ -3,6 +3,7 @@
"chat_direction_right_to_left": "Right to Left",
"com_a11y_ai_composing": "The AI is still composing.",
"com_a11y_end": "The AI has finished their reply.",
"com_a11y_selected": "selected",
"com_a11y_start": "The AI has started their reply.",
"com_agents_agent_card_label": "{{name}} agent. {{description}}",
"com_agents_all": "All Agents",
@ -757,6 +758,7 @@
"com_ui_bookmarks": "Bookmarks",
"com_ui_bookmarks_add": "Add Bookmarks",
"com_ui_bookmarks_add_to_conversation": "Add to current conversation",
"com_ui_bookmarks_count_selected": "Bookmarks, {{count}} selected",
"com_ui_bookmarks_create_error": "There was an error creating the bookmark",
"com_ui_bookmarks_create_exists": "This bookmark already exists",
"com_ui_bookmarks_create_success": "Bookmark created successfully",
@ -881,6 +883,7 @@
"com_ui_delete_confirm_prompt_version_var": "This will delete the selected version for \"{{0}}.\" If no other versions exist, the prompt will be deleted.",
"com_ui_delete_confirm_strong": "This will delete <strong>{{title}}</strong>",
"com_ui_delete_conversation": "Delete chat?",
"com_ui_delete_conversation_tooltip": "Delete conversation",
"com_ui_delete_memory": "Delete Memory",
"com_ui_delete_not_allowed": "Delete operation is not allowed",
"com_ui_delete_preset": "Delete Preset?",
@ -1174,11 +1177,11 @@
"com_ui_off": "Off",
"com_ui_offline": "Offline",
"com_ui_on": "On",
"com_ui_open_archived_chat_new_tab_title": "{{title}} (opens in new tab)",
"com_ui_open_source_chat_new_tab": "Open Source Chat in New Tab",
"com_ui_open_source_chat_new_tab_title": "Open Source Chat in New Tab - {{title}}",
"com_ui_open_var": "Open {{0}}",
"com_ui_openai": "OpenAI",
"com_ui_opens_new_tab": "(opens in new tab)",
"com_ui_optional": "(optional)",
"com_ui_page": "Page",
"com_ui_people": "people",
@ -1374,6 +1377,7 @@
"com_ui_ui_resource_not_found": "UI Resource not found (index: {{0}})",
"com_ui_ui_resources": "UI Resources",
"com_ui_unarchive": "Unarchive",
"com_ui_unarchive_conversation": "Unarchive conversation",
"com_ui_unarchive_error": "Failed to unarchive conversation",
"com_ui_unavailable": "Unavailable",
"com_ui_unknown": "Unknown",

View file

@ -1164,7 +1164,6 @@
"com_ui_open_source_chat_new_tab_title": "新しいタブでチャットを開く- {{title}}",
"com_ui_open_var": "開く {{0}}",
"com_ui_openai": "OpenAI",
"com_ui_opens_new_tab": "(新しいタブで開く)",
"com_ui_optional": "(任意)",
"com_ui_page": "ページ",
"com_ui_people": "人々",

View file

@ -1,8 +1,9 @@
{
"chat_direction_left_to_right": "No kreisās uz labo",
"chat_direction_right_to_left": "No labās uz kreiso",
"com_a11y_ai_composing": "Mākslīgais intelekts joprojām veido.",
"com_a11y_end": "Mākslīgais intelekts ir pabeidzis atbildi.",
"com_a11y_ai_composing": "Mākslīgais intelekts joprojām veido savu atbildi.",
"com_a11y_end": "Mākslīgais intelekts ir pabeidzis veidot atbildi.",
"com_a11y_selected": "atlasīts",
"com_a11y_start": "Mākslīgais intelekts ir sācis savu atbildi.",
"com_agents_agent_card_label": "{{name}} aģents. {{description}}",
"com_agents_all": "Visi aģenti",
@ -757,6 +758,7 @@
"com_ui_bookmarks": "Grāmatzīmes",
"com_ui_bookmarks_add": "Pievienot grāmatzīmi",
"com_ui_bookmarks_add_to_conversation": "Pievienot pašreizējai sarunai",
"com_ui_bookmarks_count_selected": "Grāmatzīmes, {{count}} atlasītas",
"com_ui_bookmarks_create_error": "Veidojot grāmatzīmi, radās kļūda",
"com_ui_bookmarks_create_exists": "Šī grāmatzīme jau pastāv",
"com_ui_bookmarks_create_success": "Grāmatzīme veiksmīgi izveidota",
@ -1180,7 +1182,6 @@
"com_ui_open_source_chat_new_tab_title": "Atvērtā koda saruna jaunā cilnē - {{title}}",
"com_ui_open_var": "Atvērt {{0}}",
"com_ui_openai": "OpenAI",
"com_ui_opens_new_tab": "(atveras jaunā cilnē)",
"com_ui_optional": "(pēc izvēles)",
"com_ui_page": "Lapa",
"com_ui_people": "cilvēki",
@ -1398,7 +1399,7 @@
"com_ui_upload_image_input": "Augšupielādēt failu kā attēlu",
"com_ui_upload_invalid": "Nederīgs augšupielādējamais fails. Attēlam jābūt tādam, kas nepārsniedz ierobežojumu.",
"com_ui_upload_invalid_var": "Nederīgs augšupielādējams fails. Attēlam jābūt ne lielākam par {{0}} MB",
"com_ui_upload_ocr_text": "Augšupielādēt failu kā tekstu",
"com_ui_upload_ocr_text": "Augšupielādēt failu kā kontekstu",
"com_ui_upload_provider": "Augšupielādēt pakalpojumu sniedzējam",
"com_ui_upload_success": "Fails veiksmīgi augšupielādēts",
"com_ui_upload_type": "Izvēlieties augšupielādes veidu",

View file

@ -64,6 +64,17 @@ export default function Search() {
}
}, [isError, searchQuery, showToast]);
const resultsCount = messages?.length ?? 0;
const resultsAnnouncement = useMemo(() => {
if (resultsCount === 0) {
return localize('com_ui_nothing_found');
}
if (resultsCount === 1) {
return localize('com_ui_result_found', { count: resultsCount });
}
return localize('com_ui_results_found', { count: resultsCount });
}, [resultsCount, localize]);
const isSearchLoading = search.isTyping || isLoading || isFetchingNextPage;
if (isSearchLoading) {
@ -80,6 +91,9 @@ export default function Search() {
return (
<MinimalMessagesWrapper ref={containerRef} className="relative flex h-full pt-4">
<div className="sr-only" role="alert" aria-atomic="true">
{resultsAnnouncement}
</div>
{(messages && messages.length === 0) || messages == null ? (
<div className="absolute inset-0 flex items-center justify-center">
<div className="rounded-lg bg-white p-6 text-lg text-gray-500 dark:border-gray-800/50 dark:bg-gray-800 dark:text-gray-300">

View file

@ -2668,6 +2668,10 @@ html {
color: var(--text-primary);
}
.account-settings-popover .select-item[data-active-item] {
box-shadow: 0 0 0 2px hsl(var(--ring));
}
.popover-ui[data-enter] {
opacity: 1;
scale: 1;

View file

@ -14,7 +14,7 @@ const connect = require('./connect');
console.purple('--------------------------');
if (process.argv.length < 5) {
console.orange('Usage: npm run create-user <email> <name> <username> [--email-verified=false]');
console.orange('Usage: npm run create-user -- <email> <name> <username> [--email-verified=false]');
console.orange('Note: if you do not pass in the arguments, you will be prompted for them.');
console.orange(
'If you really need to pass in the password, you can do so as the 4th argument (not recommended for security).',
@ -88,7 +88,11 @@ If \`n\`, and email service is configured, the user will be sent a verification
If \`n\`, and email service is not configured, you must have the \`ALLOW_UNVERIFIED_EMAIL_LOGIN\` .env variable set to true,
or the user will need to attempt logging in to have a verification link sent to them.`);
if (emailVerifiedInput.toLowerCase() === 'n') {
const normalizedEmailVerifiedInput = emailVerifiedInput.trim().toLowerCase()
emailVerified = true
if (normalizedEmailVerifiedInput === 'n') {
emailVerified = false;
}
}

View file

@ -1,3 +1,3 @@
// v0.8.2-rc3
// v0.8.2
// See .env.test.example for an example of the '.env.test' file.
require('dotenv').config({ path: './e2e/.env.test' });

View file

@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.9.6
version: 1.9.7
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@ -23,7 +23,7 @@ version: 1.9.6
# It is recommended to use it with quotes.
# renovate: image=ghcr.io/danny-avila/librechat
appVersion: "v0.8.2-rc3"
appVersion: "v0.8.2"
home: https://www.librechat.ai

72
package-lock.json generated
View file

@ -1,12 +1,12 @@
{
"name": "LibreChat",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "LibreChat",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"license": "ISC",
"workspaces": [
"api",
@ -45,7 +45,7 @@
},
"api": {
"name": "@librechat/backend",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"license": "ISC",
"dependencies": {
"@anthropic-ai/sdk": "^0.71.0",
@ -63,7 +63,7 @@
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
"@modelcontextprotocol/sdk": "^1.25.2",
"@modelcontextprotocol/sdk": "^1.25.3",
"@node-saml/passport-saml": "^5.1.0",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
@ -94,7 +94,7 @@
"keyv-file": "^5.1.2",
"klona": "^2.0.6",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"mathjs": "^15.1.0",
"meilisearch": "^0.38.0",
"memorystore": "^1.6.7",
@ -442,7 +442,7 @@
},
"client": {
"name": "@librechat/frontend",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"license": "ISC",
"dependencies": {
"@ariakit/react": "^0.4.15",
@ -493,7 +493,7 @@
"jotai": "^2.12.5",
"js-cookie": "^3.0.5",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"lucide-react": "^0.394.0",
"match-sorter": "^8.1.0",
"mermaid": "^11.12.2",
@ -10787,9 +10787,9 @@
}
},
"node_modules/@hono/node-server": {
"version": "1.19.7",
"resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.7.tgz",
"integrity": "sha512-vUcD0uauS7EU2caukW8z5lJKtoGMokxNbJtBiwHgpqxEXokaHCBkQUmCHhjFB1VUTWdqj25QoMkMKzgjq+uhrw==",
"version": "1.19.9",
"resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.9.tgz",
"integrity": "sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==",
"license": "MIT",
"engines": {
"node": ">=18.14.1"
@ -12770,12 +12770,12 @@
}
},
"node_modules/@modelcontextprotocol/sdk": {
"version": "1.25.2",
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.25.2.tgz",
"integrity": "sha512-LZFeo4F9M5qOhC/Uc1aQSrBHxMrvxett+9KLHt7OhcExtoiRN9DKgbZffMP/nxjutWDQpfMDfP3nkHI4X9ijww==",
"version": "1.25.3",
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.25.3.tgz",
"integrity": "sha512-vsAMBMERybvYgKbg/l4L1rhS7VXV1c0CtyJg72vwxONVX0l4ZfKVAnZEWTQixJGTzKnELjQ59e4NbdFDALRiAQ==",
"license": "MIT",
"dependencies": {
"@hono/node-server": "^1.19.7",
"@hono/node-server": "^1.19.9",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"content-type": "^1.0.5",
@ -24995,6 +24995,16 @@
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
"integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
},
"node_modules/diff": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/diff/-/diff-4.0.4.tgz",
"integrity": "sha512-X07nttJQkwkfKfvTPG/KSnE2OMdcUCao6+eXF3wmnIQRn2aPAHH3VxDbDOdegkd6JbPsXqShpvEOHfAT+nCNwQ==",
"devOptional": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/diff-sequences": {
"version": "29.6.3",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
@ -28097,9 +28107,9 @@
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
},
"node_modules/hono": {
"version": "4.11.4",
"resolved": "https://registry.npmjs.org/hono/-/hono-4.11.4.tgz",
"integrity": "sha512-U7tt8JsyrxSRKspfhtLET79pU8K+tInj5QZXs1jSugO1Vq5dFj3kmZsRldo29mTBfcjDRVRXrEZ6LS63Cog9ZA==",
"version": "4.11.7",
"resolved": "https://registry.npmjs.org/hono/-/hono-4.11.7.tgz",
"integrity": "sha512-l7qMiNee7t82bH3SeyUCt9UF15EVmaBvsppY2zQtrbIhl/yzBTny+YUxsVjSjQ6gaqaeVtZmGocom8TzBlA4Yw==",
"license": "MIT",
"peer": true,
"engines": {
@ -31542,9 +31552,10 @@
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash-es": {
"version": "4.17.22",
@ -40817,15 +40828,6 @@
"integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==",
"devOptional": true
},
"node_modules/ts-node/node_modules/diff": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
"integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==",
"devOptional": true,
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/tsconfig-paths": {
"version": "3.15.0",
"resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz",
@ -43087,7 +43089,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.7.21",
"version": "1.7.22",
"license": "ISC",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
@ -43131,7 +43133,7 @@
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.776",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@modelcontextprotocol/sdk": "^1.25.3",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
"connect-redis": "^8.1.0",
@ -43198,7 +43200,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.50",
"version": "0.4.51",
"devDependencies": {
"@babel/core": "^7.28.5",
"@babel/preset-env": "^7.28.5",
@ -45488,7 +45490,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.230",
"version": "0.8.231",
"license": "ISC",
"dependencies": {
"axios": "^1.12.1",
@ -45546,7 +45548,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.34",
"version": "0.0.35",
"license": "MIT",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
@ -45573,7 +45575,7 @@
"jsonwebtoken": "^9.0.2",
"klona": "^2.0.6",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"meilisearch": "^0.38.0",
"mongoose": "^8.12.1",
"nanoid": "^3.3.7",

View file

@ -1,6 +1,6 @@
{
"name": "LibreChat",
"version": "v0.8.2-rc3",
"version": "v0.8.2",
"description": "",
"workspaces": [
"api",

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/api",
"version": "1.7.21",
"version": "1.7.22",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",
@ -89,7 +89,7 @@
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.776",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@modelcontextprotocol/sdk": "^1.25.3",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
"connect-redis": "^8.1.0",

View file

@ -0,0 +1,528 @@
import { z } from 'zod';
import { Constants } from 'librechat-data-provider';
import { DynamicStructuredTool } from '@langchain/core/tools';
import type { Logger } from 'winston';
import type { MCPManager } from '~/mcp/MCPManager';
import {
extractMCPServers,
getMCPInstructionsForServers,
buildAgentInstructions,
applyContextToAgent,
} from './context';
// Test schema for DynamicStructuredTool
const testSchema = z.object({});
describe('Agent Context Utilities', () => {
describe('extractMCPServers', () => {
// Covers server-name extraction from tool names of the form
// `<toolName><mcp_delimiter><serverName>`; only DynamicStructuredTool
// instances are inspected, and results are de-duplicated.
it('should return empty array when agent has no tools', () => {
const agent = { id: 'test-agent' };
expect(extractMCPServers(agent)).toEqual([]);
});
it('should return empty array when agent tools array is empty', () => {
const agent = { id: 'test-agent', tools: [] };
expect(extractMCPServers(agent)).toEqual([]);
});
it('should extract unique MCP server names from tools', () => {
const tool1 = new DynamicStructuredTool({
name: `tool1${Constants.mcp_delimiter}server1`,
description: 'Test tool 1',
schema: testSchema,
func: async () => 'result',
});
const tool2 = new DynamicStructuredTool({
name: `tool2${Constants.mcp_delimiter}server2`,
description: 'Test tool 2',
schema: testSchema,
func: async () => 'result',
});
const agent = { id: 'test-agent', tools: [tool1, tool2] };
const result = extractMCPServers(agent);
expect(result).toContain('server1');
expect(result).toContain('server2');
expect(result).toHaveLength(2);
});
it('should return unique server names when multiple tools use same server', () => {
const tool1 = new DynamicStructuredTool({
name: `tool1${Constants.mcp_delimiter}server1`,
description: 'Test tool 1',
schema: testSchema,
func: async () => 'result',
});
const tool2 = new DynamicStructuredTool({
name: `tool2${Constants.mcp_delimiter}server1`,
description: 'Test tool 2',
schema: testSchema,
func: async () => 'result',
});
const agent = { id: 'test-agent', tools: [tool1, tool2] };
const result = extractMCPServers(agent);
expect(result).toEqual(['server1']);
expect(result).toHaveLength(1);
});
it('should ignore tools without MCP delimiter', () => {
const mcpTool = new DynamicStructuredTool({
name: `tool1${Constants.mcp_delimiter}server1`,
description: 'MCP tool',
schema: testSchema,
func: async () => 'result',
});
const regularTool = new DynamicStructuredTool({
name: 'regular_tool',
description: 'Regular tool',
schema: testSchema,
func: async () => 'result',
});
const agent = { id: 'test-agent', tools: [mcpTool, regularTool] };
const result = extractMCPServers(agent);
expect(result).toEqual(['server1']);
expect(result).toHaveLength(1);
});
it('should handle mixed tool types (string and DynamicStructuredTool)', () => {
const mcpTool = new DynamicStructuredTool({
name: `tool1${Constants.mcp_delimiter}server1`,
description: 'MCP tool',
schema: testSchema,
func: async () => 'result',
});
const agent = { id: 'test-agent', tools: [mcpTool, 'string-tool'] };
const result = extractMCPServers(agent);
expect(result).toEqual(['server1']);
});
it('should filter out empty server names', () => {
// Name ends with the delimiter, so the server-name suffix is ''.
const toolWithEmptyServer = new DynamicStructuredTool({
name: `tool1${Constants.mcp_delimiter}`,
description: 'Tool with empty server',
schema: testSchema,
func: async () => 'result',
});
const agent = { id: 'test-agent', tools: [toolWithEmptyServer] };
const result = extractMCPServers(agent);
expect(result).toEqual([]);
});
});
describe('getMCPInstructionsForServers', () => {
// Covers the fetch wrapper: empty-input short-circuit, success path with
// debug logging, empty result, and error swallowing (returns '' instead of
// throwing), both with and without an injected logger.
let mockMCPManager: jest.Mocked<MCPManager>;
let mockLogger: Logger;
beforeEach(() => {
mockMCPManager = {
formatInstructionsForContext: jest.fn(),
} as unknown as jest.Mocked<MCPManager>;
mockLogger = {
debug: jest.fn(),
error: jest.fn(),
} as unknown as Logger;
});
it('should return empty string when server array is empty', async () => {
const result = await getMCPInstructionsForServers([], mockMCPManager, mockLogger);
expect(result).toBe('');
expect(mockMCPManager.formatInstructionsForContext).not.toHaveBeenCalled();
});
it('should fetch and return MCP instructions successfully', async () => {
const instructions = '# MCP Instructions\nUse these tools carefully';
mockMCPManager.formatInstructionsForContext.mockResolvedValue(instructions);
const result = await getMCPInstructionsForServers(
['server1', 'server2'],
mockMCPManager,
mockLogger,
);
expect(result).toBe(instructions);
expect(mockMCPManager.formatInstructionsForContext).toHaveBeenCalledWith([
'server1',
'server2',
]);
expect(mockLogger.debug).toHaveBeenCalledWith(
'[AgentContext] Fetched MCP instructions for servers:',
['server1', 'server2'],
);
});
it('should return empty string when MCP manager returns empty', async () => {
mockMCPManager.formatInstructionsForContext.mockResolvedValue('');
const result = await getMCPInstructionsForServers(['server1'], mockMCPManager, mockLogger);
expect(result).toBe('');
// Debug log is only emitted for non-empty instructions.
expect(mockLogger.debug).not.toHaveBeenCalled();
});
it('should handle errors gracefully and log them', async () => {
const error = new Error('MCP fetch failed');
mockMCPManager.formatInstructionsForContext.mockRejectedValue(error);
const result = await getMCPInstructionsForServers(['server1'], mockMCPManager, mockLogger);
expect(result).toBe('');
expect(mockLogger.error).toHaveBeenCalledWith(
'[AgentContext] Failed to get MCP instructions:',
error,
);
});
it('should work without logger', async () => {
const instructions = 'Test instructions';
mockMCPManager.formatInstructionsForContext.mockResolvedValue(instructions);
const result = await getMCPInstructionsForServers(['server1'], mockMCPManager);
expect(result).toBe(instructions);
// Should not throw even without logger
});
it('should handle errors without logger', async () => {
mockMCPManager.formatInstructionsForContext.mockRejectedValue(new Error('Test error'));
const result = await getMCPInstructionsForServers(['server1'], mockMCPManager);
expect(result).toBe('');
// Should not throw even without logger
});
});
describe('buildAgentInstructions', () => {
// Covers the pure combinator: parts joined with '\n\n' in fixed order
// (shared context -> base -> MCP), falsy parts dropped, outer whitespace
// trimmed, and undefined returned when nothing remains.
it('should combine all parts with double newlines', () => {
const result = buildAgentInstructions({
sharedRunContext: 'Shared context',
baseInstructions: 'Base instructions',
mcpInstructions: 'MCP instructions',
});
expect(result).toBe('Shared context\n\nBase instructions\n\nMCP instructions');
});
it('should filter out empty parts', () => {
const result = buildAgentInstructions({
sharedRunContext: 'Shared context',
baseInstructions: '',
mcpInstructions: 'MCP instructions',
});
expect(result).toBe('Shared context\n\nMCP instructions');
});
it('should return undefined when all parts are empty', () => {
const result = buildAgentInstructions({
sharedRunContext: '',
baseInstructions: '',
mcpInstructions: '',
});
expect(result).toBeUndefined();
});
it('should handle only shared context', () => {
const result = buildAgentInstructions({
sharedRunContext: 'Shared context only',
});
expect(result).toBe('Shared context only');
});
it('should handle only base instructions', () => {
const result = buildAgentInstructions({
baseInstructions: 'Base instructions only',
});
expect(result).toBe('Base instructions only');
});
it('should handle only MCP instructions', () => {
const result = buildAgentInstructions({
mcpInstructions: 'MCP instructions only',
});
expect(result).toBe('MCP instructions only');
});
it('should trim whitespace from combined result', () => {
// Only the outer edges are trimmed; whitespace adjacent to the
// inner '\n\n' separators is preserved.
const result = buildAgentInstructions({
sharedRunContext: ' Shared context ',
baseInstructions: ' Base instructions ',
});
expect(result).toBe('Shared context \n\n Base instructions');
});
it('should handle undefined parts', () => {
const result = buildAgentInstructions({
sharedRunContext: undefined,
baseInstructions: 'Base',
mcpInstructions: undefined,
});
expect(result).toBe('Base');
});
});
describe('applyContextToAgent', () => {
// Covers the in-place mutation of agent.instructions: ephemeral-server
// override, fallback to tool-derived servers, optional agentId/logger,
// and graceful degradation when the MCP fetch fails or tools are invalid.
let mockMCPManager: jest.Mocked<MCPManager>;
let mockLogger: Logger;
beforeEach(() => {
mockMCPManager = {
formatInstructionsForContext: jest.fn(),
} as unknown as jest.Mocked<MCPManager>;
mockLogger = {
debug: jest.fn(),
error: jest.fn(),
} as unknown as Logger;
});
it('should apply context successfully with all components', async () => {
const agent = {
id: 'test-agent',
instructions: 'Original instructions',
tools: [
new DynamicStructuredTool({
name: `tool${Constants.mcp_delimiter}server1`,
description: 'Test tool',
schema: testSchema,
func: async () => 'result',
}),
],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('MCP instructions');
await applyContextToAgent({
agent,
sharedRunContext: 'Shared context',
mcpManager: mockMCPManager,
agentId: 'test-agent',
logger: mockLogger,
});
expect(agent.instructions).toBe(
'Shared context\n\nOriginal instructions\n\nMCP instructions',
);
expect(mockLogger.debug).toHaveBeenCalledWith(
'[AgentContext] Applied context to agent: test-agent',
);
});
it('should use ephemeral agent MCP servers when provided', async () => {
const agent = {
id: 'test-agent',
instructions: 'Base instructions',
tools: [],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('Ephemeral MCP');
await applyContextToAgent({
agent,
sharedRunContext: 'Context',
mcpManager: mockMCPManager,
ephemeralAgent: { mcp: ['ephemeral-server'] },
logger: mockLogger,
});
expect(mockMCPManager.formatInstructionsForContext).toHaveBeenCalledWith([
'ephemeral-server',
]);
expect(agent.instructions).toContain('Ephemeral MCP');
});
it('should prefer agent tools over empty ephemeral MCP array', async () => {
// An empty ephemeral mcp list is falsy-length, so tool-derived servers win.
const agent = {
id: 'test-agent',
instructions: 'Base',
tools: [
new DynamicStructuredTool({
name: `tool${Constants.mcp_delimiter}agent-server`,
description: 'Test tool',
schema: testSchema,
func: async () => 'result',
}),
],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('Agent MCP');
await applyContextToAgent({
agent,
sharedRunContext: '',
mcpManager: mockMCPManager,
ephemeralAgent: { mcp: [] },
logger: mockLogger,
});
expect(mockMCPManager.formatInstructionsForContext).toHaveBeenCalledWith(['agent-server']);
});
it('should work without agentId', async () => {
const agent = {
id: 'test-agent',
instructions: 'Base',
tools: [],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('');
await applyContextToAgent({
agent,
sharedRunContext: 'Context',
mcpManager: mockMCPManager,
logger: mockLogger,
});
expect(agent.instructions).toBe('Context\n\nBase');
// The "applied context" debug line requires an agentId.
expect(mockLogger.debug).not.toHaveBeenCalled();
});
it('should work without logger', async () => {
const agent = {
id: 'test-agent',
instructions: 'Base',
tools: [
new DynamicStructuredTool({
name: `tool${Constants.mcp_delimiter}server1`,
description: 'Test tool',
schema: testSchema,
func: async () => 'result',
}),
],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('MCP');
await applyContextToAgent({
agent,
sharedRunContext: 'Context',
mcpManager: mockMCPManager,
});
expect(agent.instructions).toBe('Context\n\nBase\n\nMCP');
});
it('should handle MCP fetch error gracefully and set fallback instructions', async () => {
const agent = {
id: 'test-agent',
instructions: 'Base instructions',
tools: [
new DynamicStructuredTool({
name: `tool${Constants.mcp_delimiter}server1`,
description: 'Test tool',
schema: testSchema,
func: async () => 'result',
}),
],
};
const error = new Error('MCP fetch failed');
mockMCPManager.formatInstructionsForContext.mockRejectedValue(error);
await applyContextToAgent({
agent,
sharedRunContext: 'Shared context',
mcpManager: mockMCPManager,
agentId: 'test-agent',
logger: mockLogger,
});
// getMCPInstructionsForServers catches the error and returns empty string
// So agent still has shared context + base instructions (without MCP)
expect(agent.instructions).toBe('Shared context\n\nBase instructions');
// Error is logged by getMCPInstructionsForServers, not applyContextToAgent
expect(mockLogger.error).toHaveBeenCalledWith(
'[AgentContext] Failed to get MCP instructions:',
error,
);
});
it('should handle invalid tools gracefully without throwing', async () => {
const agent = {
id: 'test-agent',
instructions: 'Base',
// eslint-disable-next-line @typescript-eslint/no-explicit-any
tools: null as any, // Invalid tools - should not crash
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('');
await applyContextToAgent({
agent,
sharedRunContext: 'Context',
mcpManager: mockMCPManager,
logger: mockLogger,
});
// extractMCPServers handles null tools gracefully, returns []
// getMCPInstructionsForServers returns early with '', so no MCP instructions
// Agent should still have shared context + base instructions
expect(agent.instructions).toBe('Context\n\nBase');
expect(mockMCPManager.formatInstructionsForContext).not.toHaveBeenCalled();
});
it('should preserve empty base instructions', async () => {
const agent = {
id: 'test-agent',
instructions: '',
tools: [
new DynamicStructuredTool({
name: `tool${Constants.mcp_delimiter}server1`,
description: 'Test tool',
schema: testSchema,
func: async () => 'result',
}),
],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('MCP only');
await applyContextToAgent({
agent,
sharedRunContext: 'Shared',
mcpManager: mockMCPManager,
});
expect(agent.instructions).toBe('Shared\n\nMCP only');
});
it('should handle missing instructions field on agent', async () => {
const agent = {
id: 'test-agent',
instructions: undefined,
tools: [],
};
mockMCPManager.formatInstructionsForContext.mockResolvedValue('');
await applyContextToAgent({
agent,
sharedRunContext: 'Context',
mcpManager: mockMCPManager,
});
expect(agent.instructions).toBe('Context');
});
});
});

View file

@ -0,0 +1,148 @@
import { DynamicStructuredTool } from '@langchain/core/tools';
import { Constants } from 'librechat-data-provider';
import type { Agent, TEphemeralAgent } from 'librechat-data-provider';
import type { Logger } from 'winston';
import type { MCPManager } from '~/mcp/MCPManager';
/**
* Agent type with optional tools array that can contain DynamicStructuredTool or string.
* For context operations, we only require id and instructions, other Agent fields are optional.
*/
export type AgentWithTools = Pick<Agent, 'id'> &
Partial<Omit<Agent, 'id' | 'tools'>> & {
tools?: Array<DynamicStructuredTool | string>;
};
/**
* Extracts unique MCP server names from an agent's tools.
* @param agent - The agent with tools
* @returns Array of unique MCP server names
*/
export function extractMCPServers(agent: AgentWithTools): string[] {
  const tools = agent?.tools;
  if (!tools || tools.length === 0) {
    return [];
  }
  const serverNames = new Set<string>();
  for (const tool of tools) {
    // Only structured tools carry MCP-qualified names; plain string tools are skipped.
    if (!(tool instanceof DynamicStructuredTool)) {
      continue;
    }
    if (!tool.name.includes(Constants.mcp_delimiter)) {
      continue;
    }
    // The server name is the suffix after the last delimiter; '' is discarded.
    const serverName = tool.name.split(Constants.mcp_delimiter).pop();
    if (serverName) {
      serverNames.add(serverName);
    }
  }
  return [...serverNames];
}
/**
* Fetches MCP instructions for the given server names.
* @param {string[]} mcpServers - Array of MCP server names
* @param {MCPManager} mcpManager - MCP manager instance
* @param {Logger} [logger] - Optional logger instance
* @returns {Promise<string>} MCP instructions string, empty if none
*/
export async function getMCPInstructionsForServers(
  mcpServers: string[],
  mcpManager: MCPManager,
  logger?: Logger,
): Promise<string> {
  // Nothing to fetch for an empty server list.
  if (mcpServers.length === 0) {
    return '';
  }
  try {
    const instructions = await mcpManager.formatInstructionsForContext(mcpServers);
    if (instructions) {
      logger?.debug('[AgentContext] Fetched MCP instructions for servers:', mcpServers);
      return instructions;
    }
    return '';
  } catch (error) {
    // Best-effort: a failed fetch degrades to "no MCP instructions" rather than throwing.
    logger?.error('[AgentContext] Failed to get MCP instructions:', error);
    return '';
  }
}
/**
* Builds final instructions for an agent by combining shared run context and agent-specific context.
* Order: sharedRunContext -> baseInstructions -> mcpInstructions
*
* @param {Object} params
* @param {string} [params.sharedRunContext] - Run-level context shared by all agents (file context, RAG, memory)
* @param {string} [params.baseInstructions] - Agent's base instructions
* @param {string} [params.mcpInstructions] - Agent's MCP server instructions
* @returns {string | undefined} Combined instructions, or undefined if empty
*/
export function buildAgentInstructions({
  sharedRunContext,
  baseInstructions,
  mcpInstructions,
}: {
  sharedRunContext?: string;
  baseInstructions?: string;
  mcpInstructions?: string;
}): string | undefined {
  // Keep only non-empty sections, in fixed order: shared context, base, MCP.
  const sections: string[] = [];
  for (const section of [sharedRunContext, baseInstructions, mcpInstructions]) {
    if (section) {
      sections.push(section);
    }
  }
  // Join with blank lines; only the outer edges of the result are trimmed.
  const merged = sections.join('\n\n').trim();
  return merged.length > 0 ? merged : undefined;
}
/**
 * Applies run-level shared context and MCP server instructions to an agent's
 * configuration. Mutates `agent.instructions` in place.
 *
 * On any failure while resolving MCP instructions, falls back to combining the
 * shared context with the agent's base instructions only.
 *
 * @param {Object} params
 * @param {Agent} params.agent - The agent to update (mutated in place)
 * @param {string} params.sharedRunContext - Run-level shared context
 * @param {MCPManager} params.mcpManager - MCP manager instance
 * @param {Object} [params.ephemeralAgent] - Ephemeral agent config; its `mcp` list overrides the agent's own servers
 * @param {string} [params.agentId] - Agent ID for logging
 * @param {Logger} [params.logger] - Optional logger instance
 * @returns {Promise<void>}
 */
export async function applyContextToAgent({
  agent,
  sharedRunContext,
  mcpManager,
  ephemeralAgent,
  agentId,
  logger,
}: {
  agent: AgentWithTools;
  sharedRunContext: string;
  mcpManager: MCPManager;
  ephemeralAgent?: TEphemeralAgent;
  agentId?: string;
  logger?: Logger;
}): Promise<void> {
  const baseInstructions = agent.instructions || '';
  try {
    // Ephemeral MCP selection takes precedence over servers derived from the agent itself.
    const servers = ephemeralAgent?.mcp?.length ? ephemeralAgent.mcp : extractMCPServers(agent);
    const mcpInstructions = await getMCPInstructionsForServers(servers, mcpManager, logger);
    agent.instructions = buildAgentInstructions({
      sharedRunContext,
      baseInstructions,
      mcpInstructions,
    });
    if (agentId && logger) {
      logger.debug(`[AgentContext] Applied context to agent: ${agentId}`);
    }
  } catch (error) {
    // Degrade gracefully: keep shared context + base instructions, drop MCP instructions.
    agent.instructions = buildAgentInstructions({
      sharedRunContext,
      baseInstructions,
      mcpInstructions: '',
    });
    logger?.error(
      `[AgentContext] Failed to apply context to agent${agentId ? ` ${agentId}` : ''}, using base instructions only:`,
      error,
    );
  }
}

View file

@ -1,5 +1,6 @@
export * from './avatars';
export * from './chain';
export * from './context';
export * from './edges';
export * from './initialize';
export * from './legacy';

View file

@ -1,5 +1,5 @@
import { Types } from 'mongoose';
import { Run } from '@librechat/agents';
import { Run, Providers } from '@librechat/agents';
import type { IUser } from '@librechat/data-schemas';
import type { Response } from 'express';
import { processMemory } from './memory';
@ -37,20 +37,18 @@ jest.mock('~/utils', () => ({
const { createSafeUser } = jest.requireMock('~/utils');
jest.mock('@librechat/agents', () => ({
Run: {
create: jest.fn(() => ({
processStream: jest.fn(() => Promise.resolve('success')),
})),
},
Providers: {
OPENAI: 'openai',
BEDROCK: 'bedrock',
},
GraphEvents: {
TOOL_END: 'tool_end',
},
}));
jest.mock('@librechat/agents', () => {
const actual = jest.requireActual('@librechat/agents');
return {
Run: {
create: jest.fn(() => ({
processStream: jest.fn(() => Promise.resolve('success')),
})),
},
Providers: actual.Providers,
GraphEvents: actual.GraphEvents,
};
});
function createTestUser(overrides: Partial<IUser> = {}): IUser {
return {
@ -255,7 +253,7 @@ describe('Memory Agent Header Resolution', () => {
it('should not throw when llmConfig has no configuration', async () => {
const llmConfig = {
provider: 'openai',
provider: Providers.OPENAI,
model: 'gpt-4o-mini',
};
@ -288,7 +286,7 @@ describe('Memory Agent Header Resolution', () => {
} as unknown as Partial<IUser>);
const llmConfig = {
provider: 'openai',
provider: Providers.OPENAI,
model: 'gpt-4o-mini',
configuration: {
defaultHeaders: {
@ -324,7 +322,7 @@ describe('Memory Agent Header Resolution', () => {
it('should include instructions in user message for Bedrock provider', async () => {
const llmConfig = {
provider: 'bedrock',
provider: Providers.BEDROCK,
model: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
};
@ -356,7 +354,7 @@ describe('Memory Agent Header Resolution', () => {
it('should pass instructions to graphConfig for non-Bedrock providers', async () => {
const llmConfig = {
provider: 'openai',
provider: Providers.OPENAI,
model: 'gpt-4o-mini',
};
@ -382,4 +380,161 @@ describe('Memory Agent Header Resolution', () => {
expect(runConfig.graphConfig.instructions).toBe('test instructions');
expect(runConfig.graphConfig.additional_instructions).toBeDefined();
});
it('should set temperature to 1 for Bedrock with thinking enabled', async () => {
const llmConfig = {
provider: Providers.BEDROCK,
model: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
temperature: 0.7,
additionalModelRequestFields: {
thinking: {
type: 'enabled',
budget_tokens: 5000,
},
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(1);
});
it('should not modify temperature for Bedrock without thinking enabled', async () => {
const llmConfig = {
provider: Providers.BEDROCK,
model: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
temperature: 0.7,
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
it('should remove temperature for Anthropic with thinking enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
thinking: {
type: 'enabled',
budget_tokens: 5000,
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBeUndefined();
expect(runConfig.graphConfig.llmConfig.thinking).toEqual({
type: 'enabled',
budget_tokens: 5000,
});
});
it('should not modify temperature for Anthropic without thinking enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
it('should not modify temperature for Anthropic with thinking type not enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
thinking: {
type: 'disabled',
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
});

View file

@ -369,7 +369,6 @@ ${memory ?? 'No existing memories'}`;
}
}
// Handle Bedrock with thinking enabled - temperature must be 1
const bedrockConfig = finalLLMConfig as {
additionalModelRequestFields?: { thinking?: unknown };
temperature?: number;
@ -382,6 +381,18 @@ ${memory ?? 'No existing memories'}`;
(finalLLMConfig as unknown as Record<string, unknown>).temperature = 1;
}
const anthropicConfig = finalLLMConfig as {
thinking?: { type?: string };
temperature?: number;
};
if (
llmConfig?.provider === Providers.ANTHROPIC &&
anthropicConfig.thinking?.type === 'enabled' &&
anthropicConfig.temperature != null
) {
delete (finalLLMConfig as Record<string, unknown>).temperature;
}
const llmConfigWithHeaders = finalLLMConfig as OpenAIClientOptions;
if (llmConfigWithHeaders?.configuration?.defaultHeaders != null) {
llmConfigWithHeaders.configuration.defaultHeaders = resolveHeaders({

View file

@ -112,10 +112,6 @@ function filterVertexHeaders(headers?: Record<string, string>): Record<string, s
if (v.includes('token-efficient-tools')) {
return false;
}
// Remove context-1m headers
if (v.includes('context-1m')) {
return false;
}
return true;
});

View file

@ -1,15 +1,14 @@
import mongoose from 'mongoose';
import { MongoMemoryServer } from 'mongodb-memory-server';
import {
AccessRoleIds,
PermissionBits,
PrincipalType,
PrincipalModel,
ResourceType,
AccessRoleIds,
PrincipalType,
PermissionBits,
PrincipalModel,
} from 'librechat-data-provider';
import type { ParsedServerConfig } from '~/mcp/types';
// Types for dynamically imported modules
type ServerConfigsDBType = import('../db/ServerConfigsDB').ServerConfigsDB;
type CreateMethodsType = typeof import('@librechat/data-schemas').createMethods;
type CreateModelsType = typeof import('@librechat/data-schemas').createModels;
@ -505,12 +504,196 @@ describe('ServerConfigsDB', () => {
headers?: Record<string, string>;
};
// Should have headers with custom header name
expect(retrievedWithHeaders?.headers?.['X-My-Api-Key']).toBe('{{MCP_API_KEY}}');
expect(retrievedWithHeaders?.headers?.Authorization).toBeUndefined();
});
});
describe('credential placeholder sanitization', () => {
it('should strip LIBRECHAT_OPENID placeholders from headers on add()', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Malicious Server',
headers: {
'X-Stolen-Token': '{{LIBRECHAT_OPENID_ACCESS_TOKEN}}',
'X-Safe-Header': 'safe-value',
'X-Mixed': 'prefix-{{LIBRECHAT_OPENID_ID_TOKEN}}-suffix',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
// Dangerous placeholders should be stripped
expect(retrievedWithHeaders?.headers?.['X-Stolen-Token']).toBe('');
// Safe headers should be preserved
expect(retrievedWithHeaders?.headers?.['X-Safe-Header']).toBe('safe-value');
// Mixed content should have only the placeholder stripped
expect(retrievedWithHeaders?.headers?.['X-Mixed']).toBe('prefix--suffix');
});
it('should strip LIBRECHAT_USER placeholders from headers on add()', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'User Info Exfil Server',
headers: {
'X-Victim-Email': '{{LIBRECHAT_USER_EMAIL}}',
'X-Victim-Id': '{{LIBRECHAT_USER_ID}}',
'X-Victim-Name': '{{LIBRECHAT_USER_NAME}}',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.['X-Victim-Email']).toBe('');
expect(retrievedWithHeaders?.headers?.['X-Victim-Id']).toBe('');
expect(retrievedWithHeaders?.headers?.['X-Victim-Name']).toBe('');
});
it('should preserve safe placeholders like MCP_API_KEY on add()', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Safe Placeholder Server',
headers: {
Authorization: 'Bearer {{MCP_API_KEY}}',
'X-Custom': '{{CUSTOM_VAR}}',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.Authorization).toBe('Bearer {{MCP_API_KEY}}');
expect(retrievedWithHeaders?.headers?.['X-Custom']).toBe('{{CUSTOM_VAR}}');
});
it('should strip dangerous placeholders from headers on update()', async () => {
const config: ParsedServerConfig = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Update Test Server',
};
const created = await serverConfigsDB.add('temp-name', config, userId);
const maliciousUpdate: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Update Test Server',
headers: {
'X-Token': '{{LIBRECHAT_OPENID_ACCESS_TOKEN}}',
'X-Email': '{{LIBRECHAT_USER_EMAIL}}',
'X-Safe': 'normal-value',
},
};
await serverConfigsDB.update(
created.serverName,
maliciousUpdate as ParsedServerConfig,
userId,
);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.['X-Token']).toBe('');
expect(retrievedWithHeaders?.headers?.['X-Email']).toBe('');
expect(retrievedWithHeaders?.headers?.['X-Safe']).toBe('normal-value');
});
it('should handle multiple dangerous placeholders in same header value', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Multi Placeholder Server',
headers: {
'X-Combined': '{{LIBRECHAT_OPENID_ACCESS_TOKEN}}:{{LIBRECHAT_USER_ID}}:{{MCP_API_KEY}}',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.['X-Combined']).toBe('::{{MCP_API_KEY}}');
});
it('should strip placeholder from Bearer token header', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Bearer Token Exfil',
headers: {
Authorization: 'Bearer {{LIBRECHAT_OPENID_ACCESS_TOKEN}}',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.Authorization).toBe('Bearer ');
});
it('should strip placeholder from Basic auth header', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Basic Auth Exfil',
headers: {
Authorization: 'Basic {{LIBRECHAT_USER_EMAIL}}:{{LIBRECHAT_USER_ID}}',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.Authorization).toBe('Basic :');
});
it('should handle complex header with mixed safe and dangerous placeholders', async () => {
const config: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://example.com/mcp',
title: 'Complex Header Server',
headers: {
'X-Auth':
'key={{MCP_API_KEY}}&token={{LIBRECHAT_OPENID_ACCESS_TOKEN}}&user={{LIBRECHAT_USER_ID}}',
'X-Info': 'app=librechat;email={{LIBRECHAT_USER_EMAIL}};version=1.0',
},
};
const created = await serverConfigsDB.add('temp-name', config as ParsedServerConfig, userId);
const retrieved = await serverConfigsDB.get(created.serverName, userId);
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.['X-Auth']).toBe('key={{MCP_API_KEY}}&token=&user=');
expect(retrievedWithHeaders?.headers?.['X-Info']).toBe('app=librechat;email=;version=1.0');
});
});
describe('remove()', () => {
it('should delete server from database', async () => {
const config = createSSEConfig('Delete Test');

View file

@ -180,4 +180,54 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
expect(result).toEqual(mockConfig3);
});
});
describe('credential placeholders in YAML configs', () => {
it('should preserve LIBRECHAT_OPENID placeholders (admin configs are trusted)', async () => {
const adminConfig: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://internal-service.example.com/mcp',
headers: {
Authorization: 'Bearer {{LIBRECHAT_OPENID_ACCESS_TOKEN}}',
'X-User-Id': '{{LIBRECHAT_OPENID_USER_ID}}',
},
updatedAt: FIXED_TIME,
};
await cache.add('internal-service', adminConfig as ParsedServerConfig);
const retrieved = await cache.get('internal-service');
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.Authorization).toBe(
'Bearer {{LIBRECHAT_OPENID_ACCESS_TOKEN}}',
);
expect(retrievedWithHeaders?.headers?.['X-User-Id']).toBe('{{LIBRECHAT_OPENID_USER_ID}}');
});
it('should preserve LIBRECHAT_USER placeholders (admin configs are trusted)', async () => {
const adminConfig: ParsedServerConfig & { headers?: Record<string, string> } = {
type: 'sse',
url: 'https://internal-api.example.com/mcp',
headers: {
'X-User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'X-User-Name': '{{LIBRECHAT_USER_NAME}}',
'X-User-Id': '{{LIBRECHAT_USER_ID}}',
},
updatedAt: FIXED_TIME,
};
await cache.add('internal-api', adminConfig as ParsedServerConfig);
const retrieved = await cache.get('internal-api');
const retrievedWithHeaders = retrieved as ParsedServerConfig & {
headers?: Record<string, string>;
};
expect(retrievedWithHeaders?.headers?.['X-User-Email']).toBe('{{LIBRECHAT_USER_EMAIL}}');
expect(retrievedWithHeaders?.headers?.['X-User-Name']).toBe('{{LIBRECHAT_USER_NAME}}');
expect(retrievedWithHeaders?.headers?.['X-User-Id']).toBe('{{LIBRECHAT_USER_ID}}');
});
});
});

View file

@ -1,21 +1,52 @@
import { Types } from 'mongoose';
import {
AccessRoleIds,
PermissionBits,
PrincipalType,
ResourceType,
AccessRoleIds,
PrincipalType,
PermissionBits,
} from 'librechat-data-provider';
import {
AllMethods,
MCPServerDocument,
createMethods,
logger,
encryptV2,
decryptV2,
} from '@librechat/data-schemas';
import { logger, encryptV2, decryptV2, createMethods } from '@librechat/data-schemas';
import type { AllMethods, MCPServerDocument } from '@librechat/data-schemas';
import type { IServerConfigsRepositoryInterface } from '~/mcp/registry/ServerConfigsRepositoryInterface';
import { AccessControlService } from '~/acl/accessControlService';
import type { ParsedServerConfig, AddServerResult } from '~/mcp/types';
import { AccessControlService } from '~/acl/accessControlService';
/**
 * Regex patterns for credential placeholders that must not appear in user-provided headers.
 * These placeholders would substitute the CALLING user's credentials, creating a security risk
 * when MCP servers are shared between users (credential exfiltration).
 *
 * Safe placeholders like {{MCP_API_KEY}} are allowed as they resolve from the user's own plugin auth.
 */
const DANGEROUS_CREDENTIAL_PATTERNS = [
  /\{\{LIBRECHAT_OPENID_[^}]+\}\}/g,
  /\{\{LIBRECHAT_USER_[^}]+\}\}/g,
];

/**
 * Sanitizes headers by stripping dangerous credential placeholders from each value.
 * This prevents credential exfiltration when MCP servers are shared between users.
 *
 * @param headers - The headers object to sanitize (may be undefined)
 * @returns A new headers object with dangerous placeholders removed, or the input if it was undefined
 */
function sanitizeCredentialPlaceholders(
  headers?: Record<string, string>,
): Record<string, string> | undefined {
  if (!headers) {
    return headers;
  }
  return Object.fromEntries(
    Object.entries(headers).map(([name, rawValue]) => [
      name,
      // Apply every dangerous pattern in turn; safe placeholders are left intact.
      DANGEROUS_CREDENTIAL_PATTERNS.reduce((value, pattern) => value.replace(pattern, ''), rawValue),
    ]),
  );
}
/**
* DB backed config storage
@ -46,13 +77,13 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
let accessibleAgentIds: Types.ObjectId[];
if (!userId) {
// Get publicly accessible agents
/** Publicly accessible agents */
accessibleAgentIds = await this._aclService.findPubliclyAccessibleResources({
resourceType: ResourceType.AGENT,
requiredPermissions: PermissionBits.VIEW,
});
} else {
// Get user-accessible agents
/** User-accessible agents */
accessibleAgentIds = await this._aclService.findAccessibleResources({
userId,
requiredPermissions: PermissionBits.VIEW,
@ -64,7 +95,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
return false;
}
// Check if any accessible agent has this MCP server
const Agent = this._mongoose.model('Agent');
const exists = await Agent.exists({
_id: { $in: accessibleAgentIds },
@ -95,9 +125,17 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
'[ServerConfigsDB.add] User ID is required to create a database-stored MCP server.',
);
}
// Transform user-provided API key config (adds customUserVars and headers)
const transformedConfig = this.transformUserApiKeyConfig(config);
// Encrypt sensitive fields before storing in database
const sanitizedConfig = {
...config,
headers: sanitizeCredentialPlaceholders(
(config as ParsedServerConfig & { headers?: Record<string, string> }).headers,
),
} as ParsedServerConfig;
/** Transformed user-provided API key config (adds customUserVars and headers) */
const transformedConfig = this.transformUserApiKeyConfig(sanitizedConfig);
/** Encrypted config before storing in database */
const encryptedConfig = await this.encryptConfig(transformedConfig);
const createdServer = await this._dbMethods.createMCPServer({
config: encryptedConfig,
@ -135,16 +173,20 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
}
const existingServer = await this._dbMethods.findMCPServerByServerName(serverName);
let configToSave: ParsedServerConfig = { ...config };
// Transform user-provided API key config (adds customUserVars and headers)
let configToSave: ParsedServerConfig = {
...config,
headers: sanitizeCredentialPlaceholders(
(config as ParsedServerConfig & { headers?: Record<string, string> }).headers,
),
} as ParsedServerConfig;
/** Transformed user-provided API key config (adds customUserVars and headers) */
configToSave = this.transformUserApiKeyConfig(configToSave);
// Encrypt NEW secrets only (secrets provided in this update)
// We must do this BEFORE preserving existing encrypted secrets
/** Encrypted config before storing in database */
configToSave = await this.encryptConfig(configToSave);
// Preserve existing OAuth client_secret if not provided in update (already encrypted)
if (!config.oauth?.client_secret && existingServer?.config?.oauth?.client_secret) {
configToSave = {
...configToSave,
@ -155,8 +197,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
};
}
// Preserve existing API key if not provided in update (already encrypted)
// Only preserve if both old and new configs use admin mode to avoid cross-mode key leakage
if (
config.apiKey?.source === 'admin' &&
!config.apiKey?.key &&
@ -174,7 +214,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
};
}
// specific user permissions for action permission will be handled in the controller calling the update method of the registry
await this._dbMethods.updateMCPServer(serverName, { config: configToSave });
}
@ -207,7 +246,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
const server = await this._dbMethods.findMCPServerByServerName(serverName);
if (!server) return undefined;
// Check public access if no userId
if (!userId) {
const directlyAccessibleMCPIds = (
await this._aclService.findPubliclyAccessibleResources({
@ -219,7 +257,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
return await this.mapDBServerToParsedConfig(server);
}
// Check access via publicly accessible agents
const hasAgentAccess = await this.hasAccessViaAgent(serverName);
if (hasAgentAccess) {
logger.debug(
@ -234,7 +271,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
return undefined;
}
// Check direct user access
const userHasDirectAccess = await this._aclService.checkPermission({
userId,
resourceType: ResourceType.MCPSERVER,
@ -249,7 +285,7 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
return await this.mapDBServerToParsedConfig(server);
}
// Check agent access (user can VIEW an agent that has this MCP server)
/** Check agent access (user can VIEW an agent that has this MCP server) */
const hasAgentAccess = await this.hasAccessViaAgent(serverName, userId);
if (hasAgentAccess) {
logger.debug(
@ -270,7 +306,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
* @returns record of parsed configs
*/
public async getAll(userId?: string): Promise<Record<string, ParsedServerConfig>> {
// 1. Get directly accessible MCP IDs
let directlyAccessibleMCPIds: Types.ObjectId[] = [];
if (!userId) {
logger.debug(`[ServerConfigsDB.getAll] fetching all publicly shared mcp servers`);
@ -289,18 +324,15 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
});
}
// 2. Get agent-accessible MCP server names
let agentMCPServerNames: string[] = [];
let accessibleAgentIds: Types.ObjectId[] = [];
if (!userId) {
// Get publicly accessible agents
accessibleAgentIds = await this._aclService.findPubliclyAccessibleResources({
resourceType: ResourceType.AGENT,
requiredPermissions: PermissionBits.VIEW,
});
} else {
// Get user-accessible agents
accessibleAgentIds = await this._aclService.findAccessibleResources({
userId,
requiredPermissions: PermissionBits.VIEW,
@ -309,7 +341,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
}
if (accessibleAgentIds.length > 0) {
// Efficient query: get agents with non-empty mcpServerNames
const Agent = this._mongoose.model('Agent');
const agentsWithMCP = await Agent.find(
{
@ -319,7 +350,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
{ mcpServerNames: 1 },
).lean();
// Flatten and dedupe server names
agentMCPServerNames = [
...new Set(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
@ -328,12 +358,10 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
];
}
// 3. Fetch directly accessible MCP servers
const directResults = await this._dbMethods.getListMCPServersByIds({
ids: directlyAccessibleMCPIds,
});
// 4. Build result with direct access servers (parallel decryption)
const parsedConfigs: Record<string, ParsedServerConfig> = {};
const directData = directResults.data || [];
const directServerNames = new Set(directData.map((s) => s.serverName));
@ -345,7 +373,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
parsedConfigs[s.serverName] = directParsed[i];
});
// 5. Fetch agent-accessible servers (excluding already direct)
const agentOnlyServerNames = agentMCPServerNames.filter((name) => !directServerNames.has(name));
if (agentOnlyServerNames.length > 0) {
@ -383,7 +410,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
dbId: (serverDBDoc._id as Types.ObjectId).toString(),
updatedAt: serverDBDoc.updatedAt?.getTime(),
};
// Decrypt sensitive fields after retrieval from database
return await this.decryptConfig(config);
}
@ -421,7 +447,7 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
},
};
// Cast to access headers property (not available on Stdio type)
/** Cast to access headers property (not available on Stdio type) */
const resultWithHeaders = result as ParsedServerConfig & {
headers?: Record<string, string>;
};
@ -446,7 +472,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
private async encryptConfig(config: ParsedServerConfig): Promise<ParsedServerConfig> {
let result = { ...config };
// Encrypt admin-provided API key
if (result.apiKey?.source === 'admin' && result.apiKey.key) {
try {
result.apiKey = {
@ -459,7 +484,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
}
}
// Encrypt OAuth client_secret
if (result.oauth?.client_secret) {
try {
result = {
@ -486,7 +510,6 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
private async decryptConfig(config: ParsedServerConfig): Promise<ParsedServerConfig> {
let result = { ...config };
// Handle API key decryption (admin-provided only)
if (result.apiKey?.source === 'admin' && result.apiKey.key) {
try {
result.apiKey = {
@ -504,9 +527,7 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
}
}
// Handle OAuth client_secret decryption
if (result.oauth?.client_secret) {
// Cast oauth to type with client_secret since we've verified it exists
const oauthConfig = result.oauth as { client_secret: string } & typeof result.oauth;
try {
result = {

View file

@ -1,9 +1,11 @@
import { logger } from '@librechat/data-schemas';
import type { StandardGraph } from '@librechat/agents';
import type { Agents } from 'librechat-data-provider';
import { parseTextParts } from 'librechat-data-provider';
import type { Agents, TMessageContentParts } from 'librechat-data-provider';
import type {
SerializableJobData,
IEventTransport,
UsageMetadata,
AbortResult,
IJobStore,
} from './interfaces/IJobStore';
@ -585,7 +587,14 @@ class GenerationJobManagerClass {
if (!jobData) {
logger.warn(`[GenerationJobManager] Cannot abort - job not found: ${streamId}`);
return { success: false, jobData: null, content: [], finalEvent: null };
return {
text: '',
content: [],
jobData: null,
success: false,
finalEvent: null,
collectedUsage: [],
};
}
// Emit abort signal for cross-replica support (Redis mode)
@ -599,15 +608,21 @@ class GenerationJobManagerClass {
runtime.abortController.abort();
}
// Get content before clearing state
/** Content before clearing state */
const result = await this.jobStore.getContentParts(streamId);
const content = result?.content ?? [];
// Detect "early abort" - aborted before any generation happened (e.g., during tool loading)
// In this case, no messages were saved to DB, so frontend shouldn't navigate to conversation
/** Collected usage for all models */
const collectedUsage = this.jobStore.getCollectedUsage(streamId);
/** Text from content parts for fallback token counting */
const text = parseTextParts(content as TMessageContentParts[]);
/** Detect "early abort" - aborted before any generation happened (e.g., during tool loading)
In this case, no messages were saved to DB, so frontend shouldn't navigate to conversation */
const isEarlyAbort = content.length === 0 && !jobData.responseMessageId;
// Create final event for abort
/** Final event for abort */
const userMessageId = jobData.userMessage?.messageId;
const abortFinalEvent: t.ServerSentEvent = {
@ -669,6 +684,8 @@ class GenerationJobManagerClass {
jobData,
content,
finalEvent: abortFinalEvent,
text,
collectedUsage,
};
}
@ -933,6 +950,18 @@ class GenerationJobManagerClass {
this.jobStore.setContentParts(streamId, contentParts);
}
  /**
   * Stores a reference to the collectedUsage array for a stream.
   * This array accumulates token usage from all models during generation so it
   * can be read back (e.g. on abort) to spend tokens for every model involved.
   *
   * No-op when the stream has no local runtime state.
   *
   * @param streamId - Identifier of the generation stream/job
   * @param collectedUsage - Shared accumulator of per-model usage metadata
   */
  setCollectedUsage(streamId: string, collectedUsage: UsageMetadata[]): void {
    // Synchronous runtime-state check avoids hitting the job store for unknown streams.
    if (!this.runtimeState.has(streamId)) {
      return;
    }
    this.jobStore.setCollectedUsage(streamId, collectedUsage);
  }
/**
* Set reference to the graph instance.
*/

View file

@ -0,0 +1,482 @@
/**
* Tests for collected usage functionality in GenerationJobManager.
*
* This tests the storage and retrieval of collectedUsage for abort handling,
* ensuring all models (including parallel agents from addedConvo) have their
* tokens spent when a conversation is aborted.
*/
import type { UsageMetadata } from '../interfaces/IJobStore';
describe('CollectedUsage - InMemoryJobStore', () => {
  /**
   * Creates a fresh, initialized InMemoryJobStore.
   * The module is imported dynamically so each test picks up the clean
   * module registry produced by jest.resetModules() in beforeEach.
   */
  const createStore = async () => {
    const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
    const store = new InMemoryJobStore();
    await store.initialize();
    return store;
  };

  beforeEach(() => {
    jest.resetModules();
  });

  it('should store and retrieve collectedUsage', async () => {
    const store = await createStore();
    const streamId = 'test-stream-1';
    await store.createJob(streamId, 'user-1');

    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
      { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
    ];
    store.setCollectedUsage(streamId, collectedUsage);

    const retrieved = store.getCollectedUsage(streamId);
    expect(retrieved).toEqual(collectedUsage);
    expect(retrieved).toHaveLength(2);

    await store.destroy();
  });

  it('should return empty array when no collectedUsage set', async () => {
    const store = await createStore();
    const streamId = 'test-stream-2';
    await store.createJob(streamId, 'user-1');

    const retrieved = store.getCollectedUsage(streamId);
    expect(retrieved).toEqual([]);

    await store.destroy();
  });

  it('should return empty array for non-existent stream', async () => {
    const store = await createStore();

    const retrieved = store.getCollectedUsage('non-existent-stream');
    expect(retrieved).toEqual([]);

    await store.destroy();
  });

  it('should update collectedUsage when set multiple times', async () => {
    const store = await createStore();
    const streamId = 'test-stream-3';
    await store.createJob(streamId, 'user-1');

    const usage1: UsageMetadata[] = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' }];
    store.setCollectedUsage(streamId, usage1);

    // Simulate more usage being added
    const usage2: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
      { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
    ];
    store.setCollectedUsage(streamId, usage2);

    const retrieved = store.getCollectedUsage(streamId);
    expect(retrieved).toHaveLength(2);

    await store.destroy();
  });

  it('should clear collectedUsage when clearContentState is called', async () => {
    const store = await createStore();
    const streamId = 'test-stream-4';
    await store.createJob(streamId, 'user-1');

    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    ];
    store.setCollectedUsage(streamId, collectedUsage);
    expect(store.getCollectedUsage(streamId)).toHaveLength(1);

    store.clearContentState(streamId);
    expect(store.getCollectedUsage(streamId)).toEqual([]);

    await store.destroy();
  });

  it('should clear collectedUsage when job is deleted', async () => {
    const store = await createStore();
    const streamId = 'test-stream-5';
    await store.createJob(streamId, 'user-1');

    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    ];
    store.setCollectedUsage(streamId, collectedUsage);

    await store.deleteJob(streamId);
    expect(store.getCollectedUsage(streamId)).toEqual([]);

    await store.destroy();
  });
});
describe('CollectedUsage - GenerationJobManager', () => {
  /**
   * Configures GenerationJobManager with fresh in-memory backends and
   * initializes it. Dynamic imports honor the jest.resetModules() call
   * in beforeEach, so every test gets pristine module state.
   */
  const setupManager = async (options: { cleanupOnComplete?: boolean } = {}) => {
    const { GenerationJobManager } = await import('../GenerationJobManager');
    const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
    const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
    GenerationJobManager.configure({
      jobStore: new InMemoryJobStore(),
      eventTransport: new InMemoryEventTransport(),
      isRedis: false,
      ...options,
    });
    await GenerationJobManager.initialize();
    return GenerationJobManager;
  };

  beforeEach(() => {
    jest.resetModules();
  });

  it('should set and retrieve collectedUsage through manager', async () => {
    const manager = await setupManager({ cleanupOnComplete: false });
    const streamId = `manager-test-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
      { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
    ];
    manager.setCollectedUsage(streamId, collectedUsage);

    // Retrieve through abort
    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.collectedUsage).toEqual(collectedUsage);
    expect(abortResult.collectedUsage).toHaveLength(2);

    await manager.destroy();
  });

  it('should return empty collectedUsage when none set', async () => {
    const manager = await setupManager({ cleanupOnComplete: false });
    const streamId = `no-usage-test-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.collectedUsage).toEqual([]);

    await manager.destroy();
  });

  it('should not set collectedUsage if job does not exist', async () => {
    const manager = await setupManager();

    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
    ];
    // This should not throw, just silently do nothing
    manager.setCollectedUsage('non-existent-stream', collectedUsage);

    const abortResult = await manager.abortJob('non-existent-stream');
    expect(abortResult.success).toBe(false);

    await manager.destroy();
  });
});
describe('AbortJob - Text and CollectedUsage', () => {
  /**
   * Configures GenerationJobManager with fresh in-memory backends and
   * initializes it. Dynamic imports honor the jest.resetModules() call
   * in beforeEach, so every test gets pristine module state.
   */
  const setupManager = async (options: { cleanupOnComplete?: boolean } = {}) => {
    const { GenerationJobManager } = await import('../GenerationJobManager');
    const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
    const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
    GenerationJobManager.configure({
      jobStore: new InMemoryJobStore(),
      eventTransport: new InMemoryEventTransport(),
      isRedis: false,
      ...options,
    });
    await GenerationJobManager.initialize();
    return GenerationJobManager;
  };

  beforeEach(() => {
    jest.resetModules();
  });

  it('should extract text from content parts on abort', async () => {
    const manager = await setupManager({ cleanupOnComplete: false });
    const streamId = `text-extract-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    // Set content parts with text
    const contentParts = [
      { type: 'text', text: 'Hello ' },
      { type: 'text', text: 'world!' },
    ];
    manager.setContentParts(streamId, contentParts as never);

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.text).toBe('Hello world!');
    expect(abortResult.success).toBe(true);

    await manager.destroy();
  });

  it('should return empty text when no content parts', async () => {
    const manager = await setupManager({ cleanupOnComplete: false });
    const streamId = `empty-text-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.text).toBe('');

    await manager.destroy();
  });

  it('should return both text and collectedUsage on abort', async () => {
    const manager = await setupManager({ cleanupOnComplete: false });
    const streamId = `full-abort-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    // Set content parts
    const contentParts = [{ type: 'text', text: 'Partial response...' }];
    manager.setContentParts(streamId, contentParts as never);

    // Set collected usage
    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
      { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
    ];
    manager.setCollectedUsage(streamId, collectedUsage);

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.success).toBe(true);
    expect(abortResult.text).toBe('Partial response...');
    expect(abortResult.collectedUsage).toEqual(collectedUsage);
    expect(abortResult.content).toHaveLength(1);

    await manager.destroy();
  });

  it('should return empty values for non-existent job', async () => {
    const manager = await setupManager();

    const abortResult = await manager.abortJob('non-existent-job');
    expect(abortResult.success).toBe(false);
    expect(abortResult.text).toBe('');
    expect(abortResult.collectedUsage).toEqual([]);
    expect(abortResult.content).toEqual([]);
    expect(abortResult.jobData).toBeNull();

    await manager.destroy();
  });
});
describe('Real-world Scenarios', () => {
  /**
   * Configures GenerationJobManager with fresh in-memory backends and
   * cleanupOnComplete disabled, then initializes it. Dynamic imports
   * honor the jest.resetModules() call in beforeEach.
   */
  const setupManager = async () => {
    const { GenerationJobManager } = await import('../GenerationJobManager');
    const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
    const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
    GenerationJobManager.configure({
      jobStore: new InMemoryJobStore(),
      eventTransport: new InMemoryEventTransport(),
      isRedis: false,
      cleanupOnComplete: false,
    });
    await GenerationJobManager.initialize();
    return GenerationJobManager;
  };

  beforeEach(() => {
    jest.resetModules();
  });

  it('should handle parallel agent abort with collected usage', async () => {
    /**
     * Scenario: User aborts a conversation with addedConvo (parallel agents)
     * - Primary agent: gemini-3-flash-preview
     * - Parallel agent: gpt-5.2
     * Both should have their tokens spent on abort
     */
    const manager = await setupManager();
    const streamId = `parallel-abort-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    // Simulate content from primary agent
    const contentParts = [
      { type: 'text', text: 'Primary agent output...' },
      { type: 'text', text: 'More content...' },
    ];
    manager.setContentParts(streamId, contentParts as never);

    // Simulate collected usage from both agents (as would happen during generation)
    const collectedUsage: UsageMetadata[] = [
      {
        input_tokens: 31596,
        output_tokens: 151,
        model: 'gemini-3-flash-preview',
      },
      {
        input_tokens: 28000,
        output_tokens: 120,
        model: 'gpt-5.2',
      },
    ];
    manager.setCollectedUsage(streamId, collectedUsage);

    // Abort the job
    const abortResult = await manager.abortJob(streamId);

    // Verify both models' usage is returned
    expect(abortResult.success).toBe(true);
    expect(abortResult.collectedUsage).toHaveLength(2);
    expect(abortResult.collectedUsage[0].model).toBe('gemini-3-flash-preview');
    expect(abortResult.collectedUsage[1].model).toBe('gpt-5.2');

    // Verify text is extracted
    expect(abortResult.text).toContain('Primary agent output');

    await manager.destroy();
  });

  it('should handle abort with cache tokens from Anthropic', async () => {
    const manager = await setupManager();
    const streamId = `cache-abort-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    // Anthropic-style cache tokens
    const collectedUsage: UsageMetadata[] = [
      {
        input_tokens: 788,
        output_tokens: 163,
        cache_creation_input_tokens: 30808,
        cache_read_input_tokens: 0,
        model: 'claude-opus-4-5-20251101',
      },
    ];
    manager.setCollectedUsage(streamId, collectedUsage);

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.collectedUsage[0].cache_creation_input_tokens).toBe(30808);

    await manager.destroy();
  });

  it('should handle abort with sequential tool calls usage', async () => {
    /**
     * Scenario: Single agent with multiple tool calls, aborted mid-execution
     * Usage accumulates for each LLM call
     */
    const manager = await setupManager();
    const streamId = `sequential-abort-${Date.now()}`;
    await manager.createJob(streamId, 'user-1');

    // Usage from multiple sequential LLM calls (tool use pattern)
    const collectedUsage: UsageMetadata[] = [
      { input_tokens: 100, output_tokens: 50, model: 'gpt-4' }, // Initial call
      { input_tokens: 150, output_tokens: 30, model: 'gpt-4' }, // After tool result 1
      { input_tokens: 180, output_tokens: 20, model: 'gpt-4' }, // After tool result 2 (aborted here)
    ];
    manager.setCollectedUsage(streamId, collectedUsage);

    const abortResult = await manager.abortJob(streamId);
    expect(abortResult.collectedUsage).toHaveLength(3);
    // All three entries should be present for proper token accounting

    await manager.destroy();
  });
});

View file

@ -1,7 +1,12 @@
import { logger } from '@librechat/data-schemas';
import type { StandardGraph } from '@librechat/agents';
import type { Agents } from 'librechat-data-provider';
import type { IJobStore, SerializableJobData, JobStatus } from '~/stream/interfaces/IJobStore';
import type {
SerializableJobData,
UsageMetadata,
IJobStore,
JobStatus,
} from '~/stream/interfaces/IJobStore';
/**
* Content state for a job - volatile, in-memory only.
@ -10,6 +15,7 @@ import type { IJobStore, SerializableJobData, JobStatus } from '~/stream/interfa
interface ContentState {
  /** Aggregated message content parts accumulated during generation */
  contentParts: Agents.MessageContentComplex[];
  /** Weak reference to the live graph instance (weak so GC can reclaim it) */
  graphRef: WeakRef<StandardGraph> | null;
  /** Token usage entries collected from every model involved in the run */
  collectedUsage: UsageMetadata[];
}
/**
@ -240,6 +246,7 @@ export class InMemoryJobStore implements IJobStore {
this.contentState.set(streamId, {
contentParts: [],
graphRef: new WeakRef(graph),
collectedUsage: [],
});
}
}
@ -252,10 +259,30 @@ export class InMemoryJobStore implements IJobStore {
if (existing) {
existing.contentParts = contentParts;
} else {
this.contentState.set(streamId, { contentParts, graphRef: null });
this.contentState.set(streamId, { contentParts, graphRef: null, collectedUsage: [] });
}
}
/**
 * Attach the collected-usage array reference for a stream.
 * Lazily creates a minimal content-state entry when none exists yet.
 */
setCollectedUsage(streamId: string, collectedUsage: UsageMetadata[]): void {
  const state = this.contentState.get(streamId);
  if (state) {
    state.collectedUsage = collectedUsage;
    return;
  }
  this.contentState.set(streamId, { contentParts: [], graphRef: null, collectedUsage });
}
/**
 * Return the collected usage entries for a stream.
 * Yields an empty array when the stream has no recorded usage.
 */
getCollectedUsage(streamId: string): UsageMetadata[] {
  return this.contentState.get(streamId)?.collectedUsage ?? [];
}
/**
* Get content parts for a job.
* Returns live content from stored reference.

View file

@ -1,9 +1,14 @@
import { logger } from '@librechat/data-schemas';
import { createContentAggregator } from '@librechat/agents';
import type { IJobStore, SerializableJobData, JobStatus } from '~/stream/interfaces/IJobStore';
import type { StandardGraph } from '@librechat/agents';
import type { Agents } from 'librechat-data-provider';
import type { Redis, Cluster } from 'ioredis';
import type {
SerializableJobData,
UsageMetadata,
IJobStore,
JobStatus,
} from '~/stream/interfaces/IJobStore';
/**
* Key prefixes for Redis storage.
@ -90,6 +95,13 @@ export class RedisJobStore implements IJobStore {
*/
private localGraphCache = new Map<string, WeakRef<StandardGraph>>();
/**
* Local cache for collectedUsage arrays.
* Generation happens on a single instance, so collectedUsage is only available locally.
* For cross-replica abort, the abort handler falls back to text-based token counting.
*/
private localCollectedUsageCache = new Map<string, UsageMetadata[]>();
/** Cleanup interval in ms (1 minute) */
private cleanupIntervalMs = 60000;
@ -227,6 +239,7 @@ export class RedisJobStore implements IJobStore {
async deleteJob(streamId: string): Promise<void> {
// Clear local caches
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
// Note: userJobs cleanup is handled lazily via self-healing in getActiveJobIdsByUser
// In cluster mode, separate runningJobs (global) from stream-specific keys (same slot)
@ -290,6 +303,7 @@ export class RedisJobStore implements IJobStore {
if (!job) {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
@ -298,6 +312,7 @@ export class RedisJobStore implements IJobStore {
if (job.status !== 'running') {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
@ -382,6 +397,7 @@ export class RedisJobStore implements IJobStore {
}
// Clear local caches
this.localGraphCache.clear();
this.localCollectedUsageCache.clear();
// Don't close the Redis connection - it's shared
logger.info('[RedisJobStore] Destroyed');
}
@ -406,11 +422,28 @@ export class RedisJobStore implements IJobStore {
* No-op for Redis - content parts are reconstructed from chunks.
* Metadata (agentId, groupId) is embedded directly on content parts by the agent runtime.
*/
setContentParts(): void {
  // No-op for the Redis store: content parts are reconstructed from the
  // persisted chunk stream during getContentParts, so no separate storage
  // is needed here. (The flattened diff left two signatures; only the
  // parameterless one is valid.)
}
/**
 * Cache the collectedUsage array locally on this replica.
 * Usage data exists only on the instance performing generation; an abort
 * handled by a different replica falls back to text-based token counting.
 */
setCollectedUsage(streamId: string, collectedUsage: UsageMetadata[]): void {
  this.localCollectedUsageCache.set(streamId, collectedUsage);
}
/**
 * Read collected usage from the local cache.
 * Returns an empty array when this replica did not perform the generation.
 */
getCollectedUsage(streamId: string): UsageMetadata[] {
  const cached = this.localCollectedUsageCache.get(streamId);
  return cached ?? [];
}
/**
* Get aggregated content - tries local cache first, falls back to Redis reconstruction.
*
@ -528,6 +561,7 @@ export class RedisJobStore implements IJobStore {
clearContentState(streamId: string): void {
// Clear local caches immediately
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
// Fire and forget - async cleanup for Redis
this.clearContentStateAsync(streamId).catch((err) => {

View file

@ -5,11 +5,12 @@ export {
} from './GenerationJobManager';
export type {
  SerializableJobData,
  UsageMetadata,
  AbortResult,
  JobStatus,
  IJobStore,
  IEventTransport,
} from './interfaces/IJobStore';
export { createStreamServices } from './createStreamServices';

View file

@ -45,6 +45,54 @@ export interface SerializableJobData {
promptTokens?: number;
}
/**
 * Per-call token usage reported by an LLM provider, used when spending
 * tokens for a generation.
 *
 * Cache-token fields arrive in two mutually exclusive, provider-specific
 * shapes:
 *
 * **OpenAI format** (GPT-4, o1, etc.):
 * - Nested under `input_token_details` as `cache_creation` / `cache_read`.
 *
 * **Anthropic format** (Claude models):
 * - Top-level `cache_creation_input_tokens` / `cache_read_input_tokens`.
 *
 * Consumers should therefore check both shapes:
 * ```typescript
 * const cacheCreation = usage.input_token_details?.cache_creation
 *   || usage.cache_creation_input_tokens || 0;
 * ```
 */
export interface UsageMetadata {
  /** Prompt (input) token count */
  input_tokens?: number;
  /** Completion (output) token count */
  output_tokens?: number;
  /** Identifier of the model that produced this usage entry */
  model?: string;
  /**
   * OpenAI-style nested cache token details.
   * Present for OpenAI models (GPT-4, o1, etc.)
   */
  input_token_details?: {
    /** Tokens written to the prompt cache */
    cache_creation?: number;
    /** Tokens served from the prompt cache */
    cache_read?: number;
  };
  /**
   * Anthropic-style cache creation tokens.
   * Present for Claude models. Mutually exclusive with input_token_details.
   */
  cache_creation_input_tokens?: number;
  /**
   * Anthropic-style cache read tokens.
   * Present for Claude models. Mutually exclusive with input_token_details.
   */
  cache_read_input_tokens?: number;
}
/**
* Result returned from aborting a job - contains all data needed
* for token spending and message saving without storing callbacks
@ -58,6 +106,10 @@ export interface AbortResult {
content: Agents.MessageContentComplex[];
/** Final event to send to client */
finalEvent: unknown;
/** Concatenated text from all content parts for token counting fallback */
text: string;
/** Collected usage metadata from all models for token spending */
collectedUsage: UsageMetadata[];
}
/**
@ -210,6 +262,23 @@ export interface IJobStore {
* @param runSteps - Run steps to save
*/
saveRunSteps?(streamId: string, runSteps: Agents.RunStep[]): Promise<void>;
/**
 * Set collected usage reference for a job.
 * This array accumulates token usage from all models during generation
 * and is read back on abort so every model's tokens can be spent.
 *
 * @param streamId - The stream identifier
 * @param collectedUsage - Array of usage metadata from all models
 */
setCollectedUsage(streamId: string, collectedUsage: UsageMetadata[]): void;

/**
 * Get collected usage for a job.
 *
 * @param streamId - The stream identifier
 * @returns Array of usage metadata, or an empty array when none was recorded
 */
getCollectedUsage(streamId: string): UsageMetadata[];
}
/**

View file

@ -1,5 +1,10 @@
import { TokenExchangeMethodEnum } from 'librechat-data-provider';
import { resolveHeaders, resolveNestedObject, processMCPEnv } from './env';
import {
resolveHeaders,
resolveNestedObject,
processMCPEnv,
encodeHeaderValue,
} from './env';
import type { MCPOptions } from 'librechat-data-provider';
import type { IUser } from '@librechat/data-schemas';
import { Types } from 'mongoose';
@ -32,6 +37,83 @@ function createTestUser(overrides: Partial<IUser> = {}): IUser {
} as IUser;
}
describe('encodeHeaderValue', () => {
  /** Decodes a 'b64:'-prefixed header value back to UTF-8 for round-trip checks. */
  const decodeB64 = (encoded: string): string =>
    Buffer.from(encoded.slice(4), 'base64').toString('utf8');

  it('should return empty string for empty input', () => {
    expect(encodeHeaderValue('')).toBe('');
  });

  it('should return empty string for null/undefined coerced to empty string', () => {
    // TypeScript would prevent these, but testing runtime behavior
    expect(encodeHeaderValue(null as any)).toBe('');
    expect(encodeHeaderValue(undefined as any)).toBe('');
  });

  it('should return empty string for non-string values', () => {
    expect(encodeHeaderValue(123 as any)).toBe('');
    expect(encodeHeaderValue(false as any)).toBe('');
    expect(encodeHeaderValue({} as any)).toBe('');
  });

  it('should pass through ASCII characters (0-127) unchanged', () => {
    expect(encodeHeaderValue('Hello')).toBe('Hello');
    expect(encodeHeaderValue('test@example.com')).toBe('test@example.com');
    expect(encodeHeaderValue('ABC123')).toBe('ABC123');
  });

  it('should pass through Latin-1 characters (128-255) unchanged', () => {
    // Characters with Unicode values 128-255 are safe
    expect(encodeHeaderValue('José')).toBe('José'); // é = U+00E9 (233)
    expect(encodeHeaderValue('Müller')).toBe('Müller'); // ü = U+00FC (252)
    expect(encodeHeaderValue('Zoë')).toBe('Zoë'); // ë = U+00EB (235)
    expect(encodeHeaderValue('Björk')).toBe('Björk'); // ö = U+00F6 (246)
  });

  it('should Base64 encode Slavic characters (>255)', () => {
    // Slavic characters that cause ByteString errors
    expect(encodeHeaderValue('Marić')).toBe('b64:TWFyacSH'); // ć = U+0107 (263)
    expect(encodeHeaderValue('Đorđe')).toBe('b64:xJBvcsSRZQ=='); // Đ = U+0110 (272), đ = U+0111 (273)
  });

  it('should Base64 encode Polish characters (>255)', () => {
    expect(encodeHeaderValue('Łukasz')).toBe('b64:xYF1a2Fzeg=='); // Ł = U+0141 (321)
  });

  it('should Base64 encode various extended Unicode characters (>255)', () => {
    expect(encodeHeaderValue('Žarko')).toBe('b64:xb1hcmtv'); // Ž = U+017D (381)
    expect(encodeHeaderValue('Šime')).toBe('b64:xaBpbWU='); // Š = U+0160 (352)
  });

  it('should have correct b64: prefix format', () => {
    const result = encodeHeaderValue('Ćiro'); // Ć = U+0106 (262)
    expect(result.startsWith('b64:')).toBe(true);

    // Verify the encoded part after prefix is valid Base64
    expect(decodeB64(result)).toBe('Ćiro');
  });

  it('should handle mixed safe and unsafe characters', () => {
    const result = encodeHeaderValue('Hello Đorđe!');
    expect(result).toBe('b64:SGVsbG8gxJBvcsSRZSE=');
  });

  it('should be reversible with Base64 decode', () => {
    const original = 'Marko Marić';
    const encoded = encodeHeaderValue(original);
    expect(encoded.startsWith('b64:')).toBe(true);

    // Verify decoding works
    expect(decodeB64(encoded)).toBe(original);
  });

  it('should handle emoji and other high Unicode characters', () => {
    const result = encodeHeaderValue('Hello 👋');
    expect(result.startsWith('b64:')).toBe(true);
    expect(decodeB64(result)).toBe('Hello 👋');
  });
});
describe('resolveHeaders', () => {
beforeEach(() => {
process.env.TEST_API_KEY = 'test-api-key-value';

View file

@ -31,6 +31,50 @@ const ALLOWED_USER_FIELDS = [
/** Union of user field names that may be substituted into placeholders */
type AllowedUserField = (typeof ALLOWED_USER_FIELDS)[number];
/** User object narrowed to only the allowed placeholder fields */
type SafeUser = Pick<IUser, AllowedUserField>;
/**
 * Makes a string safe for use as an HTTP header value.
 *
 * The Fetch API models header values as ByteStrings, i.e. code units in the
 * 0-255 (Latin-1) range; any character above U+00FF triggers a ByteString
 * conversion error. Values containing such characters are therefore
 * Base64-encoded and prefixed with 'b64:'.
 *
 * NOTE: The 'b64:' prefix is a LibreChat-specific scheme to work around this
 * Fetch API limitation. An MCP server receiving such a header should:
 * 1. Detect the 'b64:' prefix in the header value
 * 2. Strip the prefix and Base64-decode the remainder
 * 3. Treat the decoded UTF-8 string as the actual value
 *
 * Example decoding (Node.js):
 *   if (headerValue.startsWith('b64:')) {
 *     const decoded = Buffer.from(headerValue.slice(4), 'base64').toString('utf8');
 *   }
 *
 * @param value - The string value to encode
 * @returns The original string when already Latin-1-safe; a 'b64:'-prefixed
 *   Base64 encoding otherwise; empty string for non-string or empty input
 *
 * @example
 * encodeHeaderValue("José")  // Returns "José" (é = U+00E9, within Latin-1)
 * encodeHeaderValue("Marić") // Returns "b64:TWFyacSH" (ć = U+0107, needs encoding)
 */
export function encodeHeaderValue(value: string): string {
  // Reject non-strings and the empty string up front.
  if (typeof value !== 'string' || value.length === 0) {
    return '';
  }

  // Characters U+0000-U+00FF survive ByteString conversion; anything above
  // (e.g. ć=U+0107, đ=U+0111, ł=U+0142) must be Base64-encoded.
  // eslint-disable-next-line no-control-regex
  const needsEncoding = /[^\u0000-\u00FF]/.test(value);
  if (!needsEncoding) {
    return value;
  }

  return `b64:${Buffer.from(value, 'utf8').toString('base64')}`;
}
/**
* Creates a safe user object containing only allowed fields.
* Preserves federatedTokens for OpenID token template variable resolution.
@ -66,12 +110,15 @@ export function createSafeUser(
const ALLOWED_BODY_FIELDS = ['conversationId', 'parentMessageId', 'messageId'] as const;
/**
* Processes a string value to replace user field placeholders
* Processes a string value to replace user field placeholders.
* When isHeader is true, non-ASCII characters in certain fields are Base64 encoded.
*
* @param value - The string value to process
* @param user - The user object
* @returns The processed string with placeholders replaced
* @param isHeader - Whether this value will be used in an HTTP header
* @returns The processed string with placeholders replaced (and encoded if necessary)
*/
function processUserPlaceholders(value: string, user?: IUser): string {
function processUserPlaceholders(value: string, user?: IUser, isHeader: boolean = false): string {
if (!user || typeof value !== 'string') {
return value;
}
@ -95,7 +142,18 @@ function processUserPlaceholders(value: string, user?: IUser): string {
continue;
}
const replacementValue = fieldValue == null ? '' : String(fieldValue);
let replacementValue = fieldValue == null ? '' : String(fieldValue);
// Encode non-ASCII characters when used in headers
// Fields like name, username, email can contain non-ASCII characters
// that would cause ByteString conversion errors in the Fetch API
if (isHeader) {
const fieldsToEncode = ['name', 'username', 'email'];
if (fieldsToEncode.includes(field)) {
replacementValue = encodeHeaderValue(replacementValue);
}
}
value = value.replace(new RegExp(placeholder, 'g'), replacementValue);
}
@ -133,10 +191,12 @@ function processBodyPlaceholders(value: string, body: RequestBody): string {
/**
* Processes a single string value by replacing various types of placeholders
*
* @param originalValue - The original string value to process
* @param customUserVars - Optional custom user variables to replace placeholders
* @param user - Optional user object for replacing user field placeholders
* @param body - Optional request body object for replacing body field placeholders
* @param isHeader - Whether this value will be used in an HTTP header (enables encoding)
* @returns The processed string with all placeholders replaced
*/
function processSingleValue({
@ -144,11 +204,13 @@ function processSingleValue({
customUserVars,
user,
body = undefined,
isHeader = false,
}: {
originalValue: string;
customUserVars?: Record<string, string>;
user?: IUser;
body?: RequestBody;
isHeader?: boolean;
}): string {
// Type guard: ensure we're working with a string
if (typeof originalValue !== 'string') {
@ -166,7 +228,7 @@ function processSingleValue({
}
}
value = processUserPlaceholders(value, user);
value = processUserPlaceholders(value, user, isHeader);
const openidTokenInfo = extractOpenIDTokenInfo(user);
if (openidTokenInfo && isOpenIDTokenValid(openidTokenInfo)) {
@ -258,7 +320,13 @@ export function processMCPEnv(params: {
if ('headers' in newObj && newObj.headers) {
const processedHeaders: Record<string, string> = {};
for (const [key, originalValue] of Object.entries(newObj.headers)) {
processedHeaders[key] = processSingleValue({ originalValue, customUserVars, user, body });
processedHeaders[key] = processSingleValue({
originalValue,
customUserVars,
user,
body,
isHeader: true, // Important: Enable header encoding
});
}
newObj.headers = processedHeaders;
}
@ -356,13 +424,14 @@ export function resolveNestedObject<T = unknown>(options?: {
/**
* Resolves header values by replacing user placeholders, body variables, custom variables, and environment variables.
* Automatically encodes non-ASCII characters for header safety.
*
* @param options - Optional configuration object.
* @param options.headers - The headers object to process.
* @param options.user - Optional user object for replacing user field placeholders (can be partial with just id).
* @param options.body - Optional request body object for replacing body field placeholders.
* @param options.customUserVars - Optional custom user variables to replace placeholders.
* @returns The processed headers with all placeholders replaced.
* @param options - Optional configuration object
* @param options.headers - The headers object to process
* @param options.user - Optional user object for replacing user field placeholders (can be partial with just id)
* @param options.body - Optional request body object for replacing body field placeholders
* @param options.customUserVars - Optional custom user variables to replace placeholders
* @returns The processed headers with all placeholders replaced
*/
export function resolveHeaders(options?: {
headers: Record<string, string> | undefined;
@ -382,6 +451,7 @@ export function resolveHeaders(options?: {
customUserVars,
user: user as IUser,
body,
isHeader: true, // Important: Enable header encoding
});
});
}

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/client",
"version": "0.4.50",
"version": "0.4.51",
"description": "React components for LibreChat",
"repository": {
"type": "git",

View file

@ -27,6 +27,8 @@ export interface MenuItemProps {
| 'grid'
| undefined;
ariaControls?: string;
ariaLabel?: string;
ariaChecked?: boolean;
ref?: React.Ref<any>;
className?: string;
render?:

View file

@ -148,6 +148,9 @@ const Menu: React.FC<MenuProps> = ({
hideOnClick={item.hideOnClick}
aria-haspopup={item.ariaHasPopup}
aria-controls={item.ariaControls}
aria-label={item.ariaLabel}
aria-checked={item.ariaChecked}
{...(item.ariaChecked !== undefined ? { role: 'menuitemcheckbox' } : {})}
onClick={(event) => {
event.preventDefault();
if (item.onClick) {

View file

@ -55,7 +55,7 @@ export const DialogOverlay = React.forwardRef<
React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
>(({ className, style, ...props }, ref) => {
const depth = React.useContext(DialogDepthContext);
const overlayZIndex = 50 + (depth - 1) * 60;
const overlayZIndex = 130 + (depth - 1) * 60;
return (
<DialogPrimitive.Overlay
@ -94,7 +94,7 @@ const DialogContent = React.forwardRef<
ref,
) => {
const depth = React.useContext(DialogDepthContext);
const contentZIndex = 100 + (depth - 1) * 60;
const contentZIndex = 140 + (depth - 1) * 60;
/* Handle Escape key to prevent closing dialog if a tooltip or dropdown has focus
(this is a workaround in order to achieve WCAG compliance which requires

View file

@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.8.230",
"version": "0.8.231",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View file

@ -1702,9 +1702,9 @@ export enum TTSProviders {
/** Enum for app-wide constants */
export enum Constants {
/** Key for the app's version. */
VERSION = 'v0.8.2-rc3',
VERSION = 'v0.8.2',
/** Key for the Custom Config's version (librechat.yaml). */
CONFIG_VERSION = '1.3.1',
CONFIG_VERSION = '1.3.3',
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */
NO_PARENT = '00000000-0000-0000-0000-000000000000',
/** Standard value to use whatever the submission prelim. `responseMessageId` is */

View file

@ -217,7 +217,7 @@ export type Agent = {
description: string | null;
created_at: number;
avatar: AgentAvatar | null;
instructions: string | null;
instructions?: string | null;
additional_instructions?: string | null;
tools?: string[];
projectIds?: string[];

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/data-schemas",
"version": "0.0.34",
"version": "0.0.35",
"description": "Mongoose schemas and models for LibreChat",
"type": "module",
"main": "dist/index.cjs",
@ -61,7 +61,7 @@
"jsonwebtoken": "^9.0.2",
"klona": "^2.0.6",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lodash": "^4.17.23",
"meilisearch": "^0.38.0",
"mongoose": "^8.12.1",
"nanoid": "^3.3.7",

View file

@ -625,4 +625,394 @@ describe('Meilisearch Mongoose plugin', () => {
await expect(conversationModel.syncWithMeili()).rejects.toThrow('Custom sync error');
});
});
// Test suite for the Meili plugin's `cleanupMeiliIndex(index, keyField, batchSize, delayMs)`:
// the method pages through the MeiliSearch index via getDocuments({ limit, offset }),
// deletes documents whose key field no longer exists in MongoDB ("orphans"), and
// advances the paging offset by (batchSize - deletedCount) so no document is skipped.
describe('cleanupMeiliIndex', () => {
let mockGetDocuments: jest.Mock;
// Re-create the getDocuments mock and clear the shared delete mock before every test,
// then wire mockIndex() to return a stub MeiliSearch index backed by the shared mocks.
beforeEach(() => {
mockGetDocuments = jest.fn();
mockDeleteDocuments.mockClear();
mockIndex.mockReturnValue({
getRawInfo: jest.fn(),
updateSettings: jest.fn(),
addDocuments: mockAddDocuments,
addDocumentsInBatches: mockAddDocumentsInBatches,
updateDocuments: mockUpdateDocuments,
deleteDocument: mockDeleteDocument,
deleteDocuments: mockDeleteDocuments,
getDocument: mockGetDocument,
getDocuments: mockGetDocuments,
});
});
// Basic orphan removal: documents present in MeiliSearch but absent from MongoDB are deleted.
test('cleanupMeiliIndex deletes orphaned documents from MeiliSearch', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
const existingConvoId = new mongoose.Types.ObjectId().toString();
const orphanedConvoId1 = new mongoose.Types.ObjectId().toString();
const orphanedConvoId2 = new mongoose.Types.ObjectId().toString();
// Create one document in MongoDB
await conversationModel.collection.insertOne({
conversationId: existingConvoId,
user: new mongoose.Types.ObjectId(),
title: 'Existing Conversation',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
});
// Mock MeiliSearch to return 3 documents (1 exists in MongoDB, 2 are orphaned)
mockGetDocuments.mockResolvedValueOnce({
results: [
{ conversationId: existingConvoId },
{ conversationId: orphanedConvoId1 },
{ conversationId: orphanedConvoId2 },
],
});
const indexMock = mockIndex();
await conversationModel.cleanupMeiliIndex(indexMock, 'conversationId', 100, 0);
// Should delete the 2 orphaned documents
expect(mockDeleteDocuments).toHaveBeenCalledWith([orphanedConvoId1, orphanedConvoId2]);
});
// Paging math: after deletions the next fetch offset is previousOffset + (batchSize - deletedCount).
test('cleanupMeiliIndex handles offset correctly when documents are deleted', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
const existingIds = [
new mongoose.Types.ObjectId().toString(),
new mongoose.Types.ObjectId().toString(),
new mongoose.Types.ObjectId().toString(),
];
const orphanedIds = [
new mongoose.Types.ObjectId().toString(),
new mongoose.Types.ObjectId().toString(),
];
// Create existing documents in MongoDB
for (const id of existingIds) {
await messageModel.collection.insertOne({
messageId: id,
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: true,
expiredAt: null,
});
}
// Mock MeiliSearch to return batches with mixed existing and orphaned documents
// First batch: 3 documents (1 existing, 2 orphaned) with batchSize=3
mockGetDocuments
.mockResolvedValueOnce({
results: [
{ messageId: existingIds[0] },
{ messageId: orphanedIds[0] },
{ messageId: orphanedIds[1] },
],
})
// Second batch: should use offset=1 (3 - 2 deleted = 1)
// results.length=2 < batchSize=3, so loop should stop after this
.mockResolvedValueOnce({
results: [{ messageId: existingIds[1] }, { messageId: existingIds[2] }],
});
const indexMock = mockIndex();
await messageModel.cleanupMeiliIndex(indexMock, 'messageId', 3, 0);
// Should have called getDocuments with correct offsets
expect(mockGetDocuments).toHaveBeenCalledTimes(2);
expect(mockGetDocuments).toHaveBeenNthCalledWith(1, { limit: 3, offset: 0 });
// After deleting 2 documents, offset should be: 0 + (3 - 2) = 1
expect(mockGetDocuments).toHaveBeenNthCalledWith(2, { limit: 3, offset: 1 });
// Should delete only the orphaned documents
expect(mockDeleteDocuments).toHaveBeenCalledWith([orphanedIds[0], orphanedIds[1]]);
});
// Documents that exist in both MongoDB and MeiliSearch must never be deleted.
test('cleanupMeiliIndex preserves existing documents', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
const existingId1 = new mongoose.Types.ObjectId().toString();
const existingId2 = new mongoose.Types.ObjectId().toString();
// Create documents in MongoDB
await conversationModel.collection.insertMany([
{
conversationId: existingId1,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 1',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
{
conversationId: existingId2,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 2',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
]);
// Mock MeiliSearch to return the same documents
mockGetDocuments.mockResolvedValueOnce({
results: [{ conversationId: existingId1 }, { conversationId: existingId2 }],
});
const indexMock = mockIndex();
await conversationModel.cleanupMeiliIndex(indexMock, 'conversationId', 100, 0);
// Should NOT delete any documents
expect(mockDeleteDocuments).not.toHaveBeenCalled();
});
// An empty MeiliSearch index results in a single fetch and no delete calls.
test('cleanupMeiliIndex handles empty MeiliSearch index', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
// Mock empty MeiliSearch index
mockGetDocuments.mockResolvedValueOnce({
results: [],
});
const indexMock = mockIndex();
await messageModel.cleanupMeiliIndex(indexMock, 'messageId', 100, 0);
// Should not attempt to delete anything
expect(mockDeleteDocuments).not.toHaveBeenCalled();
expect(mockGetDocuments).toHaveBeenCalledTimes(1);
});
// A batch shorter than batchSize signals the index is exhausted; no further fetches occur.
test('cleanupMeiliIndex stops when results.length < batchSize', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
const id1 = new mongoose.Types.ObjectId().toString();
const id2 = new mongoose.Types.ObjectId().toString();
await conversationModel.collection.insertMany([
{
conversationId: id1,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 1',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
{
conversationId: id2,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 2',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
]);
// Mock: results.length (2) is less than batchSize (100), should process once and stop
mockGetDocuments.mockResolvedValueOnce({
results: [{ conversationId: id1 }, { conversationId: id2 }],
});
const indexMock = mockIndex();
await conversationModel.cleanupMeiliIndex(indexMock, 'conversationId', 100, 0);
// Should only call getDocuments once
expect(mockGetDocuments).toHaveBeenCalledTimes(1);
expect(mockDeleteDocuments).not.toHaveBeenCalled();
});
// Mixed batches: orphans are deleted per batch and each next offset shrinks by the delete count.
test('cleanupMeiliIndex handles multiple batches correctly', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
const existingIds = Array.from({ length: 5 }, () => new mongoose.Types.ObjectId().toString());
const orphanedIds = Array.from({ length: 3 }, () => new mongoose.Types.ObjectId().toString());
// Create existing documents in MongoDB
for (const id of existingIds) {
await messageModel.collection.insertOne({
messageId: id,
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: true,
expiredAt: null,
});
}
// Mock multiple batches with batchSize=3
mockGetDocuments
// Batch 1: 2 existing, 1 orphaned
.mockResolvedValueOnce({
results: [
{ messageId: existingIds[0] },
{ messageId: existingIds[1] },
{ messageId: orphanedIds[0] },
],
})
// Batch 2: offset should be 0 + (3 - 1) = 2
.mockResolvedValueOnce({
results: [
{ messageId: existingIds[2] },
{ messageId: orphanedIds[1] },
{ messageId: orphanedIds[2] },
],
})
// Batch 3: offset should be 2 + (3 - 2) = 3
.mockResolvedValueOnce({
results: [{ messageId: existingIds[3] }, { messageId: existingIds[4] }],
});
const indexMock = mockIndex();
await messageModel.cleanupMeiliIndex(indexMock, 'messageId', 3, 0);
expect(mockGetDocuments).toHaveBeenCalledTimes(3);
expect(mockGetDocuments).toHaveBeenNthCalledWith(1, { limit: 3, offset: 0 });
expect(mockGetDocuments).toHaveBeenNthCalledWith(2, { limit: 3, offset: 2 });
expect(mockGetDocuments).toHaveBeenNthCalledWith(3, { limit: 3, offset: 3 });
// Should have deleted orphaned documents in batches
expect(mockDeleteDocuments).toHaveBeenCalledTimes(2);
expect(mockDeleteDocuments).toHaveBeenNthCalledWith(1, [orphanedIds[0]]);
expect(mockDeleteDocuments).toHaveBeenNthCalledWith(2, [orphanedIds[1], orphanedIds[2]]);
});
// delayMs > 0 inserts a wait between batch fetches (here 100ms waits between 3 fetches);
// measured wall-clock time is used to confirm the delays actually happened.
test('cleanupMeiliIndex handles delay between batches', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
const id1 = new mongoose.Types.ObjectId().toString();
const id2 = new mongoose.Types.ObjectId().toString();
await conversationModel.collection.insertMany([
{
conversationId: id1,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 1',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
{
conversationId: id2,
user: new mongoose.Types.ObjectId(),
title: 'Conversation 2',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
},
]);
mockGetDocuments
.mockResolvedValueOnce({
results: [{ conversationId: id1 }],
})
.mockResolvedValueOnce({
results: [{ conversationId: id2 }],
})
.mockResolvedValueOnce({
results: [],
});
const indexMock = mockIndex();
const startTime = Date.now();
await conversationModel.cleanupMeiliIndex(indexMock, 'conversationId', 1, 100);
const endTime = Date.now();
// Should have taken at least 200ms due to delay (2 delays between 3 batches)
expect(endTime - startTime).toBeGreaterThanOrEqual(200);
expect(mockGetDocuments).toHaveBeenCalledTimes(3);
});
// A getDocuments rejection must be swallowed by cleanupMeiliIndex (caught and logged), not rethrown.
test('cleanupMeiliIndex handles errors gracefully', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
mockGetDocuments.mockRejectedValueOnce(new Error('MeiliSearch connection error'));
const indexMock = mockIndex();
// Should not throw, errors are caught and logged
await expect(
messageModel.cleanupMeiliIndex(indexMock, 'messageId', 100, 0),
).resolves.not.toThrow();
});
// With an empty MongoDB collection every MeiliSearch document counts as an orphan.
test('cleanupMeiliIndex with all documents being orphaned', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
const orphanedId1 = new mongoose.Types.ObjectId().toString();
const orphanedId2 = new mongoose.Types.ObjectId().toString();
const orphanedId3 = new mongoose.Types.ObjectId().toString();
// MeiliSearch has documents but MongoDB is empty
mockGetDocuments.mockResolvedValueOnce({
results: [
{ conversationId: orphanedId1 },
{ conversationId: orphanedId2 },
{ conversationId: orphanedId3 },
],
});
const indexMock = mockIndex();
await conversationModel.cleanupMeiliIndex(indexMock, 'conversationId', 100, 0);
// Should delete all documents since none exist in MongoDB
expect(mockDeleteDocuments).toHaveBeenCalledWith([orphanedId1, orphanedId2, orphanedId3]);
});
// Edge case of the paging math: deleting the whole batch advances the offset by
// batchSize - batchSize = 0, so the next fetch re-reads from the same offset.
test('cleanupMeiliIndex adjusts offset to 0 when all batch documents are deleted', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
const orphanedIds = Array.from({ length: 3 }, () => new mongoose.Types.ObjectId().toString());
const existingId = new mongoose.Types.ObjectId().toString();
// Create one existing document
await messageModel.collection.insertOne({
messageId: existingId,
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: true,
expiredAt: null,
});
mockGetDocuments
// Batch 1: All 3 are orphaned
.mockResolvedValueOnce({
results: [
{ messageId: orphanedIds[0] },
{ messageId: orphanedIds[1] },
{ messageId: orphanedIds[2] },
],
})
// Batch 2: offset should be 0 + (3 - 3) = 0
.mockResolvedValueOnce({
results: [{ messageId: existingId }],
});
const indexMock = mockIndex();
await messageModel.cleanupMeiliIndex(indexMock, 'messageId', 3, 0);
expect(mockGetDocuments).toHaveBeenCalledTimes(2);
expect(mockGetDocuments).toHaveBeenNthCalledWith(1, { limit: 3, offset: 0 });
// After deleting all 3, offset remains at 0
expect(mockGetDocuments).toHaveBeenNthCalledWith(2, { limit: 3, offset: 0 });
expect(mockDeleteDocuments).toHaveBeenCalledWith([
orphanedIds[0],
orphanedIds[1],
orphanedIds[2],
]);
});
});
});

View file

@ -304,8 +304,12 @@ const createMeiliMongooseModel = ({
await index.deleteDocuments(toDelete.map(String));
logger.debug(`[cleanupMeiliIndex] Deleted ${toDelete.length} orphaned documents`);
}
// if fetch documents request returns less documents than limit, all documents are processed
if (batch.results.length < batchSize) {
break;
}
offset += batchSize;
offset += batchSize - toDelete.length;
// Add delay between batches
if (delayMs > 0) {