Merge branch 'main' into feature/cohere-base-url

This commit is contained in:
Benedikt Óskarsson 2026-01-20 07:36:40 +00:00 committed by GitHub
commit f60dec2682
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
56 changed files with 2605 additions and 639 deletions

View file

@ -331,10 +331,6 @@ FLUX_API_BASE_URL=https://api.us1.bfl.ai
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# Stable Diffusion
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860

View file

@ -1,4 +1,4 @@
# v0.8.2-rc2
# v0.8.2-rc3
# Base node image
FROM node:20-alpine AS node

View file

@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.2-rc2
# v0.8.2-rc3
# Set configurable max-old-space-size with default
ARG NODE_MAX_OLD_SPACE_SIZE=6144

View file

@ -5,7 +5,6 @@ const DALLE3 = require('./structured/DALLE3');
const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
const StructuredWolfram = require('./structured/Wolfram');
const createYouTubeTools = require('./structured/YouTube');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
@ -25,7 +24,6 @@ module.exports = {
GoogleSearchAPI,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
createOpenAIImageTools,
createGeminiImageTool,

View file

@ -30,20 +30,6 @@
}
]
},
{
"name": "YouTube",
"pluginKey": "youtube",
"toolkit": true,
"description": "Get YouTube video information, retrieve comments, analyze transcripts and search for videos.",
"icon": "https://www.youtube.com/s/desktop/7449ebf7/img/favicon_144x144.png",
"authConfig": [
{
"authField": "YOUTUBE_API_KEY",
"label": "YouTube API Key",
"description": "Your YouTube Data API v3 key."
}
]
},
{
"name": "OpenAI Image Tools",
"pluginKey": "image_gen_oai",

View file

@ -1,137 +0,0 @@
const { ytToolkit } = require('@librechat/api');
const { tool } = require('@langchain/core/tools');
const { youtube } = require('@googleapis/youtube');
const { logger } = require('@librechat/data-schemas');
const { YoutubeTranscript } = require('youtube-transcript');
const { getApiKey } = require('./credentials');
/**
 * Resolves a YouTube video ID from either a bare 11-character ID or a
 * youtube.com / youtu.be URL (watch, embed, shorts, live, v, or bare path).
 * @param {string} url - Raw video ID or YouTube URL.
 * @returns {string|null} The 11-character video ID, or null when none is found.
 */
function extractVideoId(url) {
  // A bare ID is exactly 11 characters from the YouTube ID alphabet.
  if (/^[a-zA-Z0-9_-]{11}$/.test(url)) {
    return url;
  }
  // Otherwise pull the ID out of a URL; the pattern is anchored at the end
  // so trailing query strings/fragments (non-space) are tolerated.
  const urlPattern = new RegExp(
    '(?:youtu\\.be/|youtube(?:\\.com)?/(?:' +
      '(?:watch\\?v=)|(?:embed/)|(?:shorts/)|(?:live/)|(?:v/)|(?:/))?)' +
      '([a-zA-Z0-9_-]{11})(?:\\S+)?$',
  );
  const found = urlPattern.exec(url);
  return found === null ? null : found[1];
}
/**
 * Flattens a YouTube transcript response into a single plain-text string.
 * @param {Array<{text: string}>} transcriptResponse - Transcript segments as
 *   returned by `YoutubeTranscript.fetchTranscript`.
 * @returns {string} Space-joined, trimmed segment text; '' when the input is
 *   not an array.
 */
function parseTranscript(transcriptResponse) {
  if (!Array.isArray(transcriptResponse)) {
    return '';
  }
  return transcriptResponse
    .map((entry) => entry.text.trim())
    .filter((text) => text)
    .join(' ')
    // Transcript text arrives HTML-encoded; restore literal apostrophes.
    // Fix: the source literal had been corrupted to ''' (a syntax error)
    // by HTML-entity decoding of '&#39;'.
    .replaceAll('&#39;', "'");
}
/**
 * Builds the YouTube toolkit: search, video-info, comments, and transcript tools.
 * @param {Object} [fields] - Optional config; may carry `apiKey`,
 *   `YOUTUBE_API_KEY`, and `override` for credential resolution.
 * @returns {Array} The four LangChain tools: [search, info, comments, transcript].
 */
function createYouTubeTools(fields = {}) {
  const envVar = 'YOUTUBE_API_KEY';
  const override = fields.override ?? false;
  // Prefer an explicit apiKey, then the env-named field, then the stored credential.
  const apiKey = fields.apiKey ?? fields[envVar] ?? getApiKey(envVar, override);
  const client = youtube({ version: 'v3', auth: apiKey });

  // Shared guard: resolve a video ID or fail with the standard error.
  const requireVideoId = (url) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
    return videoId;
  };

  const searchTool = tool(async ({ query, maxResults = 5 }) => {
    const { data } = await client.search.list({
      part: 'snippet',
      q: query,
      type: 'video',
      maxResults: maxResults || 5,
    });
    const results = data.items.map((item) => ({
      title: item.snippet.title,
      description: item.snippet.description,
      url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
    }));
    return JSON.stringify(results, null, 2);
  }, ytToolkit.youtube_search);

  const infoTool = tool(async ({ url }) => {
    const videoId = requireVideoId(url);
    const { data } = await client.videos.list({
      part: 'snippet,statistics',
      id: videoId,
    });
    if (!data.items?.length) {
      throw new Error('Video not found');
    }
    const [video] = data.items;
    return JSON.stringify(
      {
        title: video.snippet.title,
        description: video.snippet.description,
        views: video.statistics.viewCount,
        likes: video.statistics.likeCount,
        comments: video.statistics.commentCount,
      },
      null,
      2,
    );
  }, ytToolkit.youtube_info);

  const commentsTool = tool(async ({ url, maxResults = 10 }) => {
    const videoId = requireVideoId(url);
    const { data } = await client.commentThreads.list({
      part: 'snippet',
      videoId,
      maxResults: maxResults || 10,
    });
    const results = data.items.map((item) => ({
      author: item.snippet.topLevelComment.snippet.authorDisplayName,
      text: item.snippet.topLevelComment.snippet.textDisplay,
      likes: item.snippet.topLevelComment.snippet.likeCount,
    }));
    return JSON.stringify(results, null, 2);
  }, ytToolkit.youtube_comments);

  const transcriptTool = tool(async ({ url }) => {
    const videoId = requireVideoId(url);
    try {
      // Prefer English, then German; fall through to the default track.
      for (const lang of ['en', 'de']) {
        try {
          return parseTranscript(await YoutubeTranscript.fetchTranscript(videoId, { lang }));
        } catch (e) {
          logger.error(e);
        }
      }
      return parseTranscript(await YoutubeTranscript.fetchTranscript(videoId));
    } catch (error) {
      // Wrap the last (default-track) failure for the caller.
      throw new Error(`Failed to fetch transcript: ${error.message}`);
    }
  }, ytToolkit.youtube_transcript);

  return [searchTool, infoTool, commentsTool, transcriptTool];
}
module.exports = createYouTubeTools;

View file

@ -34,7 +34,6 @@ const {
StructuredACS,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
createGeminiImageTool,
createOpenAIImageTools,
@ -185,11 +184,6 @@ const loadTools = async ({
};
const customConstructors = {
youtube: async (_toolContextMap) => {
const authFields = getAuthFields('youtube');
const authValues = await loadAuthValues({ userId: user, authFields });
return createYouTubeTools(authValues);
},
image_gen_oai: async (toolContextMap) => {
const authFields = getAuthFields('image_gen_oai');
const authValues = await loadAuthValues({ userId: user, authFields });

View file

@ -13,6 +13,11 @@ const searchEnabled = isEnabled(process.env.SEARCH);
const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
let currentTimeout = null;
const defaultSyncThreshold = 1000;
const syncThreshold = process.env.MEILI_SYNC_THRESHOLD
? parseInt(process.env.MEILI_SYNC_THRESHOLD, 10)
: defaultSyncThreshold;
class MeiliSearchClient {
static instance = null;
@ -221,25 +226,25 @@ async function performSync(flowManager, flowId, flowType) {
}
// Check if we need to sync messages
logger.info('[indexSync] Requesting message sync progress...');
const messageProgress = await Message.getSyncProgress();
if (!messageProgress.isComplete || settingsUpdated) {
logger.info(
`[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
);
// Check if we should do a full sync or incremental
const messageCount = await Message.countDocuments();
const messageCount = messageProgress.totalDocuments;
const messagesIndexed = messageProgress.totalProcessed;
const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);
const unindexedMessages = messageCount - messagesIndexed;
if (messageCount - messagesIndexed > syncThreshold) {
logger.info('[indexSync] Starting full message sync due to large difference');
await Message.syncWithMeili();
messagesSync = true;
} else if (messageCount !== messagesIndexed) {
logger.warn('[indexSync] Messages out of sync, performing incremental sync');
if (settingsUpdated || unindexedMessages > syncThreshold) {
logger.info(`[indexSync] Starting message sync (${unindexedMessages} unindexed)`);
await Message.syncWithMeili();
messagesSync = true;
} else if (unindexedMessages > 0) {
logger.info(
`[indexSync] ${unindexedMessages} messages unindexed (below threshold: ${syncThreshold}, skipping)`,
);
}
} else {
logger.info(
@ -254,18 +259,18 @@ async function performSync(flowManager, flowId, flowType) {
`[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
);
const convoCount = await Conversation.countDocuments();
const convoCount = convoProgress.totalDocuments;
const convosIndexed = convoProgress.totalProcessed;
const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);
if (convoCount - convosIndexed > syncThreshold) {
logger.info('[indexSync] Starting full conversation sync due to large difference');
await Conversation.syncWithMeili();
convosSync = true;
} else if (convoCount !== convosIndexed) {
logger.warn('[indexSync] Convos out of sync, performing incremental sync');
const unindexedConvos = convoCount - convosIndexed;
if (settingsUpdated || unindexedConvos > syncThreshold) {
logger.info(`[indexSync] Starting convos sync (${unindexedConvos} unindexed)`);
await Conversation.syncWithMeili();
convosSync = true;
} else if (unindexedConvos > 0) {
logger.info(
`[indexSync] ${unindexedConvos} convos unindexed (below threshold: ${syncThreshold}, skipping)`,
);
}
} else {
logger.info(

465
api/db/indexSync.spec.js Normal file
View file

@ -0,0 +1,465 @@
/**
* Unit tests for performSync() function in indexSync.js
*
* Tests use real mongoose with mocked model methods, only mocking external calls.
*/
const mongoose = require('mongoose');
// Mock only external dependencies (not internal classes/models)
const mockLogger = {
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
};
const mockMeiliHealth = jest.fn();
const mockMeiliIndex = jest.fn();
const mockBatchResetMeiliFlags = jest.fn();
const mockIsEnabled = jest.fn();
const mockGetLogStores = jest.fn();
// Create mock models that will be reused
const createMockModel = (collectionName) => ({
collection: { name: collectionName },
getSyncProgress: jest.fn(),
syncWithMeili: jest.fn(),
countDocuments: jest.fn(),
});
const originalMessageModel = mongoose.models.Message;
const originalConversationModel = mongoose.models.Conversation;
// Mock external modules
jest.mock('@librechat/data-schemas', () => ({
logger: mockLogger,
}));
jest.mock('meilisearch', () => ({
MeiliSearch: jest.fn(() => ({
health: mockMeiliHealth,
index: mockMeiliIndex,
})),
}));
jest.mock('./utils', () => ({
batchResetMeiliFlags: mockBatchResetMeiliFlags,
}));
jest.mock('@librechat/api', () => ({
isEnabled: mockIsEnabled,
FlowStateManager: jest.fn(),
}));
jest.mock('~/cache', () => ({
getLogStores: mockGetLogStores,
}));
// Set environment before module load
process.env.MEILI_HOST = 'http://localhost:7700';
process.env.MEILI_MASTER_KEY = 'test-key';
process.env.SEARCH = 'true';
process.env.MEILI_SYNC_THRESHOLD = '1000'; // Set threshold before module loads
describe('performSync() - syncThreshold logic', () => {
const ORIGINAL_ENV = process.env;
let Message;
let Conversation;
beforeAll(() => {
Message = createMockModel('messages');
Conversation = createMockModel('conversations');
mongoose.models.Message = Message;
mongoose.models.Conversation = Conversation;
});
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Reset modules to ensure fresh load of indexSync.js and its top-level consts (like syncThreshold)
jest.resetModules();
// Set up environment
process.env = { ...ORIGINAL_ENV };
process.env.MEILI_HOST = 'http://localhost:7700';
process.env.MEILI_MASTER_KEY = 'test-key';
process.env.SEARCH = 'true';
delete process.env.MEILI_NO_SYNC;
// Re-ensure models are available in mongoose after resetModules
// We must require mongoose again to get the fresh instance that indexSync will use
const mongoose = require('mongoose');
mongoose.models.Message = Message;
mongoose.models.Conversation = Conversation;
// Mock isEnabled
mockIsEnabled.mockImplementation((val) => val === 'true' || val === true);
// Mock MeiliSearch client responses
mockMeiliHealth.mockResolvedValue({ status: 'available' });
mockMeiliIndex.mockReturnValue({
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: ['user'] }),
updateSettings: jest.fn().mockResolvedValue({}),
search: jest.fn().mockResolvedValue({ hits: [] }),
});
mockBatchResetMeiliFlags.mockResolvedValue(undefined);
});
afterEach(() => {
process.env = ORIGINAL_ENV;
});
afterAll(() => {
mongoose.models.Message = originalMessageModel;
mongoose.models.Conversation = originalConversationModel;
});
test('triggers sync when unindexed messages exceed syncThreshold', async () => {
// Arrange: Set threshold before module load
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Arrange: 1050 unindexed messages > 1000 threshold
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 1150, // 1050 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 50,
isComplete: true,
});
Message.syncWithMeili.mockResolvedValue(undefined);
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls
expect(Message.countDocuments).not.toHaveBeenCalled();
expect(Conversation.countDocuments).not.toHaveBeenCalled();
// Assert: Message sync triggered because 1050 > 1000
expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Messages need syncing: 100/1150 indexed',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Starting message sync (1050 unindexed)',
);
// Assert: Conversation sync NOT triggered (already complete)
expect(Conversation.syncWithMeili).not.toHaveBeenCalled();
});
test('skips sync when unindexed messages are below syncThreshold', async () => {
// Arrange: 50 unindexed messages < 1000 threshold
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 150, // 50 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 50,
isComplete: true,
});
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls
expect(Message.countDocuments).not.toHaveBeenCalled();
expect(Conversation.countDocuments).not.toHaveBeenCalled();
// Assert: Message sync NOT triggered because 50 < 1000
expect(Message.syncWithMeili).not.toHaveBeenCalled();
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Messages need syncing: 100/150 indexed',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] 50 messages unindexed (below threshold: 1000, skipping)',
);
// Assert: Conversation sync NOT triggered (already complete)
expect(Conversation.syncWithMeili).not.toHaveBeenCalled();
});
test('respects syncThreshold at boundary (exactly at threshold)', async () => {
// Arrange: 1000 unindexed messages = 1000 threshold (NOT greater than)
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 1100, // 1000 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 0,
totalDocuments: 0,
isComplete: true,
});
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls
expect(Message.countDocuments).not.toHaveBeenCalled();
// Assert: Message sync NOT triggered because 1000 is NOT > 1000
expect(Message.syncWithMeili).not.toHaveBeenCalled();
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Messages need syncing: 100/1100 indexed',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] 1000 messages unindexed (below threshold: 1000, skipping)',
);
});
test('triggers sync when unindexed is threshold + 1', async () => {
// Arrange: 1001 unindexed messages > 1000 threshold
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 1101, // 1001 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 0,
totalDocuments: 0,
isComplete: true,
});
Message.syncWithMeili.mockResolvedValue(undefined);
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls
expect(Message.countDocuments).not.toHaveBeenCalled();
// Assert: Message sync triggered because 1001 > 1000
expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Messages need syncing: 100/1101 indexed',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Starting message sync (1001 unindexed)',
);
});
test('uses totalDocuments from convoProgress for conversation sync decisions', async () => {
// Arrange: Messages complete, conversations need sync
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 100,
isComplete: true,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 1100, // 1050 unindexed > 1000 threshold
isComplete: false,
});
Conversation.syncWithMeili.mockResolvedValue(undefined);
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls (the optimization)
expect(Message.countDocuments).not.toHaveBeenCalled();
expect(Conversation.countDocuments).not.toHaveBeenCalled();
// Assert: Only conversation sync triggered
expect(Message.syncWithMeili).not.toHaveBeenCalled();
expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Conversations need syncing: 50/1100 indexed',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Starting convos sync (1050 unindexed)',
);
});
test('skips sync when collections are fully synced', async () => {
// Arrange: Everything already synced
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 100,
isComplete: true,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 50,
isComplete: true,
});
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: No countDocuments calls
expect(Message.countDocuments).not.toHaveBeenCalled();
expect(Conversation.countDocuments).not.toHaveBeenCalled();
// Assert: No sync triggered
expect(Message.syncWithMeili).not.toHaveBeenCalled();
expect(Conversation.syncWithMeili).not.toHaveBeenCalled();
// Assert: Correct logs
expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Messages are fully synced: 100/100');
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Conversations are fully synced: 50/50',
);
});
test('triggers message sync when settingsUpdated even if below syncThreshold', async () => {
// Arrange: Only 50 unindexed messages (< 1000 threshold), but settings were updated
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 150, // 50 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 50,
isComplete: true,
});
Message.syncWithMeili.mockResolvedValue(undefined);
// Mock settings update scenario
mockMeiliIndex.mockReturnValue({
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }), // No user field
updateSettings: jest.fn().mockResolvedValue({}),
search: jest.fn().mockResolvedValue({ hits: [] }),
});
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: Flags were reset due to settings update
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);
// Assert: Message sync triggered despite being below threshold (50 < 1000)
expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Starting message sync (50 unindexed)',
);
});
test('triggers conversation sync when settingsUpdated even if below syncThreshold', async () => {
// Arrange: Messages complete, conversations have 50 unindexed (< 1000 threshold), but settings were updated
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 100,
isComplete: true,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 100, // 50 unindexed
isComplete: false,
});
Conversation.syncWithMeili.mockResolvedValue(undefined);
// Mock settings update scenario
mockMeiliIndex.mockReturnValue({
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }), // No user field
updateSettings: jest.fn().mockResolvedValue({}),
search: jest.fn().mockResolvedValue({ hits: [] }),
});
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: Flags were reset due to settings update
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);
// Assert: Conversation sync triggered despite being below threshold (50 < 1000)
expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
);
expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Starting convos sync (50 unindexed)');
});
test('triggers both message and conversation sync when settingsUpdated even if both below syncThreshold', async () => {
// Arrange: Set threshold before module load
process.env.MEILI_SYNC_THRESHOLD = '1000';
// Arrange: Both have documents below threshold (50 each), but settings were updated
Message.getSyncProgress.mockResolvedValue({
totalProcessed: 100,
totalDocuments: 150, // 50 unindexed
isComplete: false,
});
Conversation.getSyncProgress.mockResolvedValue({
totalProcessed: 50,
totalDocuments: 100, // 50 unindexed
isComplete: false,
});
Message.syncWithMeili.mockResolvedValue(undefined);
Conversation.syncWithMeili.mockResolvedValue(undefined);
// Mock settings update scenario
mockMeiliIndex.mockReturnValue({
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }), // No user field
updateSettings: jest.fn().mockResolvedValue({}),
search: jest.fn().mockResolvedValue({ hits: [] }),
});
// Act
const indexSync = require('./indexSync');
await indexSync();
// Assert: Flags were reset due to settings update
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);
// Assert: Both syncs triggered despite both being below threshold
expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
);
expect(mockLogger.info).toHaveBeenCalledWith(
'[indexSync] Starting message sync (50 unindexed)',
);
expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Starting convos sync (50 unindexed)');
});
});

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@ -43,10 +43,9 @@
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.27.0",
"@google/genai": "^1.19.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.77",
"@librechat/agents": "^3.0.776",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@ -109,10 +108,9 @@
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
"undici": "^7.10.0",
"undici": "^7.18.2",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^5.0.0",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {

View file

@ -5,7 +5,9 @@ const { logger } = require('@librechat/data-schemas');
const {
agentCreateSchema,
agentUpdateSchema,
refreshListAvatars,
mergeAgentOcrConversion,
MAX_AVATAR_REFRESH_AGENTS,
convertOcrToContextInPlace,
} = require('@librechat/api');
const {
@ -56,46 +58,6 @@ const systemTools = {
const MAX_SEARCH_LEN = 100;
const escapeRegex = (str = '') => str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
/**
* Opportunistically refreshes S3-backed avatars for agent list responses.
* Only list responses are refreshed because they're the highest-traffic surface and
* the avatar URLs have a short-lived TTL. The refresh is cached per-user for 30 minutes
* via {@link CacheKeys.S3_EXPIRY_INTERVAL} so we refresh once per interval at most.
* @param {Array} agents - Agents being enriched with S3-backed avatars
* @param {string} userId - User identifier used for the cache refresh key
*/
const refreshListAvatars = async (agents, userId) => {
  // Nothing to do for an empty or missing list.
  if (!agents?.length) {
    return;
  }
  // Per-user throttle: if this user's list was refreshed within the cache
  // TTL, skip the S3 round-trips entirely.
  const cache = getLogStores(CacheKeys.S3_EXPIRY_INTERVAL);
  const refreshKey = `${userId}:agents_list`;
  const alreadyChecked = await cache.get(refreshKey);
  if (alreadyChecked) {
    return;
  }
  // Refresh all S3-backed avatars concurrently; agents without an S3 avatar
  // (or without a filepath) are left untouched.
  await Promise.all(
    agents.map(async (agent) => {
      if (agent?.avatar?.source !== FileSources.s3 || !agent?.avatar?.filepath) {
        return;
      }
      try {
        const newPath = await refreshS3Url(agent.avatar);
        // Only replace the avatar object when the signed URL actually changed;
        // mutation is in-place on the list entry being returned to the client.
        if (newPath && newPath !== agent.avatar.filepath) {
          agent.avatar = { ...agent.avatar, filepath: newPath };
        }
      } catch (err) {
        // Best-effort: a single failed refresh must not fail the list response.
        logger.debug('[/Agents] Avatar refresh error for list item', err);
      }
    }),
  );
  // Mark this user as refreshed so subsequent list calls within the window skip work.
  await cache.set(refreshKey, true, Time.THIRTY_MINUTES);
};
/**
* Creates an Agent.
* @route POST /Agents
@ -544,6 +506,35 @@ const getListAgentsHandler = async (req, res) => {
requiredPermissions: PermissionBits.VIEW,
});
/**
* Refresh all S3 avatars for this user's accessible agent set (not only the current page)
* This addresses page-size limits preventing refresh of agents beyond the first page
*/
const cache = getLogStores(CacheKeys.S3_EXPIRY_INTERVAL);
const refreshKey = `${userId}:agents_avatar_refresh`;
const alreadyChecked = await cache.get(refreshKey);
if (alreadyChecked) {
logger.debug('[/Agents] S3 avatar refresh already checked, skipping');
} else {
try {
const fullList = await getListAgentsByAccess({
accessibleIds,
otherParams: {},
limit: MAX_AVATAR_REFRESH_AGENTS,
after: null,
});
await refreshListAvatars({
agents: fullList?.data ?? [],
userId,
refreshS3Url,
updateAgent,
});
await cache.set(refreshKey, true, Time.THIRTY_MINUTES);
} catch (err) {
logger.error('[/Agents] Error refreshing avatars for full list: %o', err);
}
}
// Use the new ACL-aware function
const data = await getListAgentsByAccess({
accessibleIds,
@ -571,15 +562,9 @@ const getListAgentsHandler = async (req, res) => {
return agent;
});
// Opportunistically refresh S3 avatar URLs for list results with caching
try {
await refreshListAvatars(data.data, req.user.id);
} catch (err) {
logger.debug('[/Agents] Skipping avatar refresh for list', err);
}
return res.json(data);
} catch (error) {
logger.error('[/Agents] Error listing Agents', error);
logger.error('[/Agents] Error listing Agents: %o', error);
res.status(500).json({ error: error.message });
}
};

View file

@ -1,8 +1,9 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { nanoid } = require('nanoid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { v4: uuidv4 } = require('uuid');
const { agentSchema } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
// Only mock the dependencies that are not database-related
jest.mock('~/server/services/Config', () => ({
@ -54,6 +55,15 @@ jest.mock('~/models', () => ({
getCategoriesWithCounts: jest.fn(),
}));
// Mock cache for S3 avatar refresh tests
const mockCache = {
get: jest.fn(),
set: jest.fn(),
};
jest.mock('~/cache', () => ({
getLogStores: jest.fn(() => mockCache),
}));
const {
createAgent: createAgentHandler,
updateAgent: updateAgentHandler,
@ -65,6 +75,8 @@ const {
findPubliclyAccessibleResources,
} = require('~/server/services/PermissionService');
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
*/
@ -1207,4 +1219,349 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
expect(response.data[0].is_promoted).toBe(true);
});
});
describe('S3 Avatar Refresh', () => {
let userA, userB;
let agentWithS3Avatar, agentWithLocalAvatar, agentOwnedByOther;
beforeEach(async () => {
await Agent.deleteMany({});
jest.clearAllMocks();
// Reset cache mock
mockCache.get.mockResolvedValue(false);
mockCache.set.mockResolvedValue(undefined);
userA = new mongoose.Types.ObjectId();
userB = new mongoose.Types.ObjectId();
// Create agent with S3 avatar owned by userA
agentWithS3Avatar = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent with S3 Avatar',
description: 'Has S3 avatar',
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: FileSources.s3,
filepath: 'old-s3-path.jpg',
},
versions: [
{
name: 'Agent with S3 Avatar',
description: 'Has S3 avatar',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
// Create agent with local avatar owned by userA
agentWithLocalAvatar = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent with Local Avatar',
description: 'Has local avatar',
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: 'local',
filepath: 'local-path.jpg',
},
versions: [
{
name: 'Agent with Local Avatar',
description: 'Has local avatar',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
// Create agent with S3 avatar owned by userB
agentOwnedByOther = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent Owned By Other',
description: 'Owned by userB',
provider: 'openai',
model: 'gpt-4',
author: userB,
avatar: {
source: FileSources.s3,
filepath: 'other-s3-path.jpg',
},
versions: [
{
name: 'Agent Owned By Other',
description: 'Owned by userB',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
});
test('should skip avatar refresh if cache hit', async () => {
mockCache.get.mockResolvedValue(true);
findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Should not call refreshS3Url when cache hit
expect(refreshS3Url).not.toHaveBeenCalled();
});
test('should refresh and persist S3 avatars on cache miss', async () => {
mockCache.get.mockResolvedValue(false);
findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
refreshS3Url.mockResolvedValue('new-s3-path.jpg');
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Verify S3 URL was refreshed
expect(refreshS3Url).toHaveBeenCalled();
// Verify cache was set
expect(mockCache.set).toHaveBeenCalled();
// Verify response was returned
expect(mockRes.json).toHaveBeenCalled();
});
test('should refresh avatars for all accessible agents (VIEW permission)', async () => {
  mockCache.get.mockResolvedValue(false);
  // User A can see both their own agent and userB's agent; VIEW access alone
  // is enough to trigger a refresh for each.
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id, agentOwnedByOther._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockResolvedValue('new-path.jpg');

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  // One refresh per accessible S3-avatar agent.
  expect(refreshS3Url).toHaveBeenCalledTimes(2);
});
test('should skip non-S3 avatars', async () => {
  mockCache.get.mockResolvedValue(false);
  // Mix of a local-avatar agent and an S3-avatar agent: only the latter
  // should go through the refresh path.
  findAccessibleResources.mockResolvedValue([agentWithLocalAvatar._id, agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockResolvedValue('new-path.jpg');

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  expect(refreshS3Url).toHaveBeenCalledTimes(1);
});
test('should not update if S3 URL unchanged', async () => {
  mockCache.get.mockResolvedValue(false);
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  // Refresh yields the same path the agent already has, so no persistence is needed.
  refreshS3Url.mockResolvedValue('old-s3-path.jpg');

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  // The refresh was attempted and the response still goes out.
  expect(refreshS3Url).toHaveBeenCalled();
  expect(res.json).toHaveBeenCalled();
});
test('should handle S3 refresh errors gracefully', async () => {
  mockCache.get.mockResolvedValue(false);
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockRejectedValue(new Error('S3 error'));

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };

  // An S3 failure must not bubble out of the handler…
  await expect(
    getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res),
  ).resolves.not.toThrow();
  // …and the listing response is still produced.
  expect(res.json).toHaveBeenCalled();
});
test('should process agents in batches', async () => {
  mockCache.get.mockResolvedValue(false);

  // Seed 25 S3-avatar agents (more than one batch of 20). Creation stays
  // sequential so the fixture order is deterministic.
  const seeded = [];
  for (let i = 0; i < 25; i++) {
    seeded.push(
      await Agent.create({
        id: `agent_${nanoid(12)}`,
        name: `Agent ${i}`,
        description: `Agent ${i} description`,
        provider: 'openai',
        model: 'gpt-4',
        author: userA,
        avatar: {
          source: FileSources.s3,
          filepath: `path${i}.jpg`,
        },
        versions: [
          {
            name: `Agent ${i}`,
            description: `Agent ${i} description`,
            provider: 'openai',
            model: 'gpt-4',
            createdAt: new Date(),
            updatedAt: new Date(),
          },
        ],
      }),
    );
  }

  findAccessibleResources.mockResolvedValue(seeded.map((agent) => agent._id));
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockImplementation((avatar) =>
    Promise.resolve(avatar.filepath.replace('.jpg', '-new.jpg')),
  );

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  // Every one of the 25 agents should have had its avatar refreshed.
  expect(refreshS3Url).toHaveBeenCalledTimes(25);
});
test('should skip agents without id or author', async () => {
  mockCache.get.mockResolvedValue(false);

  // NOTE(review): despite the test title, this fixture IS created with an `id`
  // (the schema presumably requires one) — the intent appears to be exercising
  // the handler's defensive skip path; confirm whether a truly id-less record
  // can be seeded here.
  const agentWithoutId = await Agent.create({
    id: `agent_${nanoid(12)}`,
    name: 'Agent without ID field',
    description: 'Testing',
    provider: 'openai',
    model: 'gpt-4',
    author: userA,
    avatar: {
      source: FileSources.s3,
      filepath: 'test-path.jpg',
    },
    versions: [
      {
        name: 'Agent without ID field',
        description: 'Testing',
        provider: 'openai',
        model: 'gpt-4',
        createdAt: new Date(),
        updatedAt: new Date(),
      },
    ],
  });

  findAccessibleResources.mockResolvedValue([agentWithoutId._id, agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockResolvedValue('new-path.jpg');

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  // The handler should complete without errors and still respond.
  expect(res.json).toHaveBeenCalled();
});
test('should use MAX_AVATAR_REFRESH_AGENTS limit for full list query', async () => {
  // No accessible agents at all: the handler should still respond cleanly
  // on the full-list (no query filter) path.
  mockCache.get.mockResolvedValue(false);
  findAccessibleResources.mockResolvedValue([]);
  findPubliclyAccessibleResources.mockResolvedValue([]);

  const res = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler({ user: { id: userA.toString(), role: 'USER' }, query: {} }, res);

  expect(res.json).toHaveBeenCalled();
});
});
});

View file

@ -2,11 +2,11 @@ const path = require('path');
const fs = require('fs').promises;
const express = require('express');
const { logger } = require('@librechat/data-schemas');
const { isAgentsEndpoint } = require('librechat-data-provider');
const { isAssistantsEndpoint } = require('librechat-data-provider');
const {
filterFile,
processImageFile,
processAgentFileUpload,
processImageFile,
filterFile,
} = require('~/server/services/Files/process');
const router = express.Router();
@ -21,7 +21,7 @@ router.post('/', async (req, res) => {
metadata.temp_file_id = metadata.file_id;
metadata.file_id = req.file_id;
if (isAgentsEndpoint(metadata.endpoint) && metadata.tool_resource != null) {
if (!isAssistantsEndpoint(metadata.endpoint) && metadata.tool_resource != null) {
return await processAgentFileUpload({ req, res, metadata });
}

View file

@ -5,7 +5,7 @@ const { Calculator } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Tools, ImageVisionTool } = require('librechat-data-provider');
const { getToolkitKey, oaiToolkit, ytToolkit, geminiToolkit } = require('@librechat/api');
const { getToolkitKey, oaiToolkit, geminiToolkit } = require('@librechat/api');
const { toolkits } = require('~/app/clients/tools/manifest');
/**
@ -83,7 +83,6 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
const basicToolInstances = [
new Calculator(),
...Object.values(oaiToolkit),
...Object.values(ytToolkit),
...Object.values(geminiToolkit),
];
for (const toolInstance of basicToolInstances) {

View file

@ -1,8 +1,8 @@
const passport = require('passport');
const session = require('express-session');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const { logger, DEFAULT_SESSION_EXPIRY } = require('@librechat/data-schemas');
const {
openIdJwtLogin,
facebookLogin,
@ -22,11 +22,17 @@ const { getLogStores } = require('~/cache');
*/
async function configureOpenId(app) {
logger.info('Configuring OpenID Connect...');
const isProduction = process.env.NODE_ENV === 'production';
const sessionExpiry = Number(process.env.SESSION_EXPIRY) || DEFAULT_SESSION_EXPIRY;
const sessionOptions = {
secret: process.env.OPENID_SESSION_SECRET,
resave: false,
saveUninitialized: false,
store: getLogStores(CacheKeys.OPENID_SESSION),
cookie: {
maxAge: sessionExpiry,
secure: isProduction,
},
};
app.use(session(sessionOptions));
app.use(passport.session());
@ -82,11 +88,17 @@ const configureSocialLogins = async (app) => {
process.env.SAML_SESSION_SECRET
) {
logger.info('Configuring SAML Connect...');
const isProduction = process.env.NODE_ENV === 'production';
const sessionExpiry = Number(process.env.SESSION_EXPIRY) || DEFAULT_SESSION_EXPIRY;
const sessionOptions = {
secret: process.env.SAML_SESSION_SECRET,
resave: false,
saveUninitialized: false,
store: getLogStores(CacheKeys.SAML_SESSION),
cookie: {
maxAge: sessionExpiry,
secure: isProduction,
},
};
app.use(session(sessionOptions));
app.use(passport.session());

View file

@ -254,7 +254,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.7.20",
"version": "1.7.21",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
"@babel/preset-react": "^7.18.6",
@ -321,7 +321,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.4",
"version": "0.4.50",
"devDependencies": {
"@babel/core": "^7.28.5",
"@babel/preset-env": "^7.28.5",
@ -409,7 +409,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.220",
"version": "0.8.230",
"dependencies": {
"axios": "^1.12.1",
"dayjs": "^1.11.13",
@ -447,7 +447,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.33",
"version": "0.0.34",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-commonjs": "^29.0.0",

View file

@ -1,4 +1,4 @@
/** v0.8.2-rc2 */
/** v0.8.2-rc3 */
module.exports = {
roots: ['<rootDir>/src'],
testEnvironment: 'jsdom',

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"description": "",
"type": "module",
"scripts": {

View file

@ -294,6 +294,7 @@ function ConvoOptions({
portal={true}
menuId={menuId}
focusLoop={true}
className="z-[125]"
unmountOnHide={true}
isOpen={isPopoverActive}
setIsOpen={setIsPopoverActive}
@ -321,7 +322,6 @@ function ConvoOptions({
</Ariakit.MenuButton>
}
items={dropdownItems}
className="z-30"
/>
{showShareDialog && (
<ShareButton

View file

@ -23,9 +23,10 @@ interface AuthFieldProps {
hasValue: boolean;
control: any;
errors: any;
autoFocus?: boolean;
}
function AuthField({ name, config, hasValue, control, errors }: AuthFieldProps) {
function AuthField({ name, config, hasValue, control, errors, autoFocus }: AuthFieldProps) {
const localize = useLocalize();
const statusText = hasValue ? localize('com_ui_set') : localize('com_ui_unset');
@ -85,6 +86,11 @@ function AuthField({ name, config, hasValue, control, errors }: AuthFieldProps)
<Input
id={name}
type="text"
/* autoFocus is generally disabled due to the fact that it can disorient users,
* but in this case, the required field would logically be immediately navigated to anyways, and the component's
* functionality emulates that of a new modal opening, where users would expect focus to be shifted to the new content */
// eslint-disable-next-line jsx-a11y/no-autofocus
autoFocus={autoFocus}
{...field}
placeholder={
hasValue
@ -150,7 +156,7 @@ export default function CustomUserVarsSection({
return (
<div className="flex-1 space-y-4">
<form onSubmit={handleSubmit(onFormSubmit)} className="space-y-4">
{Object.entries(fields).map(([key, config]) => {
{Object.entries(fields).map(([key, config], index) => {
const hasValue = authValuesData?.authValueFlags?.[key] || false;
return (
@ -161,6 +167,8 @@ export default function CustomUserVarsSection({
hasValue={hasValue}
control={control}
errors={errors}
// eslint-disable-next-line jsx-a11y/no-autofocus -- See AuthField autoFocus comment for more details
autoFocus={index === 0}
/>
);
})}

View file

@ -40,7 +40,7 @@ function AccountSettings() {
</div>
</Select.Select>
<Select.SelectPopover
className="popover-ui w-[305px] rounded-lg md:w-[244px]"
className="popover-ui z-[125] w-[305px] rounded-lg md:w-[244px]"
style={{
transformOrigin: 'bottom',
translate: '0 -4px',

View file

@ -89,6 +89,7 @@ const BookmarkNav: FC<BookmarkNavProps> = ({ tags, setTags }: BookmarkNavProps)
unmountOnHide={true}
setIsOpen={setIsMenuOpen}
keyPrefix="bookmark-nav-"
className="z-[125]"
trigger={
<TooltipAnchor
description={label}

View file

@ -136,6 +136,7 @@ export default function FavoriteItem({
mountByState={true}
isOpen={isPopoverActive}
setIsOpen={setIsPopoverActive}
className="z-[125]"
trigger={
<Menu.MenuButton
className={cn(

View file

@ -262,7 +262,7 @@ const Nav = memo(
<div
data-testid="nav"
className={cn(
'nav fixed left-0 top-0 z-[70] h-full bg-surface-primary-alt',
'nav fixed left-0 top-0 z-[110] h-full bg-surface-primary-alt',
navVisible && 'active',
)}
style={{

View file

@ -56,6 +56,11 @@ function PluginAuthForm({ plugin, onSubmit, isEntityTool }: TPluginAuthFormProps
aria-describedby={`${authField}-error`}
aria-label={config.label}
aria-required="true"
/* autoFocus is generally disabled due to the fact that it can disorient users,
* but in this case, the required field must be navigated to anyways, and the component's functionality
* emulates that of a new modal opening, where users would expect focus to be shifted to the new content */
// eslint-disable-next-line jsx-a11y/no-autofocus
autoFocus={i === 0}
{...register(authField, {
required: `${config.label} is required.`,
minLength: {
@ -70,7 +75,7 @@ function PluginAuthForm({ plugin, onSubmit, isEntityTool }: TPluginAuthFormProps
</HoverCard>
{errors[authField] && (
<span role="alert" className="mt-1 text-sm text-red-400">
{errors?.[authField]?.message ?? ''}
{String(errors?.[authField]?.message ?? '')}
</span>
)}
</div>

View file

@ -6,7 +6,7 @@ import { ResizablePanel, ResizablePanelGroup, useMediaQuery } from '@librechat/c
import type { ImperativePanelHandle } from 'react-resizable-panels';
import { useGetStartupConfig } from '~/data-provider';
import ArtifactsPanel from './ArtifactsPanel';
import { normalizeLayout } from '~/utils';
import { normalizeLayout, cn } from '~/utils';
import SidePanel from './SidePanel';
import store from '~/store';
@ -149,9 +149,9 @@ const SidePanelGroup = memo(
)}
{!hideSidePanel && interfaceConfig.sidePanel === true && (
<button
aria-label="Close right side panel"
className={`nav-mask ${!isCollapsed ? 'active' : ''}`}
onClick={handleClosePanel}
aria-label="Close right side panel"
className={cn('sidenav-mask', !isCollapsed ? 'active' : '')}
/>
)}
</>

View file

@ -1,5 +1,5 @@
import { useMutation } from '@tanstack/react-query';
import { request } from 'librechat-data-provider';
import { apiBaseUrl, request } from 'librechat-data-provider';
export interface AbortStreamParams {
/** The stream ID to abort (if known) */
@ -23,7 +23,10 @@ export interface AbortStreamResponse {
*/
export const abortStream = async (params: AbortStreamParams): Promise<AbortStreamResponse> => {
console.log('[abortStream] Calling abort endpoint with params:', params);
const result = (await request.post('/api/agents/chat/abort', params)) as AbortStreamResponse;
const result = (await request.post(
`${apiBaseUrl()}/api/agents/chat/abort`,
params,
)) as AbortStreamResponse;
console.log('[abortStream] Abort response:', result);
return result;
};

View file

@ -1,5 +1,5 @@
import { useEffect, useMemo, useState } from 'react';
import { QueryKeys, request, dataService } from 'librechat-data-provider';
import { apiBaseUrl, QueryKeys, request, dataService } from 'librechat-data-provider';
import { useQuery, useQueries, useQueryClient } from '@tanstack/react-query';
import type { Agents, TConversation } from 'librechat-data-provider';
import { updateConvoInAllQueries } from '~/utils';
@ -16,7 +16,9 @@ export interface StreamStatusResponse {
export const streamStatusQueryKey = (conversationId: string) => ['streamStatus', conversationId];
export const fetchStreamStatus = async (conversationId: string): Promise<StreamStatusResponse> => {
return request.get<StreamStatusResponse>(`/api/agents/chat/status/${conversationId}`);
return request.get<StreamStatusResponse>(
`${apiBaseUrl()}/api/agents/chat/status/${conversationId}`,
);
};
export function useStreamStatus(conversationId: string | undefined, enabled = true) {

View file

@ -7,7 +7,10 @@ import {
request,
Constants,
QueryKeys,
ErrorTypes,
apiBaseUrl,
createPayload,
ViolationTypes,
LocalStorageKeys,
removeNullishValues,
} from 'librechat-data-provider';
@ -144,7 +147,7 @@ export default function useResumableSSE(
let { userMessage } = currentSubmission;
let textIndex: number | null = null;
const baseUrl = `/api/agents/chat/stream/${encodeURIComponent(currentStreamId)}`;
const baseUrl = `${apiBaseUrl()}/api/agents/chat/stream/${encodeURIComponent(currentStreamId)}`;
const url = isResume ? `${baseUrl}?resume=true` : baseUrl;
console.log('[ResumableSSE] Subscribing to stream:', url, { isResume });
@ -333,8 +336,11 @@ export default function useResumableSSE(
});
/**
* Error event - fired on actual network failures (non-200, connection lost, etc.)
* This should trigger reconnection with exponential backoff, except for 404 errors.
* Error event handler - handles BOTH:
* 1. HTTP-level errors (responseCode present) - 404, 401, network failures
* 2. Server-sent error events (event: error with data) - known errors like ViolationTypes/ErrorTypes
*
* Order matters: check responseCode first since HTTP errors may also include data
*/
sse.addEventListener('error', async (e: MessageEvent) => {
(startupConfig?.balance?.enabled ?? false) && balanceQuery.refetch();
@ -346,7 +352,6 @@ export default function useResumableSSE(
if (responseCode === 404) {
console.log('[ResumableSSE] Stream not found (404) - job completed or expired');
sse.close();
// Optimistically remove from active jobs since job is gone
removeActiveJob(currentStreamId);
setIsSubmitting(false);
setShowStopButton(false);
@ -355,8 +360,6 @@ export default function useResumableSSE(
return;
}
console.log('[ResumableSSE] Stream error (network failure) - will attempt reconnect');
// Check for 401 and try to refresh token (same pattern as useSSE)
if (responseCode === 401) {
try {
@ -365,7 +368,6 @@ export default function useResumableSSE(
if (!newToken) {
throw new Error('Token refresh failed.');
}
// Update headers on same SSE instance and retry (like useSSE)
sse.headers = {
Authorization: `Bearer ${newToken}`,
};
@ -377,6 +379,64 @@ export default function useResumableSSE(
}
}
/**
* Server-sent error event (event: error with data) - no responseCode.
* These are known errors (ErrorTypes, ViolationTypes) that should be displayed to user.
* Only check e.data if there's no HTTP responseCode, since HTTP errors may also have body data.
*/
if (!responseCode && e.data) {
console.log('[ResumableSSE] Server-sent error event received:', e.data);
sse.close();
removeActiveJob(currentStreamId);
try {
const errorData = JSON.parse(e.data);
const errorString = errorData.error ?? errorData.message ?? JSON.stringify(errorData);
// Check if it's a known error type (ViolationTypes or ErrorTypes)
let isKnownError = false;
try {
const parsed =
typeof errorString === 'string' ? JSON.parse(errorString) : errorString;
const errorType = parsed?.type ?? parsed?.code;
if (errorType) {
const violationValues = Object.values(ViolationTypes) as string[];
const errorTypeValues = Object.values(ErrorTypes) as string[];
isKnownError =
violationValues.includes(errorType) || errorTypeValues.includes(errorType);
}
} catch {
// Not JSON or parsing failed - treat as generic error
}
console.log('[ResumableSSE] Error type check:', { isKnownError, errorString });
// Display the error to user via errorHandler
errorHandler({
data: { text: errorString } as unknown as Parameters<typeof errorHandler>[0]['data'],
submission: currentSubmission as EventSubmission,
});
} catch (parseError) {
console.error('[ResumableSSE] Failed to parse server error:', parseError);
errorHandler({
data: { text: e.data } as unknown as Parameters<typeof errorHandler>[0]['data'],
submission: currentSubmission as EventSubmission,
});
}
setIsSubmitting(false);
setShowStopButton(false);
setStreamId(null);
reconnectAttemptRef.current = 0;
return;
}
// Network failure or unknown HTTP error - attempt reconnection with backoff
console.log('[ResumableSSE] Stream error (network failure) - will attempt reconnect', {
responseCode,
hasData: !!e.data,
});
if (reconnectAttemptRef.current < MAX_RETRIES) {
// Increment counter BEFORE close() so abort handler knows we're reconnecting
reconnectAttemptRef.current++;

View file

@ -533,6 +533,7 @@
"com_nav_log_out": "Izrakstīties",
"com_nav_long_audio_warning": "Garāku tekstu apstrāde prasīs ilgāku laiku.",
"com_nav_maximize_chat_space": "Maksimāli izmantot sarunu telpas izmērus",
"com_nav_mcp_access_revoked": "MCP servera piekļuve veiksmīgi atsaukta.",
"com_nav_mcp_configure_server": "Konfigurēt {{0}}",
"com_nav_mcp_connect": "Savienot",
"com_nav_mcp_connect_server": "Savienot {{0}}",

View file

@ -1,6 +1,6 @@
{
"chat_direction_left_to_right": "Noe bør legges inn her. Tomt felt.",
"chat_direction_right_to_left": "Noe bør legges inn her. Tomt felt.",
"chat_direction_left_to_right": "Venstre til høyre",
"chat_direction_right_to_left": "Høyre til venstre",
"com_a11y_ai_composing": "KI-en skriver fortsatt.",
"com_a11y_end": "KI-en har fullført svaret sitt.",
"com_a11y_start": "KI-en har begynt å svare.",
@ -372,7 +372,7 @@
"com_files_number_selected": "{{0}} av {{1}} valgt",
"com_files_preparing_download": "Forbereder nedlasting ...",
"com_files_sharepoint_picker_title": "Velg filer",
"com_files_table": "[Plassholder: Tabell over filer]",
"com_files_table": "Fil-tabell",
"com_files_upload_local_machine": "Fra lokal datamaskin",
"com_files_upload_sharepoint": "Fra SharePoint",
"com_generated_files": "Genererte filer:",
@ -813,7 +813,7 @@
"com_ui_download_backup": "Last ned reservekoder",
"com_ui_download_backup_tooltip": "Før du fortsetter, last ned reservekodene dine. Du vil trenge dem for å få tilgang igjen hvis du mister autentiseringsenheten din.",
"com_ui_download_error": "Feil ved nedlasting av fil. Filen kan ha blitt slettet.",
"com_ui_drag_drop": "Dra og slipp filer her, eller klikk for å velge.",
"com_ui_drag_drop": "Dra og slipp fil(er) her, eller klikk for å velge.",
"com_ui_dropdown_variables": "Nedtrekksvariabler:",
"com_ui_dropdown_variables_info": "Opprett egendefinerte nedtrekksmenyer for promptene dine: `{{variabelnavn:valg1|valg2|valg3}}`",
"com_ui_duplicate": "Dupliser",

View file

@ -9,7 +9,7 @@
.nav {
position: fixed;
z-index: 64;
z-index: 110;
top: 0;
/* max-width: 260px; */
@ -36,7 +36,7 @@
}
.nav-mask {
position: fixed;
z-index: 63;
z-index: 105;
left: 0;
right: 0;
top: 0;
@ -153,7 +153,7 @@
right: 0;
top: 0;
bottom: 0;
background-color: rgba(86, 88, 105, 0.75);
background-color: rgba(7, 7, 7, 0.4);
padding-left: 420px;
padding-top: 12px;
opacity: 0;

View file

@ -1,3 +1,3 @@
// v0.8.2-rc2
// v0.8.2-rc3
// See .env.test.example for an example of the '.env.test' file.
require('dotenv').config({ path: './e2e/.env.test' });

View file

@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.9.5
version: 1.9.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@ -23,7 +23,7 @@ version: 1.9.5
# It is recommended to use it with quotes.
# renovate: image=ghcr.io/danny-avila/librechat
appVersion: "v0.8.2-rc2"
appVersion: "v0.8.2-rc3"
home: https://www.librechat.ai

239
package-lock.json generated
View file

@ -1,12 +1,12 @@
{
"name": "LibreChat",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "LibreChat",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"license": "ISC",
"workspaces": [
"api",
@ -45,7 +45,7 @@
},
"api": {
"name": "@librechat/backend",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"license": "ISC",
"dependencies": {
"@anthropic-ai/sdk": "^0.71.0",
@ -57,10 +57,9 @@
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.27.0",
"@google/genai": "^1.19.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.77",
"@librechat/agents": "^3.0.776",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@ -123,10 +122,9 @@
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
"undici": "^7.10.0",
"undici": "^7.18.2",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^5.0.0",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {
@ -444,7 +442,7 @@
},
"client": {
"name": "@librechat/frontend",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"license": "ISC",
"dependencies": {
"@ariakit/react": "^0.4.15",
@ -10739,18 +10737,6 @@
"node": ">=18.0.0"
}
},
"node_modules/@googleapis/youtube": {
"version": "20.0.0",
"resolved": "https://registry.npmjs.org/@googleapis/youtube/-/youtube-20.0.0.tgz",
"integrity": "sha512-wdt1J0JoKYhvpoS2XIRHX0g/9ul/B0fQeeJAhuuBIdYINuuLt6/oZYZZCBmkuhtkA3IllXgqgAXOjLtLRAnR2g==",
"license": "Apache-2.0",
"dependencies": {
"googleapis-common": "^7.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/@grpc/grpc-js": {
"version": "1.9.15",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.9.15.tgz",
@ -12660,9 +12646,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "3.0.77",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.77.tgz",
"integrity": "sha512-Wr9d8bjJAQSl03nEgnAPG6jBQT1fL3sNV3TFDN1FvFQt6WGfdok838Cbcn+/tSGXSPJcICTxNkMT7VN8P6bCPw==",
"version": "3.0.776",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.776.tgz",
"integrity": "sha512-tLhFqyjlGl70QV8mq9cvmvwQ0hw/WCIC7ayEY5zJAXK+WTO80ir7xjYtXz98PX0+hNwNu8PFekgnSTWrYIrT0w==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.26",
@ -20383,12 +20369,6 @@
"@types/ms": "*"
}
},
"node_modules/@types/diff": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/@types/diff/-/diff-6.0.0.tgz",
"integrity": "sha512-dhVCYGv3ZSbzmQaBSagrv1WJ6rXCdkyTcDyoNu1MD8JohI7pR7k8wdZEm+mvdxRKXyHVwckFzWU1vJc+Z29MlA==",
"dev": true
},
"node_modules/@types/estree": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz",
@ -22558,25 +22538,24 @@
}
},
"node_modules/browserify-sign": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.3.tgz",
"integrity": "sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==",
"version": "4.2.5",
"resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.5.tgz",
"integrity": "sha512-C2AUdAJg6rlM2W5QMp2Q4KGQMVBwR1lIimTsUnutJ8bMpW5B52pGpR2gEnNBNwijumDo5FojQ0L9JrXA8m4YEw==",
"dev": true,
"license": "ISC",
"dependencies": {
"bn.js": "^5.2.1",
"browserify-rsa": "^4.1.0",
"bn.js": "^5.2.2",
"browserify-rsa": "^4.1.1",
"create-hash": "^1.2.0",
"create-hmac": "^1.1.7",
"elliptic": "^6.5.5",
"hash-base": "~3.0",
"elliptic": "^6.6.1",
"inherits": "^2.0.4",
"parse-asn1": "^5.1.7",
"parse-asn1": "^5.1.9",
"readable-stream": "^2.3.8",
"safe-buffer": "^5.2.1"
},
"engines": {
"node": ">= 0.12"
"node": ">= 0.10"
}
},
"node_modules/browserify-sign/node_modules/isarray": {
@ -25016,15 +24995,6 @@
"resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
"integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
},
"node_modules/diff": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz",
"integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==",
"peer": true,
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/diff-sequences": {
"version": "29.6.3",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
@ -27690,22 +27660,6 @@
"node": ">=14"
}
},
"node_modules/googleapis-common": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/googleapis-common/-/googleapis-common-7.0.1.tgz",
"integrity": "sha512-mgt5zsd7zj5t5QXvDanjWguMdHAcJmmDrF9RkInCecNsyV7S7YtGqm5v2IWONNID88osb7zmx5FtrAP12JfD0w==",
"dependencies": {
"extend": "^3.0.2",
"gaxios": "^6.0.3",
"google-auth-library": "^9.0.0",
"qs": "^6.7.0",
"url-template": "^2.0.8",
"uuid": "^9.0.0"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
@ -28143,9 +28097,9 @@
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
},
"node_modules/hono": {
"version": "4.11.3",
"resolved": "https://registry.npmjs.org/hono/-/hono-4.11.3.tgz",
"integrity": "sha512-PmQi306+M/ct/m5s66Hrg+adPnkD5jiO6IjA7WhWw0gSBSo1EcRegwuI1deZ+wd5pzCGynCcn2DprnE4/yEV4w==",
"version": "4.11.4",
"resolved": "https://registry.npmjs.org/hono/-/hono-4.11.4.tgz",
"integrity": "sha512-U7tt8JsyrxSRKspfhtLET79pU8K+tInj5QZXs1jSugO1Vq5dFj3kmZsRldo29mTBfcjDRVRXrEZ6LS63Cog9ZA==",
"license": "MIT",
"peer": true,
"engines": {
@ -34591,17 +34545,16 @@
}
},
"node_modules/parse-asn1": {
"version": "5.1.7",
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.7.tgz",
"integrity": "sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==",
"version": "5.1.9",
"resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.9.tgz",
"integrity": "sha512-fIYNuZ/HastSb80baGOuPRo1O9cf4baWw5WsAp7dBuUzeTD/BoaG8sVTdlPFksBE2lF21dN+A1AnrpIjSWqHHg==",
"dev": true,
"license": "ISC",
"dependencies": {
"asn1.js": "^4.10.1",
"browserify-aes": "^1.2.0",
"evp_bytestokey": "^1.0.3",
"hash-base": "~3.0",
"pbkdf2": "^3.1.2",
"pbkdf2": "^3.1.5",
"safe-buffer": "^5.2.1"
},
"engines": {
@ -34926,55 +34879,21 @@
"integrity": "sha512-KG8UEiEVkR3wGEb4m5yZkVCzigAD+cVEJck2CzYZO37ZGJfctvVptVO192MwrtPhzONn6go8ylnOdMhKqi4nfg=="
},
"node_modules/pbkdf2": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.3.tgz",
"integrity": "sha512-wfRLBZ0feWRhCIkoMB6ete7czJcnNnqRpcoWQBLqatqXXmelSRqfdDK4F3u9T2s2cXas/hQJcryI/4lAL+XTlA==",
"version": "3.1.5",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.5.tgz",
"integrity": "sha512-Q3CG/cYvCO1ye4QKkuH7EXxs3VC/rI1/trd+qX2+PolbaKG0H+bgcZzrTt96mMyRtejk+JMCiLUn3y29W8qmFQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"create-hash": "~1.1.3",
"create-hash": "^1.2.0",
"create-hmac": "^1.1.7",
"ripemd160": "=2.0.1",
"ripemd160": "^2.0.3",
"safe-buffer": "^5.2.1",
"sha.js": "^2.4.11",
"to-buffer": "^1.2.0"
"sha.js": "^2.4.12",
"to-buffer": "^1.2.1"
},
"engines": {
"node": ">=0.12"
}
},
"node_modules/pbkdf2/node_modules/create-hash": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.1.3.tgz",
"integrity": "sha512-snRpch/kwQhcdlnZKYanNF1m0RDlrCdSKQaH87w1FCFPVPNCQ/Il9QJKAX2jVBZddRdaHBMC+zXa9Gw9tmkNUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"cipher-base": "^1.0.1",
"inherits": "^2.0.1",
"ripemd160": "^2.0.0",
"sha.js": "^2.4.0"
}
},
"node_modules/pbkdf2/node_modules/hash-base": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hash-base/-/hash-base-2.0.2.tgz",
"integrity": "sha512-0TROgQ1/SxE6KmxWSvXHvRj90/Xo1JvZShofnYF+f6ZsGtR4eES7WfrQzPalmyagfKZCXpVnitiRebZulWsbiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"inherits": "^2.0.1"
}
},
"node_modules/pbkdf2/node_modules/ripemd160": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.1.tgz",
"integrity": "sha512-J7f4wutN8mdbV08MJnXibYpCOPHR+yzy+iQ/AsjMv2j8cLavQ8VGagDFUwwTAdF8FmRKVeNpbTTEwNHCW1g94w==",
"dev": true,
"license": "MIT",
"dependencies": {
"hash-base": "^2.0.0",
"inherits": "^2.0.1"
"node": ">= 0.10"
}
},
"node_modules/peek-readable": {
@ -38557,16 +38476,65 @@
}
},
"node_modules/ripemd160": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz",
"integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==",
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.3.tgz",
"integrity": "sha512-5Di9UC0+8h1L6ZD2d7awM7E/T4uA1fJRlx6zk/NvdCCVEoAnFqvHmCuNeIKoCeIixBX/q8uM+6ycDvF8woqosA==",
"dev": true,
"license": "MIT",
"dependencies": {
"hash-base": "^3.0.0",
"inherits": "^2.0.1"
"hash-base": "^3.1.2",
"inherits": "^2.0.4"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/ripemd160/node_modules/hash-base": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.2.tgz",
"integrity": "sha512-Bb33KbowVTIj5s7Ked1OsqHUeCpz//tPwR+E2zJgJKo9Z5XolZ9b6bdUgjmYlwnWhoOQKoTd1TYToZGn5mAYOg==",
"dev": true,
"license": "MIT",
"dependencies": {
"inherits": "^2.0.4",
"readable-stream": "^2.3.8",
"safe-buffer": "^5.2.1",
"to-buffer": "^1.2.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/ripemd160/node_modules/isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
"dev": true,
"license": "MIT"
},
"node_modules/ripemd160/node_modules/readable-stream": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
"integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
"dev": true,
"license": "MIT",
"dependencies": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"node_modules/ripemd160/node_modules/readable-stream/node_modules/safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
"dev": true,
"license": "MIT"
},
"node_modules/robust-predicates": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz",
@ -41153,9 +41121,9 @@
"dev": true
},
"node_modules/undici": {
"version": "7.16.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-7.16.0.tgz",
"integrity": "sha512-QEg3HPMll0o3t2ourKwOeUAZ159Kn9mx5pnzHRQO8+Wixmh88YdZRiIwat0iNzNNXn0yoEtXJqFpyW7eM8BV7g==",
"version": "7.18.2",
"resolved": "https://registry.npmjs.org/undici/-/undici-7.18.2.tgz",
"integrity": "sha512-y+8YjDFzWdQlSE9N5nzKMT3g4a5UBX1HKowfdXh0uvAnTaqqwqB92Jt4UXBAeKekDs5IaDKyJFR4X1gYVCgXcw==",
"license": "MIT",
"engines": {
"node": ">=20.18.1"
@ -41538,11 +41506,6 @@
"requires-port": "^1.0.0"
}
},
"node_modules/url-template": {
"version": "2.0.8",
"resolved": "https://registry.npmjs.org/url-template/-/url-template-2.0.8.tgz",
"integrity": "sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw=="
},
"node_modules/url/node_modules/punycode": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
@ -43095,15 +43058,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/youtube-transcript": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/youtube-transcript/-/youtube-transcript-1.2.1.tgz",
"integrity": "sha512-TvEGkBaajKw+B6y91ziLuBLsa5cawgowou+Bk0ciGpjELDfAzSzTGXaZmeSSkUeknCPpEr/WGApOHDwV7V+Y9Q==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/zod": {
"version": "3.25.67",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
@ -43133,7 +43087,7 @@
},
"packages/api": {
"name": "@librechat/api",
"version": "1.7.20",
"version": "1.7.21",
"license": "ISC",
"devDependencies": {
"@babel/preset-env": "^7.21.5",
@ -43146,7 +43100,6 @@
"@rollup/plugin-replace": "^5.0.5",
"@rollup/plugin-typescript": "^12.1.2",
"@types/bun": "^1.2.15",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/express-session": "^1.18.2",
"@types/jest": "^29.5.2",
@ -43176,13 +43129,12 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.77",
"@librechat/agents": "^3.0.776",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
"connect-redis": "^8.1.0",
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^5.1.0",
"express-session": "^1.18.2",
@ -43202,7 +43154,7 @@
"node-fetch": "2.7.0",
"rate-limit-redis": "^4.2.0",
"tiktoken": "^1.0.15",
"undici": "^7.10.0",
"undici": "^7.18.2",
"zod": "^3.22.4"
}
},
@ -43246,7 +43198,7 @@
},
"packages/client": {
"name": "@librechat/client",
"version": "0.4.4",
"version": "0.4.50",
"devDependencies": {
"@babel/core": "^7.28.5",
"@babel/preset-env": "^7.28.5",
@ -45536,7 +45488,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.8.220",
"version": "0.8.230",
"license": "ISC",
"dependencies": {
"axios": "^1.12.1",
@ -45594,7 +45546,7 @@
},
"packages/data-schemas": {
"name": "@librechat/data-schemas",
"version": "0.0.33",
"version": "0.0.34",
"license": "MIT",
"devDependencies": {
"@rollup/plugin-alias": "^5.1.0",
@ -45604,7 +45556,6 @@
"@rollup/plugin-replace": "^5.0.5",
"@rollup/plugin-terser": "^0.4.4",
"@rollup/plugin-typescript": "^12.1.2",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/node": "^20.3.0",

View file

@ -1,6 +1,6 @@
{
"name": "LibreChat",
"version": "v0.8.2-rc2",
"version": "v0.8.2-rc3",
"description": "",
"workspaces": [
"api",

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/api",
"version": "1.7.20",
"version": "1.7.21",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",
@ -55,7 +55,6 @@
"@rollup/plugin-replace": "^5.0.5",
"@rollup/plugin-typescript": "^12.1.2",
"@types/bun": "^1.2.15",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/express-session": "^1.18.2",
"@types/jest": "^29.5.2",
@ -88,13 +87,12 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.77",
"@librechat/agents": "^3.0.776",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@smithy/node-http-handler": "^4.4.5",
"axios": "^1.12.1",
"connect-redis": "^8.1.0",
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^5.1.0",
"express-session": "^1.18.2",
@ -114,7 +112,7 @@
"node-fetch": "2.7.0",
"rate-limit-redis": "^4.2.0",
"tiktoken": "^1.0.15",
"undici": "^7.10.0",
"undici": "^7.18.2",
"zod": "^3.22.4"
}
}

View file

@ -0,0 +1,228 @@
import { FileSources } from 'librechat-data-provider';
import type { Agent, AgentAvatar, AgentModelParameters } from 'librechat-data-provider';
import type { RefreshS3UrlFn, UpdateAgentFn } from './avatars';
import {
MAX_AVATAR_REFRESH_AGENTS,
AVATAR_REFRESH_BATCH_SIZE,
refreshListAvatars,
} from './avatars';
// Unit tests for refreshListAvatars: covers skip paths (non-S3, missing id),
// the happy path (refresh + persist), no-op when the URL is unchanged,
// error isolation (S3 and persist failures), batching, and mixed stats.
describe('refreshListAvatars', () => {
  let mockRefreshS3Url: jest.MockedFunction<RefreshS3UrlFn>;
  let mockUpdateAgent: jest.MockedFunction<UpdateAgentFn>;
  const userId = 'user123';
  beforeEach(() => {
    // Fresh mocks per test so call counts don't leak between cases.
    mockRefreshS3Url = jest.fn();
    mockUpdateAgent = jest.fn();
  });
  // Builds a minimal Agent with an S3-backed avatar; individual tests
  // override only the fields they care about.
  const createAgent = (overrides: Partial<Agent> = {}): Agent => ({
    _id: 'obj1',
    id: 'agent1',
    name: 'Test Agent',
    author: userId,
    description: 'Test',
    created_at: Date.now(),
    avatar: {
      source: FileSources.s3,
      filepath: 'old-path.jpg',
    },
    instructions: null,
    provider: 'openai',
    model: 'gpt-4',
    model_parameters: {} as AgentModelParameters,
    ...overrides,
  });
  it('should return empty stats for empty agents array', async () => {
    const stats = await refreshListAvatars({
      agents: [],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // Early-return path: no work and no side effects.
    expect(stats.updated).toBe(0);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
    expect(mockUpdateAgent).not.toHaveBeenCalled();
  });
  it('should skip non-S3 avatars', async () => {
    const agent = createAgent({
      avatar: { source: 'local', filepath: 'local-path.jpg' } as AgentAvatar,
    });
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // Only S3-sourced avatars are eligible for refresh.
    expect(stats.not_s3).toBe(1);
    expect(stats.updated).toBe(0);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
  });
  it('should skip agents without id', async () => {
    const agent = createAgent({ id: '' });
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // An agent without an id cannot be persisted, so it is skipped entirely.
    expect(stats.no_id).toBe(1);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
  });
  it('should refresh avatars for agents owned by other users (VIEW access)', async () => {
    // Ownership is not required: any listed (viewable) agent is refreshed.
    const agent = createAgent({ author: 'otherUser' });
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    expect(stats.updated).toBe(1);
    expect(mockRefreshS3Url).toHaveBeenCalled();
    expect(mockUpdateAgent).toHaveBeenCalled();
  });
  it('should refresh and persist S3 avatars', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    expect(stats.updated).toBe(1);
    expect(mockRefreshS3Url).toHaveBeenCalledWith(agent.avatar);
    // Persisted without versioning and attributed to the requesting user.
    expect(mockUpdateAgent).toHaveBeenCalledWith(
      { id: 'agent1' },
      { avatar: { filepath: 'new-path.jpg', source: FileSources.s3 } },
      { updatingUserId: userId, skipVersioning: true },
    );
  });
  it('should not update if S3 URL unchanged', async () => {
    const agent = createAgent();
    // Refresh resolves to the path the agent already has.
    mockRefreshS3Url.mockResolvedValue('old-path.jpg');
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    expect(stats.no_change).toBe(1);
    expect(stats.updated).toBe(0);
    expect(mockUpdateAgent).not.toHaveBeenCalled();
  });
  it('should handle S3 refresh errors gracefully', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockRejectedValue(new Error('S3 error'));
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // The failure is recorded, not thrown.
    expect(stats.s3_error).toBe(1);
    expect(stats.updated).toBe(0);
  });
  it('should handle database persist errors gracefully', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockRejectedValue(new Error('DB error'));
    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // Persist failures are tracked separately from S3 failures.
    expect(stats.persist_error).toBe(1);
    expect(stats.updated).toBe(0);
  });
  it('should process agents in batches', async () => {
    // 25 agents spans two batches at AVATAR_REFRESH_BATCH_SIZE = 20.
    const agents = Array.from({ length: 25 }, (_, i) =>
      createAgent({
        _id: `obj${i}`,
        id: `agent${i}`,
        avatar: { source: FileSources.s3, filepath: `path${i}.jpg` },
      }),
    );
    mockRefreshS3Url.mockImplementation((avatar) =>
      Promise.resolve(avatar.filepath.replace('.jpg', '-new.jpg')),
    );
    mockUpdateAgent.mockResolvedValue({});
    const stats = await refreshListAvatars({
      agents,
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    // Every agent is processed exactly once across batches.
    expect(stats.updated).toBe(25);
    expect(mockRefreshS3Url).toHaveBeenCalledTimes(25);
    expect(mockUpdateAgent).toHaveBeenCalledTimes(25);
  });
  it('should track mixed statistics correctly', async () => {
    const agents = [
      createAgent({ id: 'agent1' }),
      createAgent({ id: 'agent2', author: 'otherUser' }),
      createAgent({
        id: 'agent3',
        avatar: { source: 'local', filepath: 'local.jpg' } as AgentAvatar,
      }),
      createAgent({ id: '' }), // no id
    ];
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});
    const stats = await refreshListAvatars({
      agents,
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });
    expect(stats.updated).toBe(2); // agent1 and agent2 (other user's agent now refreshed)
    expect(stats.not_s3).toBe(1); // agent3
    expect(stats.no_id).toBe(1); // agent with empty id
  });
});
// Pins the exported tuning constants so accidental changes to the refresh
// cap or batch size are caught by CI.
describe('Constants', () => {
  it('should export MAX_AVATAR_REFRESH_AGENTS as 1000', () => {
    expect(MAX_AVATAR_REFRESH_AGENTS).toBe(1000);
  });
  it('should export AVATAR_REFRESH_BATCH_SIZE as 20', () => {
    expect(AVATAR_REFRESH_BATCH_SIZE).toBe(20);
  });
});

View file

@ -0,0 +1,122 @@
import { logger } from '@librechat/data-schemas';
import { FileSources } from 'librechat-data-provider';
import type { Agent, AgentAvatar } from 'librechat-data-provider';
/** Upper bound on how many agents a single list-refresh pass will consider. */
const MAX_AVATAR_REFRESH_AGENTS = 1000;
/** Number of agents refreshed concurrently per batch (limits DB/S3 pressure). */
const AVATAR_REFRESH_BATCH_SIZE = 20;
export { MAX_AVATAR_REFRESH_AGENTS, AVATAR_REFRESH_BATCH_SIZE };
/** Resolves a fresh (pre-signed) URL for an S3 avatar; `undefined` means no new URL. */
export type RefreshS3UrlFn = (avatar: AgentAvatar) => Promise<string | undefined>;
/** Persists an updated avatar on an agent, attributed to `updatingUserId`. */
export type UpdateAgentFn = (
  searchParams: { id: string },
  updateData: { avatar: AgentAvatar },
  options: { updatingUserId: string; skipVersioning: boolean },
) => Promise<unknown>;
/** Inputs for refreshListAvatars: the (already access-filtered) agents plus injected I/O. */
export type RefreshListAvatarsParams = {
  agents: Agent[];
  userId: string;
  refreshS3Url: RefreshS3UrlFn;
  updateAgent: UpdateAgentFn;
};
/** Per-outcome counters accumulated over one refresh pass. */
export type RefreshStats = {
  updated: number;
  not_s3: number;
  no_id: number;
  no_change: number;
  s3_error: number;
  persist_error: number;
};
/**
* Opportunistically refreshes S3-backed avatars for agent list responses.
* Processes agents in batches to prevent database connection pool exhaustion.
* Only list responses are refreshed because they're the highest-traffic surface and
* the avatar URLs have a short-lived TTL. The refresh is cached per-user for 30 minutes
* so we refresh once per interval at most.
*
* Any user with VIEW access to an agent can refresh its avatar URL. This ensures
* avatars remain accessible even when the owner hasn't logged in recently.
* The agents array should already be filtered to only include agents the user can access.
*/
export const refreshListAvatars = async ({
agents,
userId,
refreshS3Url,
updateAgent,
}: RefreshListAvatarsParams): Promise<RefreshStats> => {
const stats: RefreshStats = {
updated: 0,
not_s3: 0,
no_id: 0,
no_change: 0,
s3_error: 0,
persist_error: 0,
};
if (!agents?.length) {
return stats;
}
logger.debug('[refreshListAvatars] Refreshing S3 avatars for agents: %d', agents.length);
for (let i = 0; i < agents.length; i += AVATAR_REFRESH_BATCH_SIZE) {
const batch = agents.slice(i, i + AVATAR_REFRESH_BATCH_SIZE);
await Promise.all(
batch.map(async (agent) => {
if (agent?.avatar?.source !== FileSources.s3 || !agent?.avatar?.filepath) {
stats.not_s3++;
return;
}
if (!agent?.id) {
logger.debug(
'[refreshListAvatars] Skipping S3 avatar refresh for agent: %s, ID is not set',
agent._id,
);
stats.no_id++;
return;
}
try {
logger.debug('[refreshListAvatars] Refreshing S3 avatar for agent: %s', agent._id);
const newPath = await refreshS3Url(agent.avatar);
if (newPath && newPath !== agent.avatar.filepath) {
try {
await updateAgent(
{ id: agent.id },
{
avatar: {
filepath: newPath,
source: agent.avatar.source,
},
},
{
updatingUserId: userId,
skipVersioning: true,
},
);
stats.updated++;
} catch (persistErr) {
logger.error('[refreshListAvatars] Avatar refresh persist error: %o', persistErr);
stats.persist_error++;
}
} else {
stats.no_change++;
}
} catch (err) {
logger.error('[refreshListAvatars] S3 avatar refresh error: %o', err);
stats.s3_error++;
}
}),
);
}
logger.info('[refreshListAvatars] Avatar refresh summary: %o', stats);
return stats;
};

View file

@ -1,3 +1,4 @@
export * from './avatars';
export * from './chain';
export * from './edges';
export * from './initialize';

View file

@ -1,17 +1,42 @@
import { Types } from 'mongoose';
import type { Response } from 'express';
import { Run } from '@librechat/agents';
import type { IUser } from '@librechat/data-schemas';
import { createSafeUser } from '~/utils/env';
import type { Response } from 'express';
import { processMemory } from './memory';
jest.mock('~/stream/GenerationJobManager');
const mockCreateSafeUser = jest.fn((user) => ({
id: user?.id,
email: user?.email,
name: user?.name,
username: user?.username,
}));
const mockResolveHeaders = jest.fn((opts) => {
const headers = opts.headers || {};
const user = opts.user || {};
const result: Record<string, string> = {};
for (const [key, value] of Object.entries(headers)) {
let resolved = value as string;
resolved = resolved.replace(/\$\{(\w+)\}/g, (_match, envVar) => process.env[envVar] || '');
resolved = resolved.replace(/\{\{LIBRECHAT_USER_EMAIL\}\}/g, user.email || '');
resolved = resolved.replace(/\{\{LIBRECHAT_USER_ID\}\}/g, user.id || '');
result[key] = resolved;
}
return result;
});
jest.mock('~/utils', () => ({
Tokenizer: {
getTokenCount: jest.fn(() => 10),
},
createSafeUser: (user: unknown) => mockCreateSafeUser(user),
resolveHeaders: (opts: unknown) => mockResolveHeaders(opts),
}));
const { createSafeUser } = jest.requireMock('~/utils');
jest.mock('@librechat/agents', () => ({
Run: {
create: jest.fn(() => ({
@ -20,6 +45,7 @@ jest.mock('@librechat/agents', () => ({
},
Providers: {
OPENAI: 'openai',
BEDROCK: 'bedrock',
},
GraphEvents: {
TOOL_END: 'tool_end',
@ -295,4 +321,65 @@ describe('Memory Agent Header Resolution', () => {
expect(safeUser).toHaveProperty('id');
expect(safeUser).toHaveProperty('email');
});
it('should include instructions in user message for Bedrock provider', async () => {
const llmConfig = {
provider: 'bedrock',
model: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
};
const { HumanMessage } = await import('@langchain/core/messages');
const testMessage = new HumanMessage('test chat content');
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [testMessage],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
// For Bedrock, instructions should NOT be passed to graphConfig
expect(runConfig.graphConfig.instructions).toBeUndefined();
expect(runConfig.graphConfig.additional_instructions).toBeUndefined();
});
it('should pass instructions to graphConfig for non-Bedrock providers', async () => {
const llmConfig = {
provider: 'openai',
model: 'gpt-4o-mini',
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
// For non-Bedrock providers, instructions should be passed to graphConfig
expect(runConfig.graphConfig.instructions).toBe('test instructions');
expect(runConfig.graphConfig.additional_instructions).toBeDefined();
});
});

View file

@ -3,6 +3,7 @@ import { z } from 'zod';
import { tool } from '@langchain/core/tools';
import { Tools } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import { HumanMessage } from '@langchain/core/messages';
import { Run, Providers, GraphEvents } from '@librechat/agents';
import type {
OpenAIClientOptions,
@ -13,13 +14,12 @@ import type {
ToolEndData,
LLMConfig,
} from '@librechat/agents';
import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
import type { ObjectId, MemoryMethods, IUser } from '@librechat/data-schemas';
import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
import type { Response as ServerResponse } from 'express';
import { GenerationJobManager } from '~/stream/GenerationJobManager';
import { resolveHeaders, createSafeUser } from '~/utils/env';
import { Tokenizer } from '~/utils';
import { Tokenizer, resolveHeaders, createSafeUser } from '~/utils';
type RequiredMemoryMethods = Pick<
MemoryMethods,
@ -369,6 +369,19 @@ ${memory ?? 'No existing memories'}`;
}
}
// Handle Bedrock with thinking enabled - temperature must be 1
const bedrockConfig = finalLLMConfig as {
additionalModelRequestFields?: { thinking?: unknown };
temperature?: number;
};
if (
llmConfig?.provider === Providers.BEDROCK &&
bedrockConfig.additionalModelRequestFields?.thinking != null &&
bedrockConfig.temperature != null
) {
(finalLLMConfig as unknown as Record<string, unknown>).temperature = 1;
}
const llmConfigWithHeaders = finalLLMConfig as OpenAIClientOptions;
if (llmConfigWithHeaders?.configuration?.defaultHeaders != null) {
llmConfigWithHeaders.configuration.defaultHeaders = resolveHeaders({
@ -383,14 +396,51 @@ ${memory ?? 'No existing memories'}`;
[GraphEvents.TOOL_END]: new BasicToolEndHandler(memoryCallback),
};
/**
* For Bedrock provider, include instructions in the user message instead of as a system prompt.
* Bedrock's Converse API requires conversations to start with a user message, not a system message.
* Other providers can use the standard system prompt approach.
*/
const isBedrock = llmConfig?.provider === Providers.BEDROCK;
let graphInstructions: string | undefined = instructions;
let graphAdditionalInstructions: string | undefined = memoryStatus;
let processedMessages = messages;
if (isBedrock) {
const combinedInstructions = [instructions, memoryStatus].filter(Boolean).join('\n\n');
if (messages.length > 0) {
const firstMessage = messages[0];
const originalContent =
typeof firstMessage.content === 'string' ? firstMessage.content : '';
if (typeof firstMessage.content !== 'string') {
logger.warn(
'Bedrock memory processing: First message has non-string content, using empty string',
);
}
const bedrockUserMessage = new HumanMessage(
`${combinedInstructions}\n\n${originalContent}`,
);
processedMessages = [bedrockUserMessage, ...messages.slice(1)];
} else {
processedMessages = [new HumanMessage(combinedInstructions)];
}
graphInstructions = undefined;
graphAdditionalInstructions = undefined;
}
const run = await Run.create({
runId: messageId,
graphConfig: {
type: 'standard',
llmConfig: finalLLMConfig,
tools: [memoryTool, deleteMemoryTool],
instructions,
additional_instructions: memoryStatus,
instructions: graphInstructions,
additional_instructions: graphAdditionalInstructions,
toolEnd: true,
},
customHandlers,
@ -410,7 +460,7 @@ ${memory ?? 'No existing memories'}`;
} as const;
const inputs = {
messages,
messages: processedMessages,
};
const content = await run.processStream(inputs, config);
if (content) {

View file

@ -33,6 +33,7 @@ export interface GenerationJobManagerOptions {
* @property readyPromise - Resolves immediately (legacy, kept for API compatibility)
* @property resolveReady - Function to resolve readyPromise
* @property finalEvent - Cached final event for late subscribers
* @property errorEvent - Cached error event for late subscribers (errors before client connects)
* @property syncSent - Whether sync event was sent (reset when all subscribers leave)
* @property earlyEventBuffer - Buffer for events emitted before first subscriber connects
* @property hasSubscriber - Whether at least one subscriber has connected
@ -47,6 +48,7 @@ interface RuntimeJobState {
readyPromise: Promise<void>;
resolveReady: () => void;
finalEvent?: t.ServerSentEvent;
errorEvent?: string;
syncSent: boolean;
earlyEventBuffer: t.ServerSentEvent[];
hasSubscriber: boolean;
@ -421,6 +423,7 @@ class GenerationJobManagerClass {
earlyEventBuffer: [],
hasSubscriber: false,
finalEvent,
errorEvent: jobData.error,
};
this.runtimeState.set(streamId, runtime);
@ -510,6 +513,8 @@ class GenerationJobManagerClass {
/**
* Mark job as complete.
* If cleanupOnComplete is true (default), immediately cleans up job resources.
* Exception: Jobs with errors are NOT immediately deleted to allow late-connecting
* clients to receive the error (race condition where error occurs before client connects).
* Note: eventTransport is NOT cleaned up here to allow the final event to be
* fully transmitted. It will be cleaned up when subscribers disconnect or
* by the periodic cleanup job.
@ -527,7 +532,29 @@ class GenerationJobManagerClass {
this.jobStore.clearContentState(streamId);
this.runStepBuffers?.delete(streamId);
// Immediate cleanup if configured (default: true)
// For error jobs, DON'T delete immediately - keep around so late-connecting
// clients can receive the error. This handles the race condition where error
// occurs before client connects to SSE stream.
//
// Cleanup strategy: Error jobs are cleaned up by periodic cleanup (every 60s)
// via jobStore.cleanup() which checks for jobs with status 'error' and
// completedAt set. The TTL is configurable via jobStore options (default: 0,
// meaning cleanup on next interval). This gives clients ~60s to connect and
// receive the error before the job is removed.
if (error) {
await this.jobStore.updateJob(streamId, {
status: 'error',
completedAt: Date.now(),
error,
});
// Keep runtime state so subscribe() can access errorEvent
logger.debug(
`[GenerationJobManager] Job completed with error (keeping for late subscribers): ${streamId}`,
);
return;
}
// Immediate cleanup if configured (default: true) - only for successful completions
if (this._cleanupOnComplete) {
this.runtimeState.delete(streamId);
// Don't cleanup eventTransport here - let the done event fully transmit first.
@ -536,9 +563,8 @@ class GenerationJobManagerClass {
} else {
// Only update status if keeping the job around
await this.jobStore.updateJob(streamId, {
status: error ? 'error' : 'complete',
status: 'complete',
completedAt: Date.now(),
error,
});
}
@ -678,14 +704,22 @@ class GenerationJobManagerClass {
const jobData = await this.jobStore.getJob(streamId);
// If job already complete, send final event
// If job already complete/error, send final event or error
// Error status takes precedence to ensure errors aren't misreported as successes
setImmediate(() => {
if (
runtime.finalEvent &&
jobData &&
['complete', 'error', 'aborted'].includes(jobData.status)
) {
onDone?.(runtime.finalEvent);
if (jobData && ['complete', 'error', 'aborted'].includes(jobData.status)) {
// Check for error status FIRST and prioritize error handling
if (jobData.status === 'error' && (runtime.errorEvent || jobData.error)) {
const errorToSend = runtime.errorEvent ?? jobData.error;
if (errorToSend) {
logger.debug(
`[GenerationJobManager] Sending stored error to late subscriber: ${streamId}`,
);
onError?.(errorToSend);
}
} else if (runtime.finalEvent) {
onDone?.(runtime.finalEvent);
}
}
});
@ -986,8 +1020,18 @@ class GenerationJobManagerClass {
/**
* Emit an error event.
* Stores the error for late-connecting subscribers (race condition where error
* occurs before client connects to SSE stream).
*/
emitError(streamId: string, error: string): void {
const runtime = this.runtimeState.get(streamId);
if (runtime) {
runtime.errorEvent = error;
}
// Persist error to job store for cross-replica consistency
this.jobStore.updateJob(streamId, { error }).catch((err) => {
logger.error(`[GenerationJobManager] Failed to persist error:`, err);
});
this.eventTransport.emitError(streamId, error);
}

View file

@ -796,6 +796,282 @@ describe('GenerationJobManager Integration Tests', () => {
});
});
describe('Error Preservation for Late Subscribers', () => {
/**
* These tests verify the fix for the race condition where errors
* (like INPUT_LENGTH) occur before the SSE client connects.
*
* Problem: Error emitError completeJob job deleted client connects 404
* Fix: Store error, don't delete job immediately, send error to late subscriber
*/
test('should store error in emitError for late-connecting subscribers', async () => {
const { GenerationJobManager } = await import('../GenerationJobManager');
const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
GenerationJobManager.configure({
jobStore: new InMemoryJobStore({ ttlAfterComplete: 60000 }),
eventTransport: new InMemoryEventTransport(),
isRedis: false,
cleanupOnComplete: false,
});
await GenerationJobManager.initialize();
const streamId = `error-store-${Date.now()}`;
await GenerationJobManager.createJob(streamId, 'user-1');
const errorMessage = '{ "type": "INPUT_LENGTH", "info": "234856 / 172627" }';
// Emit error (no subscribers yet - simulates race condition)
GenerationJobManager.emitError(streamId, errorMessage);
// Wait for async job store update
await new Promise((resolve) => setTimeout(resolve, 50));
// Verify error is stored in job store
const job = await GenerationJobManager.getJob(streamId);
expect(job?.error).toBe(errorMessage);
await GenerationJobManager.destroy();
});
test('should NOT delete job immediately when completeJob is called with error', async () => {
const { GenerationJobManager } = await import('../GenerationJobManager');
const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
GenerationJobManager.configure({
jobStore: new InMemoryJobStore({ ttlAfterComplete: 60000 }),
eventTransport: new InMemoryEventTransport(),
isRedis: false,
cleanupOnComplete: true, // Default behavior
});
await GenerationJobManager.initialize();
const streamId = `error-no-delete-${Date.now()}`;
await GenerationJobManager.createJob(streamId, 'user-1');
const errorMessage = 'Test error message';
// Complete with error
await GenerationJobManager.completeJob(streamId, errorMessage);
// Job should still exist (not deleted)
const hasJob = await GenerationJobManager.hasJob(streamId);
expect(hasJob).toBe(true);
// Job should have error status
const job = await GenerationJobManager.getJob(streamId);
expect(job?.status).toBe('error');
expect(job?.error).toBe(errorMessage);
await GenerationJobManager.destroy();
});
test('should send stored error to late-connecting subscriber', async () => {
const { GenerationJobManager } = await import('../GenerationJobManager');
const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
GenerationJobManager.configure({
jobStore: new InMemoryJobStore({ ttlAfterComplete: 60000 }),
eventTransport: new InMemoryEventTransport(),
isRedis: false,
cleanupOnComplete: true,
});
await GenerationJobManager.initialize();
const streamId = `error-late-sub-${Date.now()}`;
await GenerationJobManager.createJob(streamId, 'user-1');
const errorMessage = '{ "type": "INPUT_LENGTH", "info": "234856 / 172627" }';
// Simulate race condition: error occurs before client connects
GenerationJobManager.emitError(streamId, errorMessage);
await GenerationJobManager.completeJob(streamId, errorMessage);
// Wait for async operations
await new Promise((resolve) => setTimeout(resolve, 50));
// Now client connects (late subscriber)
let receivedError: string | undefined;
const subscription = await GenerationJobManager.subscribe(
streamId,
() => {}, // onChunk
() => {}, // onDone
(error) => {
receivedError = error;
}, // onError
);
expect(subscription).not.toBeNull();
// Wait for setImmediate in subscribe to fire
await new Promise((resolve) => setTimeout(resolve, 50));
// Late subscriber should receive the stored error
expect(receivedError).toBe(errorMessage);
subscription?.unsubscribe();
await GenerationJobManager.destroy();
});
test('should prioritize error status over finalEvent in subscribe', async () => {
const { GenerationJobManager } = await import('../GenerationJobManager');
const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
GenerationJobManager.configure({
jobStore: new InMemoryJobStore({ ttlAfterComplete: 60000 }),
eventTransport: new InMemoryEventTransport(),
isRedis: false,
cleanupOnComplete: false,
});
await GenerationJobManager.initialize();
const streamId = `error-priority-${Date.now()}`;
await GenerationJobManager.createJob(streamId, 'user-1');
const errorMessage = 'Error should take priority';
// Emit error and complete with error
GenerationJobManager.emitError(streamId, errorMessage);
await GenerationJobManager.completeJob(streamId, errorMessage);
await new Promise((resolve) => setTimeout(resolve, 50));
// Subscribe and verify error is received (not a done event)
let receivedError: string | undefined;
let receivedDone = false;
const subscription = await GenerationJobManager.subscribe(
streamId,
() => {},
() => {
receivedDone = true;
},
(error) => {
receivedError = error;
},
);
await new Promise((resolve) => setTimeout(resolve, 50));
// Error should be received, not done
expect(receivedError).toBe(errorMessage);
expect(receivedDone).toBe(false);
subscription?.unsubscribe();
await GenerationJobManager.destroy();
});
test('should handle error preservation in Redis mode (cross-replica)', async () => {
if (!ioredisClient) {
console.warn('Redis not available, skipping test');
return;
}
const { createStreamServices } = await import('../createStreamServices');
const { RedisJobStore } = await import('../implementations/RedisJobStore');
// === Replica A: Creates job and emits error ===
const replicaAJobStore = new RedisJobStore(ioredisClient);
await replicaAJobStore.initialize();
const streamId = `redis-error-${Date.now()}`;
const errorMessage = '{ "type": "INPUT_LENGTH", "info": "234856 / 172627" }';
await replicaAJobStore.createJob(streamId, 'user-1');
await replicaAJobStore.updateJob(streamId, {
status: 'error',
error: errorMessage,
completedAt: Date.now(),
});
// === Replica B: Fresh manager receives client connection ===
jest.resetModules();
const { GenerationJobManager } = await import('../GenerationJobManager');
const services = createStreamServices({
useRedis: true,
redisClient: ioredisClient,
});
GenerationJobManager.configure({
...services,
cleanupOnComplete: false,
});
await GenerationJobManager.initialize();
// Client connects to Replica B (job created on Replica A)
let receivedError: string | undefined;
const subscription = await GenerationJobManager.subscribe(
streamId,
() => {},
() => {},
(error) => {
receivedError = error;
},
);
expect(subscription).not.toBeNull();
await new Promise((resolve) => setTimeout(resolve, 100));
// Error should be loaded from Redis and sent to subscriber
expect(receivedError).toBe(errorMessage);
subscription?.unsubscribe();
await GenerationJobManager.destroy();
await replicaAJobStore.destroy();
});
test('error jobs should be cleaned up by periodic cleanup after TTL', async () => {
const { GenerationJobManager } = await import('../GenerationJobManager');
const { InMemoryJobStore } = await import('../implementations/InMemoryJobStore');
const { InMemoryEventTransport } = await import('../implementations/InMemoryEventTransport');
// Use a very short TTL for testing
const jobStore = new InMemoryJobStore({ ttlAfterComplete: 100 });
GenerationJobManager.configure({
jobStore,
eventTransport: new InMemoryEventTransport(),
isRedis: false,
cleanupOnComplete: true,
});
await GenerationJobManager.initialize();
const streamId = `error-cleanup-${Date.now()}`;
await GenerationJobManager.createJob(streamId, 'user-1');
// Complete with error
await GenerationJobManager.completeJob(streamId, 'Test error');
// Job should exist immediately after error
let hasJob = await GenerationJobManager.hasJob(streamId);
expect(hasJob).toBe(true);
// Wait for TTL to expire
await new Promise((resolve) => setTimeout(resolve, 150));
// Trigger cleanup
// Cleanup is invoked directly on the store rather than waiting for the
// periodic timer, keeping the test fast and deterministic.
await jobStore.cleanup();
// Job should be cleaned up after TTL
hasJob = await GenerationJobManager.hasJob(streamId);
expect(hasJob).toBe(false);
await GenerationJobManager.destroy();
});
});
describe('createStreamServices Auto-Detection', () => {
test('should auto-detect Redis when USE_REDIS is true', async () => {
if (!ioredisClient) {

View file

@ -79,7 +79,11 @@ export class InMemoryEventTransport implements IEventTransport {
emitError(streamId: string, error: string): void {
const state = this.streams.get(streamId);
state?.emitter.emit('error', error);
// Only emit if there are listeners - Node.js throws on unhandled 'error' events
// This is intentional for the race condition where error occurs before client connects
if (state?.emitter.listenerCount('error') ?? 0 > 0) {
state?.emitter.emit('error', error);
}
}
getSubscriberCount(streamId: string): number {

View file

@ -1,4 +1,3 @@
export * from './gemini';
export * from './imageContext';
export * from './oai';
export * from './yt';

View file

@ -1,61 +0,0 @@
import { z } from 'zod';
export const ytToolkit = {
youtube_search: {
name: 'youtube_search' as const,
description: `Search for YouTube videos by keyword or phrase.
- Required: query (search terms to find videos)
- Optional: maxResults (number of videos to return, 1-50, default: 5)
- Returns: List of videos with titles, descriptions, and URLs
- Use for: Finding specific videos, exploring content, research
Example: query="cooking pasta tutorials" maxResults=3` as const,
schema: z.object({
query: z.string().describe('Search query terms'),
maxResults: z.number().int().min(1).max(50).optional().describe('Number of results (1-50)'),
}),
},
youtube_info: {
name: 'youtube_info' as const,
description: `Get detailed metadata and statistics for a specific YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Video title, description, view count, like count, comment count
- Use for: Getting video metrics and basic metadata
- DO NOT USE FOR VIDEO SUMMARIES, USE TRANSCRIPTS FOR COMPREHENSIVE ANALYSIS
- Accepts both full URLs and video IDs
Example: url="https://youtube.com/watch?v=abc123" or url="abc123"` as const,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
} as const,
youtube_comments: {
name: 'youtube_comments',
description: `Retrieve top-level comments from a YouTube video.
- Required: url (full YouTube URL or video ID)
- Optional: maxResults (number of comments, 1-50, default: 10)
- Returns: Comment text, author names, like counts
- Use for: Sentiment analysis, audience feedback, engagement review
Example: url="abc123" maxResults=20`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
maxResults: z
.number()
.int()
.min(1)
.max(50)
.optional()
.describe('Number of comments to retrieve'),
}),
} as const,
youtube_transcript: {
name: 'youtube_transcript',
description: `Fetch and parse the transcript/captions of a YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Full video transcript as plain text
- Use for: Content analysis, summarization, translation reference
- This is the "Go-to" tool for analyzing actual video content
- Attempts to fetch English first, then German, then any available language
Example: url="https://youtube.com/watch?v=abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
} as const,
} as const;

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/client",
"version": "0.4.4",
"version": "0.4.50",
"description": "React components for LibreChat",
"repository": {
"type": "git",

View file

@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.8.220",
"version": "0.8.230",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View file

@ -14,7 +14,7 @@ describe('bedrockInputParser', () => {
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
});
test('should match anthropic.claude-sonnet-4 model', () => {
test('should match anthropic.claude-sonnet-4 model with 1M context header', () => {
const input = {
model: 'anthropic.claude-sonnet-4',
};
@ -22,10 +22,13 @@ describe('bedrockInputParser', () => {
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match anthropic.claude-opus-5 model', () => {
test('should match anthropic.claude-opus-5 model without 1M context header', () => {
const input = {
model: 'anthropic.claude-opus-5',
};
@ -36,7 +39,7 @@ describe('bedrockInputParser', () => {
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
});
test('should match anthropic.claude-haiku-6 model', () => {
test('should match anthropic.claude-haiku-6 model without 1M context header', () => {
const input = {
model: 'anthropic.claude-haiku-6',
};
@ -47,7 +50,7 @@ describe('bedrockInputParser', () => {
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
});
test('should match anthropic.claude-4-sonnet model', () => {
test('should match anthropic.claude-4-sonnet model with 1M context header', () => {
const input = {
model: 'anthropic.claude-4-sonnet',
};
@ -55,10 +58,13 @@ describe('bedrockInputParser', () => {
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match anthropic.claude-4.5-sonnet model', () => {
test('should match anthropic.claude-4.5-sonnet model with 1M context header', () => {
const input = {
model: 'anthropic.claude-4.5-sonnet',
};
@ -66,10 +72,13 @@ describe('bedrockInputParser', () => {
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match anthropic.claude-4-7-sonnet model', () => {
test('should match anthropic.claude-4-7-sonnet model with 1M context header', () => {
const input = {
model: 'anthropic.claude-4-7-sonnet',
};
@ -77,7 +86,24 @@ describe('bedrockInputParser', () => {
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.anthropic_beta).toEqual(['output-128k-2025-02-19']);
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should match anthropic.claude-sonnet-4-20250514-v1:0 with full model ID', () => {
const input = {
model: 'anthropic.claude-sonnet-4-20250514-v1:0',
};
const result = bedrockInputParser.parse(input) as BedrockConverseInput;
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBe(true);
expect(additionalFields.thinkingBudget).toBe(2000);
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should not match non-Claude models', () => {
@ -110,7 +136,7 @@ describe('bedrockInputParser', () => {
expect(additionalFields?.anthropic_beta).toBeUndefined();
});
test('should respect explicit thinking configuration', () => {
test('should respect explicit thinking configuration but still add beta headers', () => {
const input = {
model: 'anthropic.claude-sonnet-4',
thinking: false,
@ -119,6 +145,10 @@ describe('bedrockInputParser', () => {
const additionalFields = result.additionalModelRequestFields as Record<string, unknown>;
expect(additionalFields.thinking).toBeUndefined();
expect(additionalFields.thinkingBudget).toBeUndefined();
expect(additionalFields.anthropic_beta).toEqual([
'output-128k-2025-02-19',
'context-1m-2025-08-07',
]);
});
test('should respect custom thinking budget', () => {

View file

@ -15,6 +15,36 @@ type AnthropicInput = BedrockConverseInput & {
AnthropicReasoning;
};
/**
 * Gets the appropriate anthropic_beta headers for Bedrock Anthropic models.
 * Bedrock uses `anthropic_beta` (with underscore) in additionalModelRequestFields.
 *
 * @param model - The Bedrock model identifier (e.g., "anthropic.claude-sonnet-4-20250514-v1:0")
 * @returns Array of beta header strings, or empty array if not applicable
 */
function getBedrockAnthropicBetaHeaders(model: string): string[] {
  const betaHeaders: string[] = [];
  // Matches Claude 3.7 Sonnet, or any Claude 4-9 sonnet/opus/haiku variant in
  // either naming order: "claude-4-sonnet" (with optional ".x" / "-n" parts,
  // e.g. "claude-4.5-sonnet", "claude-4-7-sonnet") or "claude-sonnet-4".
  const isClaudeThinkingModel =
    model.includes('anthropic.claude-3-7-sonnet') ||
    /anthropic\.claude-(?:[4-9](?:\.\d+)?(?:-\d+)?-(?:sonnet|opus|haiku)|(?:sonnet|opus|haiku)-[4-9])/.test(
      model,
    );
  // Sonnet-only subset of the above (versions 4-9, both naming orders):
  // these models additionally receive the 1M-context beta header.
  const isSonnet4PlusModel =
    /anthropic\.claude-(?:sonnet-[4-9]|[4-9](?:\.\d+)?(?:-\d+)?-sonnet)/.test(model);
  if (isClaudeThinkingModel) {
    // Extended 128k output beta.
    betaHeaders.push('output-128k-2025-02-19');
  }
  if (isSonnet4PlusModel) {
    // 1M token context window beta (Sonnet 4+ only).
    betaHeaders.push('context-1m-2025-08-07');
  }
  return betaHeaders;
}
export const bedrockInputSchema = s.tConversationSchema
.pick({
/* LibreChat params; optionType: 'conversation' */
@ -138,7 +168,10 @@ export const bedrockInputParser = s.tConversationSchema
additionalFields.thinkingBudget = 2000;
}
if (typedData.model.includes('anthropic.')) {
additionalFields.anthropic_beta = ['output-128k-2025-02-19'];
const betaHeaders = getBedrockAnthropicBetaHeaders(typedData.model);
if (betaHeaders.length > 0) {
additionalFields.anthropic_beta = betaHeaders;
}
}
} else if (additionalFields.thinking != null || additionalFields.thinkingBudget != null) {
delete additionalFields.thinking;

View file

@ -1702,7 +1702,7 @@ export enum TTSProviders {
/** Enum for app-wide constants */
export enum Constants {
/** Key for the app's version. */
VERSION = 'v0.8.2-rc2',
VERSION = 'v0.8.2-rc3',
/** Key for the Custom Config's version (librechat.yaml). */
CONFIG_VERSION = '1.3.1',
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */

View file

@ -32,11 +32,11 @@ export type FavoriteItem = {
};
export function getFavorites(): Promise<FavoriteItem[]> {
return request.get('/api/user/settings/favorites');
return request.get(`${endpoints.apiBaseUrl()}/api/user/settings/favorites`);
}
export function updateFavorites(favorites: FavoriteItem[]): Promise<FavoriteItem[]> {
return request.post('/api/user/settings/favorites', { favorites });
return request.post(`${endpoints.apiBaseUrl()}/api/user/settings/favorites`, { favorites });
}
export function getSharedMessages(shareId: string): Promise<t.TSharedMessagesResponse> {

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/data-schemas",
"version": "0.0.33",
"version": "0.0.34",
"description": "Mongoose schemas and models for LibreChat",
"type": "module",
"main": "dist/index.cjs",
@ -44,7 +44,6 @@
"@rollup/plugin-replace": "^5.0.5",
"@rollup/plugin-terser": "^0.4.4",
"@rollup/plugin-typescript": "^12.1.2",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/node": "^20.3.0",

View file

@ -6,10 +6,20 @@ import { createMessageModel } from '~/models/message';
import { SchemaWithMeiliMethods } from '~/models/plugins/mongoMeili';
const mockAddDocuments = jest.fn();
const mockAddDocumentsInBatches = jest.fn();
const mockUpdateDocuments = jest.fn();
const mockDeleteDocument = jest.fn();
const mockDeleteDocuments = jest.fn();
const mockGetDocument = jest.fn();
const mockIndex = jest.fn().mockReturnValue({
getRawInfo: jest.fn(),
updateSettings: jest.fn(),
addDocuments: mockAddDocuments,
addDocumentsInBatches: mockAddDocumentsInBatches,
updateDocuments: mockUpdateDocuments,
deleteDocument: mockDeleteDocument,
deleteDocuments: mockDeleteDocuments,
getDocument: mockGetDocument,
getDocuments: jest.fn().mockReturnValue({ results: [] }),
});
jest.mock('meilisearch', () => {
@ -42,6 +52,11 @@ describe('Meilisearch Mongoose plugin', () => {
beforeEach(() => {
mockAddDocuments.mockClear();
mockAddDocumentsInBatches.mockClear();
mockUpdateDocuments.mockClear();
mockDeleteDocument.mockClear();
mockDeleteDocuments.mockClear();
mockGetDocument.mockClear();
});
afterAll(async () => {
@ -129,4 +144,485 @@ describe('Meilisearch Mongoose plugin', () => {
expect(mockAddDocuments).not.toHaveBeenCalled();
});
describe('estimatedDocumentCount usage in syncWithMeili', () => {
test('syncWithMeili completes successfully with estimatedDocumentCount', async () => {
// Clear any previous documents
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
// Create test documents
await conversationModel.create({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Conversation 1',
endpoint: EModelEndpoint.openAI,
});
await conversationModel.create({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Conversation 2',
endpoint: EModelEndpoint.openAI,
});
// Trigger sync - should use estimatedDocumentCount internally
await expect(conversationModel.syncWithMeili()).resolves.not.toThrow();
// Verify documents were processed
expect(mockAddDocuments).toHaveBeenCalled();
});
test('syncWithMeili handles empty collection correctly', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
// Verify collection is empty
const count = await messageModel.estimatedDocumentCount();
expect(count).toBe(0);
// Sync should complete without error even with 0 estimated documents
await expect(messageModel.syncWithMeili()).resolves.not.toThrow();
});
test('estimatedDocumentCount returns count for non-empty collection', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
// Create documents
await conversationModel.create({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test 1',
endpoint: EModelEndpoint.openAI,
});
await conversationModel.create({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test 2',
endpoint: EModelEndpoint.openAI,
});
const estimatedCount = await conversationModel.estimatedDocumentCount();
expect(estimatedCount).toBeGreaterThanOrEqual(2);
});
test('estimatedDocumentCount is available on model', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
// Verify the method exists and is callable
expect(typeof messageModel.estimatedDocumentCount).toBe('function');
// Should be able to call it
const result = await messageModel.estimatedDocumentCount();
expect(typeof result).toBe('number');
expect(result).toBeGreaterThanOrEqual(0);
});
test('syncWithMeili handles mix of syncable and TTL documents correctly', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
mockAddDocuments.mockClear();
// Create syncable documents (expiredAt: null)
await messageModel.create({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
expiredAt: null,
});
await messageModel.create({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: false,
expiredAt: null,
});
// Create TTL documents (expiredAt set to a date)
await messageModel.create({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
expiredAt: new Date(),
});
await messageModel.create({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: false,
expiredAt: new Date(),
});
// estimatedDocumentCount should count all documents (both syncable and TTL)
const estimatedCount = await messageModel.estimatedDocumentCount();
expect(estimatedCount).toBe(4);
// Actual syncable documents (expiredAt: null)
const syncableCount = await messageModel.countDocuments({ expiredAt: null });
expect(syncableCount).toBe(2);
// Sync should complete successfully even though estimated count is higher than processed count
await expect(messageModel.syncWithMeili()).resolves.not.toThrow();
// Only syncable documents should be indexed (2 documents, not 4)
// The mock should be called once per batch, and we have 2 documents
expect(mockAddDocuments).toHaveBeenCalled();
// Verify that only 2 documents were indexed (the syncable ones)
const indexedCount = await messageModel.countDocuments({ _meiliIndex: true });
expect(indexedCount).toBe(2);
});
});
describe('New batch processing and retry functionality', () => {
test('processSyncBatch uses addDocumentsInBatches', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
mockAddDocumentsInBatches.mockClear();
mockAddDocuments.mockClear();
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
// Run sync which should call processSyncBatch internally
await conversationModel.syncWithMeili();
// Verify addDocumentsInBatches was called (new batch method)
expect(mockAddDocumentsInBatches).toHaveBeenCalled();
});
test('addObjectToMeili retries on failure', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
// Mock addDocuments to fail twice then succeed
mockAddDocuments
.mockRejectedValueOnce(new Error('Network error'))
.mockRejectedValueOnce(new Error('Network error'))
.mockResolvedValueOnce({});
// Create a document which triggers addObjectToMeili
await conversationModel.create({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Retry',
endpoint: EModelEndpoint.openAI,
});
// Wait for async operations to complete
await new Promise((resolve) => setTimeout(resolve, 100));
// Verify addDocuments was called multiple times due to retries
expect(mockAddDocuments).toHaveBeenCalledTimes(3);
});
test('getSyncProgress returns accurate progress information', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
// Insert documents directly to control the _meiliIndex flag
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Indexed',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
});
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Not Indexed',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
const progress = await conversationModel.getSyncProgress();
expect(progress.totalDocuments).toBe(2);
expect(progress.totalProcessed).toBe(1);
expect(progress.isComplete).toBe(false);
});
test('getSyncProgress excludes TTL documents from counts', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
// Insert syncable documents (expiredAt: null)
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Syncable Indexed',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: null,
});
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Syncable Not Indexed',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
// Insert TTL documents (expiredAt set) - these should NOT be counted
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'TTL Document 1',
endpoint: EModelEndpoint.openAI,
_meiliIndex: true,
expiredAt: new Date(),
});
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'TTL Document 2',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: new Date(),
});
const progress = await conversationModel.getSyncProgress();
// Only syncable documents should be counted (2 total, 1 indexed)
expect(progress.totalDocuments).toBe(2);
expect(progress.totalProcessed).toBe(1);
expect(progress.isComplete).toBe(false);
});
test('getSyncProgress shows completion when all syncable documents are indexed', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
// All syncable documents are indexed
await messageModel.collection.insertOne({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: true,
expiredAt: null,
});
await messageModel.collection.insertOne({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: false,
_meiliIndex: true,
expiredAt: null,
});
// Add TTL document - should not affect completion status
await messageModel.collection.insertOne({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: false,
expiredAt: new Date(),
});
const progress = await messageModel.getSyncProgress();
expect(progress.totalDocuments).toBe(2);
expect(progress.totalProcessed).toBe(2);
expect(progress.isComplete).toBe(true);
});
});
describe('Error handling in processSyncBatch', () => {
test('syncWithMeili fails when processSyncBatch encounters addDocumentsInBatches error', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
mockAddDocumentsInBatches.mockClear();
// Insert a document to sync
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
// Mock addDocumentsInBatches to fail
mockAddDocumentsInBatches.mockRejectedValueOnce(new Error('MeiliSearch connection error'));
// Sync should throw the error
await expect(conversationModel.syncWithMeili()).rejects.toThrow(
'MeiliSearch connection error',
);
// Verify the error was logged
expect(mockAddDocumentsInBatches).toHaveBeenCalled();
// Document should NOT be marked as indexed since sync failed
// Note: direct collection.insertOne doesn't set default values, so _meiliIndex may be undefined
const doc = await conversationModel.findOne({});
expect(doc?._meiliIndex).not.toBe(true);
});
test('syncWithMeili fails when processSyncBatch encounters updateMany error', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
mockAddDocumentsInBatches.mockClear();
// Insert a document
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
// Mock addDocumentsInBatches to succeed but simulate updateMany failure
mockAddDocumentsInBatches.mockResolvedValueOnce({});
// Spy on updateMany and make it fail
const updateManySpy = jest
.spyOn(conversationModel, 'updateMany')
.mockRejectedValueOnce(new Error('Database connection error'));
// Sync should throw the error
await expect(conversationModel.syncWithMeili()).rejects.toThrow('Database connection error');
expect(updateManySpy).toHaveBeenCalled();
// Restore original implementation
updateManySpy.mockRestore();
});
test('processSyncBatch logs error and throws when addDocumentsInBatches fails', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
mockAddDocumentsInBatches.mockRejectedValueOnce(new Error('Network timeout'));
await messageModel.collection.insertOne({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: false,
expiredAt: null,
});
const indexMock = mockIndex();
const documents = await messageModel.find({ _meiliIndex: false }).lean();
// Should throw the error
await expect(messageModel.processSyncBatch(indexMock, documents)).rejects.toThrow(
'Network timeout',
);
expect(mockAddDocumentsInBatches).toHaveBeenCalled();
});
test('processSyncBatch handles empty document array gracefully', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
const indexMock = mockIndex();
// Should not throw with empty array
await expect(conversationModel.processSyncBatch(indexMock, [])).resolves.not.toThrow();
// Should not call addDocumentsInBatches
expect(mockAddDocumentsInBatches).not.toHaveBeenCalled();
});
test('syncWithMeili stops processing when batch fails and does not process remaining documents', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
mockAddDocumentsInBatches.mockClear();
// Create multiple documents
for (let i = 0; i < 5; i++) {
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: `Test Conversation ${i}`,
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
}
// Mock addDocumentsInBatches to fail on first call
mockAddDocumentsInBatches.mockRejectedValueOnce(new Error('First batch failed'));
// Sync should fail on the first batch
await expect(conversationModel.syncWithMeili()).rejects.toThrow('First batch failed');
// Should have attempted only once before failing
expect(mockAddDocumentsInBatches).toHaveBeenCalledTimes(1);
// No documents should be indexed since sync failed
const indexedCount = await conversationModel.countDocuments({ _meiliIndex: true });
expect(indexedCount).toBe(0);
});
test('error in processSyncBatch is properly logged before being thrown', async () => {
const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
await messageModel.deleteMany({});
const testError = new Error('Test error for logging');
mockAddDocumentsInBatches.mockRejectedValueOnce(testError);
await messageModel.collection.insertOne({
messageId: new mongoose.Types.ObjectId(),
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
isCreatedByUser: true,
_meiliIndex: false,
expiredAt: null,
});
const indexMock = mockIndex();
const documents = await messageModel.find({ _meiliIndex: false }).lean();
// Should throw the same error that was passed to it
await expect(messageModel.processSyncBatch(indexMock, documents)).rejects.toThrow(testError);
});
test('syncWithMeili properly propagates processSyncBatch errors', async () => {
const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
await conversationModel.deleteMany({});
mockAddDocumentsInBatches.mockClear();
await conversationModel.collection.insertOne({
conversationId: new mongoose.Types.ObjectId(),
user: new mongoose.Types.ObjectId(),
title: 'Test',
endpoint: EModelEndpoint.openAI,
_meiliIndex: false,
expiredAt: null,
});
const customError = new Error('Custom sync error');
mockAddDocumentsInBatches.mockRejectedValueOnce(customError);
// The error should propagate all the way up
await expect(conversationModel.syncWithMeili()).rejects.toThrow('Custom sync error');
});
});
});

View file

@ -50,17 +50,11 @@ interface _DocumentWithMeiliIndex extends Document {
export type DocumentWithMeiliIndex = _DocumentWithMeiliIndex & IConversation & Partial<IMessage>;
export interface SchemaWithMeiliMethods extends Model<DocumentWithMeiliIndex> {
syncWithMeili(options?: { resumeFromId?: string }): Promise<void>;
syncWithMeili(): Promise<void>;
getSyncProgress(): Promise<SyncProgress>;
processSyncBatch(
index: Index<MeiliIndexable>,
documents: Array<Record<string, unknown>>,
updateOps: Array<{
updateOne: {
filter: Record<string, unknown>;
update: { $set: { _meiliIndex: boolean } };
};
}>,
): Promise<void>;
cleanupMeiliIndex(
index: Index<MeiliIndexable>,
@ -156,8 +150,8 @@ const createMeiliMongooseModel = ({
* Get the current sync progress
*/
static async getSyncProgress(this: SchemaWithMeiliMethods): Promise<SyncProgress> {
const totalDocuments = await this.countDocuments();
const indexedDocuments = await this.countDocuments({ _meiliIndex: true });
const totalDocuments = await this.countDocuments({ expiredAt: null });
const indexedDocuments = await this.countDocuments({ expiredAt: null, _meiliIndex: true });
return {
totalProcessed: indexedDocuments,
@ -167,101 +161,79 @@ const createMeiliMongooseModel = ({
}
/**
* Synchronizes the data between the MongoDB collection and the MeiliSearch index.
* Now uses streaming and batching to reduce memory usage.
*/
static async syncWithMeili(
this: SchemaWithMeiliMethods,
options?: { resumeFromId?: string },
): Promise<void> {
* Synchronizes data between the MongoDB collection and the MeiliSearch index by
* incrementally indexing only documents where `expiredAt` is `null` and `_meiliIndex` is `false`
* (i.e., non-expired documents that have not yet been indexed).
* */
static async syncWithMeili(this: SchemaWithMeiliMethods): Promise<void> {
const startTime = Date.now();
const { batchSize, delayMs } = syncConfig;
const collectionName = primaryKey === 'messageId' ? 'messages' : 'conversations';
logger.info(
`[syncWithMeili] Starting sync for ${collectionName} with batch size ${batchSize}`,
);
// Get approximate total count for raw estimation, the sync should not overcome this number
const approxTotalCount = await this.estimatedDocumentCount();
logger.info(
`[syncWithMeili] Approximate total number of all ${collectionName}: ${approxTotalCount}`,
);
try {
const startTime = Date.now();
const { batchSize, delayMs } = syncConfig;
logger.info(
`[syncWithMeili] Starting sync for ${primaryKey === 'messageId' ? 'messages' : 'conversations'} with batch size ${batchSize}`,
);
// Build query with resume capability
// Do not sync TTL documents
const query: FilterQuery<unknown> = { expiredAt: null };
if (options?.resumeFromId) {
query._id = { $gt: options.resumeFromId };
}
// Get total count for progress tracking
const totalCount = await this.countDocuments(query);
let processedCount = 0;
// First, handle documents that need to be removed from Meili
logger.info(`[syncWithMeili] Starting cleanup of Meili index ${index.uid} before sync`);
await this.cleanupMeiliIndex(index, primaryKey, batchSize, delayMs);
// Process MongoDB documents in batches using cursor
const cursor = this.find(query)
.select(attributesToIndex.join(' ') + ' _meiliIndex')
.sort({ _id: 1 })
.batchSize(batchSize)
.cursor();
const format = (doc: Record<string, unknown>) =>
_.omitBy(_.pick(doc, attributesToIndex), (v, k) => k.startsWith('$'));
let documentBatch: Array<Record<string, unknown>> = [];
let updateOps: Array<{
updateOne: {
filter: Record<string, unknown>;
update: { $set: { _meiliIndex: boolean } };
};
}> = [];
// Process documents in streaming fashion
for await (const doc of cursor) {
const typedDoc = doc.toObject() as unknown as Record<string, unknown>;
const formatted = format(typedDoc);
// Check if document needs indexing
if (!typedDoc._meiliIndex) {
documentBatch.push(formatted);
updateOps.push({
updateOne: {
filter: { _id: typedDoc._id },
update: { $set: { _meiliIndex: true } },
},
});
}
processedCount++;
// Process batch when it reaches the configured size
if (documentBatch.length >= batchSize) {
await this.processSyncBatch(index, documentBatch, updateOps);
documentBatch = [];
updateOps = [];
// Log progress
const progress = Math.round((processedCount / totalCount) * 100);
logger.info(`[syncWithMeili] Progress: ${progress}% (${processedCount}/${totalCount})`);
// Add delay to prevent overwhelming resources
if (delayMs > 0) {
await new Promise((resolve) => setTimeout(resolve, delayMs));
}
}
}
// Process remaining documents
if (documentBatch.length > 0) {
await this.processSyncBatch(index, documentBatch, updateOps);
}
const duration = Date.now() - startTime;
logger.info(
`[syncWithMeili] Completed sync for ${primaryKey === 'messageId' ? 'messages' : 'conversations'} in ${duration}ms`,
);
logger.info(`[syncWithMeili] Completed cleanup of Meili index: ${index.uid}`);
} catch (error) {
logger.error('[syncWithMeili] Error during sync:', error);
logger.error('[syncWithMeili] Error during cleanup Meili before sync:', error);
throw error;
}
let processedCount = 0;
let hasMore = true;
while (hasMore) {
const query: FilterQuery<unknown> = {
expiredAt: null,
_meiliIndex: false,
};
try {
const documents = await this.find(query)
.select(attributesToIndex.join(' ') + ' _meiliIndex')
.limit(batchSize)
.lean();
// Check if there are more documents to process
if (documents.length === 0) {
logger.info('[syncWithMeili] No more documents to process');
break;
}
// Process the batch
await this.processSyncBatch(index, documents);
processedCount += documents.length;
logger.info(`[syncWithMeili] Processed: ${processedCount}`);
if (documents.length < batchSize) {
hasMore = false;
}
// Add delay to prevent overwhelming resources
if (hasMore && delayMs > 0) {
await new Promise((resolve) => setTimeout(resolve, delayMs));
}
} catch (error) {
logger.error('[syncWithMeili] Error processing documents batch:', error);
throw error;
}
}
const duration = Date.now() - startTime;
logger.info(
`[syncWithMeili] Completed sync for ${collectionName}. Processed ${processedCount} documents in ${duration}ms`,
);
}
/**
@ -271,28 +243,26 @@ const createMeiliMongooseModel = ({
this: SchemaWithMeiliMethods,
index: Index<MeiliIndexable>,
documents: Array<Record<string, unknown>>,
updateOps: Array<{
updateOne: {
filter: Record<string, unknown>;
update: { $set: { _meiliIndex: boolean } };
};
}>,
): Promise<void> {
if (documents.length === 0) {
return;
}
// Format documents for MeiliSearch
const formattedDocs = documents.map((doc) =>
_.omitBy(_.pick(doc, attributesToIndex), (_v, k) => k.startsWith('$')),
);
try {
// Add documents to MeiliSearch
await index.addDocuments(documents);
await index.addDocumentsInBatches(formattedDocs);
// Update MongoDB to mark documents as indexed
if (updateOps.length > 0) {
await this.collection.bulkWrite(updateOps);
}
const docsIds = documents.map((doc) => doc._id);
await this.updateMany({ _id: { $in: docsIds } }, { $set: { _meiliIndex: true } });
} catch (error) {
logger.error('[processSyncBatch] Error processing batch:', error);
// Don't throw - allow sync to continue with other documents
throw error;
}
}
@ -331,7 +301,7 @@ const createMeiliMongooseModel = ({
// Delete documents that don't exist in MongoDB
const toDelete = meiliIds.filter((id) => !existingIds.has(id));
if (toDelete.length > 0) {
await Promise.all(toDelete.map((id) => index.deleteDocument(id as string)));
await index.deleteDocuments(toDelete.map(String));
logger.debug(`[cleanupMeiliIndex] Deleted ${toDelete.length} orphaned documents`);
}