Merge branch 'dt-conf-logo' of https://github.com/daimlertruck/SRC-LibreChat into dt-conf-logo

This commit is contained in:
shambukorgal-dev 2026-01-22 13:46:50 +05:30
commit 280a39aef0
52 changed files with 2192 additions and 489 deletions

View file

@ -331,10 +331,6 @@ FLUX_API_BASE_URL=https://api.us1.bfl.ai
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# Stable Diffusion
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860

View file

@ -5,7 +5,6 @@ const DALLE3 = require('./structured/DALLE3');
const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
const StructuredWolfram = require('./structured/Wolfram');
const createYouTubeTools = require('./structured/YouTube');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
@ -25,7 +24,6 @@ module.exports = {
GoogleSearchAPI,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
createOpenAIImageTools,
createGeminiImageTool,

View file

@ -30,20 +30,6 @@
}
]
},
{
"name": "YouTube",
"pluginKey": "youtube",
"toolkit": true,
"description": "Get YouTube video information, retrieve comments, analyze transcripts and search for videos.",
"icon": "https://www.youtube.com/s/desktop/7449ebf7/img/favicon_144x144.png",
"authConfig": [
{
"authField": "YOUTUBE_API_KEY",
"label": "YouTube API Key",
"description": "Your YouTube Data API v3 key."
}
]
},
{
"name": "OpenAI Image Tools",
"pluginKey": "image_gen_oai",

View file

@ -2,6 +2,7 @@ const fs = require('fs');
const path = require('path');
const sharp = require('sharp');
const { v4 } = require('uuid');
const { ProxyAgent } = require('undici');
const { GoogleGenAI } = require('@google/genai');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
@ -21,6 +22,24 @@ const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { spendTokens } = require('~/models/spendTokens');
const { getFiles } = require('~/models/File');
/**
* Configure proxy support for Google APIs
* This wraps globalThis.fetch to add a proxy dispatcher only for googleapis.com URLs
* This is necessary because @google/genai SDK doesn't support custom fetch or httpOptions.dispatcher
*/
if (process.env.PROXY) {
  const originalFetch = globalThis.fetch;
  const proxyAgent = new ProxyAgent(process.env.PROXY);
  // Monkey-patch the global fetch so that only Google API traffic is routed
  // through the configured proxy; all other requests pass through untouched.
  // NOTE(review): this wrapper is installed at module load and never removed —
  // if this module were evaluated more than once the wrappers would stack;
  // confirm the module is only required a single time.
  globalThis.fetch = function (url, options = {}) {
    const urlString = url.toString();
    // Substring match intentionally covers every *.googleapis.com host
    // (e.g. generativelanguage.googleapis.com, aiplatform.googleapis.com).
    if (urlString.includes('googleapis.com')) {
      options = { ...options, dispatcher: proxyAgent };
    }
    return originalFetch.call(this, url, options);
  };
}
/**
* Get the default service key file path (consistent with main Google endpoint)
* @returns {string} - The default path to the service key file

View file

@ -1,137 +0,0 @@
const { ytToolkit } = require('@librechat/api');
const { tool } = require('@langchain/core/tools');
const { youtube } = require('@googleapis/youtube');
const { logger } = require('@librechat/data-schemas');
const { YoutubeTranscript } = require('youtube-transcript');
const { getApiKey } = require('./credentials');
/**
 * Resolves a YouTube video ID from either a bare 11-character ID or any
 * common YouTube URL form (watch, embed, shorts, live, v, youtu.be).
 * @param {string} url - Raw video ID or a YouTube URL.
 * @returns {string|null} The 11-character video ID, or null when none is found.
 */
function extractVideoId(url) {
  // A bare video ID needs no URL parsing.
  if (/^[a-zA-Z0-9_-]{11}$/.test(url)) {
    return url;
  }
  const idPattern =
    /(?:youtu\.be\/|youtube(?:\.com)?\/(?:(?:watch\?v=)|(?:embed\/)|(?:shorts\/)|(?:live\/)|(?:v\/)|(?:\/))?)([a-zA-Z0-9_-]{11})(?:\S+)?$/;
  const found = idPattern.exec(url);
  return found === null ? null : found[1];
}
/**
 * Flattens a YouTube transcript response into a single plain-text string.
 * Trims each caption entry, drops empty entries, and joins them with spaces.
 * @param {Array<{text: string}>} transcriptResponse - Entries returned by YoutubeTranscript.
 * @returns {string} The joined transcript text, or '' for non-array input.
 */
function parseTranscript(transcriptResponse) {
  if (!Array.isArray(transcriptResponse)) {
    return '';
  }
  return (
    transcriptResponse
      .map((entry) => entry.text.trim())
      .filter((text) => text)
      .join(' ')
      // Transcript text arrives HTML-escaped; decode the apostrophe entity.
      // (The scraped diff showed a mojibake''' here — the real argument
      // is the literal HTML entity string.)
      .replaceAll('&#39;', "'")
  );
}
/**
 * Builds the LangChain tools that back the YouTube plugin: video search,
 * video info, top-level comments, and transcript retrieval.
 *
 * @param {Object} [fields] - Optional credential configuration.
 * @param {string} [fields.apiKey] - Explicit YouTube Data API v3 key.
 * @param {string} [fields.YOUTUBE_API_KEY] - Alternate key field (env-style name).
 * @param {boolean} [fields.override] - Forwarded to getApiKey to control env fallback.
 * @returns {Array} Tools in order: [searchTool, infoTool, commentsTool, transcriptTool].
 */
function createYouTubeTools(fields = {}) {
  const envVar = 'YOUTUBE_API_KEY';
  const override = fields.override ?? false;
  const apiKey = fields.apiKey ?? fields[envVar] ?? getApiKey(envVar, override);
  const youtubeClient = youtube({
    version: 'v3',
    auth: apiKey,
  });

  // Free-text video search; returns JSON array of { title, description, url }.
  const searchTool = tool(async ({ query, maxResults = 5 }) => {
    const response = await youtubeClient.search.list({
      part: 'snippet',
      q: query,
      type: 'video',
      maxResults: maxResults || 5,
    });
    const result = response.data.items.map((item) => ({
      title: item.snippet.title,
      description: item.snippet.description,
      url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
    }));
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_search);

  // Snippet + statistics for a single video identified by URL or bare ID.
  const infoTool = tool(async ({ url }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
    const response = await youtubeClient.videos.list({
      part: 'snippet,statistics',
      id: videoId,
    });
    if (!response.data.items?.length) {
      throw new Error('Video not found');
    }
    const video = response.data.items[0];
    const result = {
      title: video.snippet.title,
      description: video.snippet.description,
      views: video.statistics.viewCount,
      likes: video.statistics.likeCount,
      comments: video.statistics.commentCount,
    };
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_info);

  // Top-level comment threads: { author, text, likes } per thread.
  const commentsTool = tool(async ({ url, maxResults = 10 }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
    const response = await youtubeClient.commentThreads.list({
      part: 'snippet',
      videoId,
      maxResults: maxResults || 10,
    });
    const result = response.data.items.map((item) => ({
      author: item.snippet.topLevelComment.snippet.authorDisplayName,
      text: item.snippet.topLevelComment.snippet.textDisplay,
      likes: item.snippet.topLevelComment.snippet.likeCount,
    }));
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_comments);

  // Transcript retrieval with a language-preference fallback chain.
  const transcriptTool = tool(async ({ url }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
    try {
      // Preferred languages in order; each failure is logged and the next tried.
      for (const lang of ['en', 'de']) {
        try {
          const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang });
          return parseTranscript(transcript);
        } catch (e) {
          logger.error(e);
        }
      }
      // Last resort: let the library pick whichever transcript track exists.
      const transcript = await YoutubeTranscript.fetchTranscript(videoId);
      return parseTranscript(transcript);
    } catch (error) {
      throw new Error(`Failed to fetch transcript: ${error.message}`);
    }
  }, ytToolkit.youtube_transcript);

  return [searchTool, infoTool, commentsTool, transcriptTool];
}

View file

@ -0,0 +1,125 @@
const { ProxyAgent } = require('undici');
/**
* These tests verify the proxy wrapper behavior for GeminiImageGen.
* Instead of loading the full module (which has many dependencies),
* we directly test the wrapper logic that would be applied.
*/
describe('GeminiImageGen Proxy Configuration', () => {
  let originalEnv;
  let originalFetch;

  beforeAll(() => {
    // Snapshot the real environment and global fetch once, so every test can
    // mutate them freely and be restored afterwards.
    originalEnv = { ...process.env };
    originalFetch = globalThis.fetch;
  });

  beforeEach(() => {
    process.env = { ...originalEnv };
    globalThis.fetch = originalFetch;
  });

  afterEach(() => {
    process.env = originalEnv;
    globalThis.fetch = originalFetch;
  });

  /**
   * Simulates the proxy wrapper that GeminiImageGen applies at module load.
   * This is the same logic from GeminiImageGen.js lines 30-42.
   */
  function applyProxyWrapper() {
    if (process.env.PROXY) {
      const _originalFetch = globalThis.fetch;
      const proxyAgent = new ProxyAgent(process.env.PROXY);
      globalThis.fetch = function (url, options = {}) {
        const urlString = url.toString();
        if (urlString.includes('googleapis.com')) {
          options = { ...options, dispatcher: proxyAgent };
        }
        return _originalFetch.call(this, url, options);
      };
    }
  }

  it('should wrap globalThis.fetch when PROXY env is set', () => {
    process.env.PROXY = 'http://proxy.example.com:8080';
    const fetchBeforeWrap = globalThis.fetch;
    applyProxyWrapper();
    // The wrapper replaces the global fetch with a new function reference.
    expect(globalThis.fetch).not.toBe(fetchBeforeWrap);
  });

  it('should not wrap globalThis.fetch when PROXY env is not set', () => {
    delete process.env.PROXY;
    const fetchBeforeWrap = globalThis.fetch;
    applyProxyWrapper();
    expect(globalThis.fetch).toBe(fetchBeforeWrap);
  });

  it('should add dispatcher to googleapis.com URLs', async () => {
    process.env.PROXY = 'http://proxy.example.com:8080';
    let capturedOptions = null;
    // Mock the underlying fetch so we can inspect the options the wrapper forwards.
    const mockFetch = jest.fn((url, options) => {
      capturedOptions = options;
      return Promise.resolve({ ok: true });
    });
    globalThis.fetch = mockFetch;
    applyProxyWrapper();
    await globalThis.fetch('https://generativelanguage.googleapis.com/v1/models', {});
    expect(capturedOptions).toBeDefined();
    expect(capturedOptions.dispatcher).toBeInstanceOf(ProxyAgent);
  });

  it('should not add dispatcher to non-googleapis.com URLs', async () => {
    process.env.PROXY = 'http://proxy.example.com:8080';
    let capturedOptions = null;
    const mockFetch = jest.fn((url, options) => {
      capturedOptions = options;
      return Promise.resolve({ ok: true });
    });
    globalThis.fetch = mockFetch;
    applyProxyWrapper();
    // Non-Google traffic must pass through without a proxy dispatcher attached.
    await globalThis.fetch('https://api.openai.com/v1/images', {});
    expect(capturedOptions).toBeDefined();
    expect(capturedOptions.dispatcher).toBeUndefined();
  });

  it('should preserve existing options when adding dispatcher', async () => {
    process.env.PROXY = 'http://proxy.example.com:8080';
    let capturedOptions = null;
    const mockFetch = jest.fn((url, options) => {
      capturedOptions = options;
      return Promise.resolve({ ok: true });
    });
    globalThis.fetch = mockFetch;
    applyProxyWrapper();
    const customHeaders = { 'X-Custom-Header': 'test' };
    await globalThis.fetch('https://aiplatform.googleapis.com/v1/models', {
      headers: customHeaders,
      method: 'POST',
    });
    // The wrapper spreads the caller's options, so headers/method survive.
    expect(capturedOptions).toBeDefined();
    expect(capturedOptions.dispatcher).toBeInstanceOf(ProxyAgent);
    expect(capturedOptions.headers).toEqual(customHeaders);
    expect(capturedOptions.method).toBe('POST');
  });
});

View file

@ -34,7 +34,6 @@ const {
StructuredACS,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
createGeminiImageTool,
createOpenAIImageTools,
@ -185,11 +184,6 @@ const loadTools = async ({
};
const customConstructors = {
youtube: async (_toolContextMap) => {
const authFields = getAuthFields('youtube');
const authValues = await loadAuthValues({ userId: user, authFields });
return createYouTubeTools(authValues);
},
image_gen_oai: async (toolContextMap) => {
const authFields = getAuthFields('image_gen_oai');
const authValues = await loadAuthValues({ userId: user, authFields });

View file

@ -43,10 +43,9 @@
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.27.0",
"@google/genai": "^1.19.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.66",
"@librechat/agents": "^3.0.77",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@ -112,7 +111,6 @@
"undici": "^7.10.0",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^5.0.0",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {

View file

@ -408,7 +408,7 @@ function createToolEndCallback({ req, res, artifactPromises, streamId = null })
const { url } = part.image_url;
artifactPromises.push(
(async () => {
const filename = `${output.name}_${output.tool_call_id}_img_${nanoid()}`;
const filename = `${output.name}_img_${nanoid()}`;
const file_id = output.artifact.file_ids?.[i];
const file = await saveBase64Image(url, {
req,

View file

@ -784,6 +784,7 @@ class AgentClient extends BaseClient {
if (!collectedUsage || !collectedUsage.length) {
return;
}
// Use first entry's input_tokens as the base input (represents initial user message context)
// Support both OpenAI format (input_token_details) and Anthropic format (cache_*_input_tokens)
const firstUsage = collectedUsage[0];
const input_tokens =
@ -795,10 +796,11 @@ class AgentClient extends BaseClient {
Number(firstUsage?.cache_read_input_tokens) ||
0);
let output_tokens = 0;
let previousTokens = input_tokens; // Start with original input
for (let i = 0; i < collectedUsage.length; i++) {
const usage = collectedUsage[i];
// Sum output_tokens directly from all entries - works for both sequential and parallel execution
// This avoids the incremental calculation that produced negative values for parallel agents
let total_output_tokens = 0;
for (const usage of collectedUsage) {
if (!usage) {
continue;
}
@ -811,6 +813,9 @@ class AgentClient extends BaseClient {
const cache_read =
Number(usage.input_token_details?.cache_read) || Number(usage.cache_read_input_tokens) || 0;
// Accumulate output tokens for the usage summary
total_output_tokens += Number(usage.output_tokens) || 0;
const txMetadata = {
context,
balance,
@ -821,18 +826,6 @@ class AgentClient extends BaseClient {
model: usage.model ?? model ?? this.model ?? this.options.agent.model_parameters.model,
};
if (i > 0) {
// Count new tokens generated (input_tokens minus previous accumulated tokens)
output_tokens +=
(Number(usage.input_tokens) || 0) + cache_creation + cache_read - previousTokens;
}
// Add this message's output tokens
output_tokens += Number(usage.output_tokens) || 0;
// Update previousTokens to include this message's output
previousTokens += Number(usage.output_tokens) || 0;
if (cache_creation > 0 || cache_read > 0) {
spendStructuredTokens(txMetadata, {
promptTokens: {
@ -862,7 +855,7 @@ class AgentClient extends BaseClient {
this.usage = {
input_tokens,
output_tokens,
output_tokens: total_output_tokens,
};
}

View file

@ -0,0 +1,712 @@
/**
* Tests for AgentClient.recordCollectedUsage
*
* This is a critical function that handles token spending for agent LLM calls.
* It must correctly handle:
* - Sequential execution (single agent with tool calls)
* - Parallel execution (multiple agents with independent inputs)
* - Cache token handling (OpenAI and Anthropic formats)
*/
const { EModelEndpoint } = require('librechat-data-provider');
// Mock dependencies before requiring the module
// (jest.mock calls are hoisted; the indirection through mockSpendTokens lets
// assertions inspect every spend call while keeping the mock factory pure).
const mockSpendTokens = jest.fn().mockResolvedValue();
const mockSpendStructuredTokens = jest.fn().mockResolvedValue();

jest.mock('~/models/spendTokens', () => ({
  spendTokens: (...args) => mockSpendTokens(...args),
  spendStructuredTokens: (...args) => mockSpendStructuredTokens(...args),
}));

jest.mock('~/config', () => ({
  // Silence all logging during the test run.
  logger: {
    debug: jest.fn(),
    error: jest.fn(),
    warn: jest.fn(),
    info: jest.fn(),
  },
  getMCPManager: jest.fn(() => ({
    formatInstructionsForContext: jest.fn(),
  })),
}));

jest.mock('@librechat/agents', () => ({
  ...jest.requireActual('@librechat/agents'),
  createMetadataAggregator: () => ({
    handleLLMEnd: jest.fn(),
    collected: [],
  }),
}));
const AgentClient = require('./client');
describe('AgentClient - recordCollectedUsage', () => {
  let client;
  let mockAgent;
  let mockOptions;

  beforeEach(() => {
    jest.clearAllMocks();
    // Minimal agent/request fixtures: only the fields recordCollectedUsage reads.
    mockAgent = {
      id: 'agent-123',
      endpoint: EModelEndpoint.openAI,
      provider: EModelEndpoint.openAI,
      model_parameters: {
        model: 'gpt-4',
      },
    };
    mockOptions = {
      req: {
        user: { id: 'user-123' },
        body: { model: 'gpt-4', endpoint: EModelEndpoint.openAI },
      },
      res: {},
      agent: mockAgent,
      endpointTokenConfig: {},
    };
    client = new AgentClient(mockOptions);
    client.conversationId = 'convo-123';
    client.user = 'user-123';
  });

  describe('basic functionality', () => {
    it('should return early if collectedUsage is empty', async () => {
      await client.recordCollectedUsage({
        collectedUsage: [],
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).not.toHaveBeenCalled();
      expect(mockSpendStructuredTokens).not.toHaveBeenCalled();
      expect(client.usage).toBeUndefined();
    });

    it('should return early if collectedUsage is null', async () => {
      await client.recordCollectedUsage({
        collectedUsage: null,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).not.toHaveBeenCalled();
      expect(client.usage).toBeUndefined();
    });

    it('should handle single usage entry correctly', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' }];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(1);
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({
          conversationId: 'convo-123',
          user: 'user-123',
          model: 'gpt-4',
        }),
        { promptTokens: 100, completionTokens: 50 },
      );
      expect(client.usage.input_tokens).toBe(100);
      expect(client.usage.output_tokens).toBe(50);
    });

    it('should skip null entries in collectedUsage', async () => {
      const collectedUsage = [
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        null,
        { input_tokens: 200, output_tokens: 60, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // Only the two non-null entries should be spent.
      expect(mockSpendTokens).toHaveBeenCalledTimes(2);
    });
  });

  describe('sequential execution (single agent with tool calls)', () => {
    it('should calculate tokens correctly for sequential tool calls', async () => {
      // Sequential flow: output of call N becomes part of input for call N+1
      // Call 1: input=100, output=50
      // Call 2: input=150 (100+50), output=30
      // Call 3: input=180 (150+30), output=20
      const collectedUsage = [
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        { input_tokens: 150, output_tokens: 30, model: 'gpt-4' },
        { input_tokens: 180, output_tokens: 20, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(3);
      // Total output should be sum of all output_tokens: 50 + 30 + 20 = 100
      expect(client.usage.output_tokens).toBe(100);
      expect(client.usage.input_tokens).toBe(100); // First entry's input
    });
  });

  describe('parallel execution (multiple agents)', () => {
    it('should handle parallel agents with independent input tokens', async () => {
      // Parallel agents have INDEPENDENT input tokens (not cumulative)
      // Agent A: input=100, output=50
      // Agent B: input=80, output=40 (different context, not 100+50)
      const collectedUsage = [
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        { input_tokens: 80, output_tokens: 40, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(2);
      // Expected total output: 50 + 40 = 90
      // output_tokens must be positive and should reflect total output
      expect(client.usage.output_tokens).toBeGreaterThan(0);
    });

    it('should NOT produce negative output_tokens for parallel execution', async () => {
      // Critical bug scenario: parallel agents where second agent has LOWER input tokens
      const collectedUsage = [
        { input_tokens: 200, output_tokens: 100, model: 'gpt-4' },
        { input_tokens: 50, output_tokens: 30, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // output_tokens MUST be positive for proper token tracking
      expect(client.usage.output_tokens).toBeGreaterThan(0);
      // Correct value should be 100 + 30 = 130
    });

    it('should calculate correct total output for parallel agents', async () => {
      // Three parallel agents with independent contexts
      const collectedUsage = [
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        { input_tokens: 120, output_tokens: 60, model: 'gpt-4-turbo' },
        { input_tokens: 80, output_tokens: 40, model: 'claude-3' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(3);
      // Total output should be 50 + 60 + 40 = 150
      expect(client.usage.output_tokens).toBe(150);
    });

    it('should handle worst-case parallel scenario without negative tokens', async () => {
      // Extreme case: first agent has very high input, subsequent have low
      const collectedUsage = [
        { input_tokens: 1000, output_tokens: 500, model: 'gpt-4' },
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        { input_tokens: 50, output_tokens: 25, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // Must be positive, should be 500 + 50 + 25 = 575
      expect(client.usage.output_tokens).toBeGreaterThan(0);
      expect(client.usage.output_tokens).toBe(575);
    });
  });

  describe('real-world scenarios', () => {
    it('should correctly sum output tokens for sequential tool calls with growing context', async () => {
      // Real production data: Claude Opus with multiple tool calls
      // Context grows as tool results are added, but output_tokens should only count model generations
      const collectedUsage = [
        {
          input_tokens: 31596,
          output_tokens: 151,
          total_tokens: 31747,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 35368,
          output_tokens: 150,
          total_tokens: 35518,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 58362,
          output_tokens: 295,
          total_tokens: 58657,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 112604,
          output_tokens: 193,
          total_tokens: 112797,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 257440,
          output_tokens: 2217,
          total_tokens: 259657,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // input_tokens should be first entry's input (initial context)
      expect(client.usage.input_tokens).toBe(31596);
      // output_tokens should be sum of all model outputs: 151 + 150 + 295 + 193 + 2217 = 3006
      // NOT the inflated value from incremental calculation (338,559)
      expect(client.usage.output_tokens).toBe(3006);
      // Verify spendTokens was called for each entry with correct values
      expect(mockSpendTokens).toHaveBeenCalledTimes(5);
      expect(mockSpendTokens).toHaveBeenNthCalledWith(
        1,
        expect.objectContaining({ model: 'claude-opus-4-5-20251101' }),
        { promptTokens: 31596, completionTokens: 151 },
      );
      expect(mockSpendTokens).toHaveBeenNthCalledWith(
        5,
        expect.objectContaining({ model: 'claude-opus-4-5-20251101' }),
        { promptTokens: 257440, completionTokens: 2217 },
      );
    });

    it('should handle single followup message correctly', async () => {
      // Real production data: followup to the above conversation
      const collectedUsage = [
        {
          input_tokens: 263406,
          output_tokens: 257,
          total_tokens: 263663,
          input_token_details: { cache_read: 0, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(client.usage.input_tokens).toBe(263406);
      expect(client.usage.output_tokens).toBe(257);
      expect(mockSpendTokens).toHaveBeenCalledTimes(1);
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'claude-opus-4-5-20251101' }),
        { promptTokens: 263406, completionTokens: 257 },
      );
    });

    it('should ensure output_tokens > 0 check passes for BaseClient.sendMessage', async () => {
      // This verifies the fix for the duplicate token spending bug
      // BaseClient.sendMessage checks: if (usage != null && Number(usage[this.outputTokensKey]) > 0)
      const collectedUsage = [
        {
          input_tokens: 31596,
          output_tokens: 151,
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 35368,
          output_tokens: 150,
          model: 'claude-opus-4-5-20251101',
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      const usage = client.getStreamUsage();
      // The check that was failing before the fix
      expect(usage).not.toBeNull();
      expect(Number(usage.output_tokens)).toBeGreaterThan(0);
      // Verify correct value
      expect(usage.output_tokens).toBe(301); // 151 + 150
    });

    it('should correctly handle cache tokens with multiple tool calls', async () => {
      // Real production data: Claude Opus with cache tokens (prompt caching)
      // First entry has cache_creation, subsequent entries have cache_read
      const collectedUsage = [
        {
          input_tokens: 788,
          output_tokens: 163,
          total_tokens: 951,
          input_token_details: { cache_read: 0, cache_creation: 30808 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 3802,
          output_tokens: 149,
          total_tokens: 3951,
          input_token_details: { cache_read: 30808, cache_creation: 768 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 26808,
          output_tokens: 225,
          total_tokens: 27033,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 80912,
          output_tokens: 204,
          total_tokens: 81116,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 136454,
          output_tokens: 206,
          total_tokens: 136660,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 146316,
          output_tokens: 224,
          total_tokens: 146540,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 150402,
          output_tokens: 1248,
          total_tokens: 151650,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 156268,
          output_tokens: 139,
          total_tokens: 156407,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
        {
          input_tokens: 167126,
          output_tokens: 2961,
          total_tokens: 170087,
          input_token_details: { cache_read: 31576, cache_creation: 0 },
          model: 'claude-opus-4-5-20251101',
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // input_tokens = first entry's input + cache_creation + cache_read
      // = 788 + 30808 + 0 = 31596
      expect(client.usage.input_tokens).toBe(31596);
      // output_tokens = sum of all output_tokens
      // = 163 + 149 + 225 + 204 + 206 + 224 + 1248 + 139 + 2961 = 5519
      expect(client.usage.output_tokens).toBe(5519);
      // First 2 entries have cache tokens, should use spendStructuredTokens
      // Remaining 7 entries have cache_read but no cache_creation, still structured
      expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(9);
      expect(mockSpendTokens).toHaveBeenCalledTimes(0);
      // Verify first entry uses structured tokens with cache_creation
      expect(mockSpendStructuredTokens).toHaveBeenNthCalledWith(
        1,
        expect.objectContaining({ model: 'claude-opus-4-5-20251101' }),
        {
          promptTokens: { input: 788, write: 30808, read: 0 },
          completionTokens: 163,
        },
      );
      // Verify second entry uses structured tokens with both cache_creation and cache_read
      expect(mockSpendStructuredTokens).toHaveBeenNthCalledWith(
        2,
        expect.objectContaining({ model: 'claude-opus-4-5-20251101' }),
        {
          promptTokens: { input: 3802, write: 768, read: 30808 },
          completionTokens: 149,
        },
      );
    });
  });

  describe('cache token handling', () => {
    it('should handle OpenAI format cache tokens (input_token_details)', async () => {
      const collectedUsage = [
        {
          input_tokens: 100,
          output_tokens: 50,
          model: 'gpt-4',
          input_token_details: {
            cache_creation: 20,
            cache_read: 10,
          },
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
      expect(mockSpendStructuredTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'gpt-4' }),
        {
          promptTokens: {
            input: 100,
            write: 20,
            read: 10,
          },
          completionTokens: 50,
        },
      );
    });

    it('should handle Anthropic format cache tokens (cache_*_input_tokens)', async () => {
      const collectedUsage = [
        {
          input_tokens: 100,
          output_tokens: 50,
          model: 'claude-3',
          cache_creation_input_tokens: 25,
          cache_read_input_tokens: 15,
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
      expect(mockSpendStructuredTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'claude-3' }),
        {
          promptTokens: {
            input: 100,
            write: 25,
            read: 15,
          },
          completionTokens: 50,
        },
      );
    });

    it('should use spendTokens for entries without cache tokens', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' }];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(1);
      expect(mockSpendStructuredTokens).not.toHaveBeenCalled();
    });

    it('should handle mixed cache and non-cache entries', async () => {
      const collectedUsage = [
        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
        {
          input_tokens: 150,
          output_tokens: 30,
          model: 'gpt-4',
          input_token_details: { cache_creation: 10, cache_read: 5 },
        },
        { input_tokens: 200, output_tokens: 20, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledTimes(2);
      expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
    });

    it('should include cache tokens in total input calculation', async () => {
      const collectedUsage = [
        {
          input_tokens: 100,
          output_tokens: 50,
          model: 'gpt-4',
          input_token_details: {
            cache_creation: 20,
            cache_read: 10,
          },
        },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      // Total input should include cache tokens: 100 + 20 + 10 = 130
      expect(client.usage.input_tokens).toBe(130);
    });
  });

  describe('model fallback', () => {
    it('should use usage.model when available', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4-turbo' }];
      await client.recordCollectedUsage({
        model: 'fallback-model',
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'gpt-4-turbo' }),
        expect.any(Object),
      );
    });

    it('should fallback to param model when usage.model is missing', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50 }];
      await client.recordCollectedUsage({
        model: 'param-model',
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'param-model' }),
        expect.any(Object),
      );
    });

    it('should fallback to client.model when param model is missing', async () => {
      client.model = 'client-model';
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50 }];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'client-model' }),
        expect.any(Object),
      );
    });

    it('should fallback to agent model_parameters.model as last resort', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50 }];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      expect(mockSpendTokens).toHaveBeenCalledWith(
        expect.objectContaining({ model: 'gpt-4' }),
        expect.any(Object),
      );
    });
  });

  describe('getStreamUsage integration', () => {
    it('should return the usage object set by recordCollectedUsage', async () => {
      const collectedUsage = [{ input_tokens: 100, output_tokens: 50, model: 'gpt-4' }];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      const usage = client.getStreamUsage();
      expect(usage).toEqual({
        input_tokens: 100,
        output_tokens: 50,
      });
    });

    it('should return undefined before recordCollectedUsage is called', () => {
      const usage = client.getStreamUsage();
      expect(usage).toBeUndefined();
    });

    it('should have output_tokens > 0 for BaseClient.sendMessage check', async () => {
      // This test verifies the usage will pass the check in BaseClient.sendMessage:
      // if (usage != null && Number(usage[this.outputTokensKey]) > 0)
      const collectedUsage = [
        { input_tokens: 200, output_tokens: 100, model: 'gpt-4' },
        { input_tokens: 50, output_tokens: 30, model: 'gpt-4' },
      ];
      await client.recordCollectedUsage({
        collectedUsage,
        balance: { enabled: true },
        transactions: { enabled: true },
      });
      const usage = client.getStreamUsage();
      expect(usage).not.toBeNull();
      expect(Number(usage.output_tokens)).toBeGreaterThan(0);
    });
  });
});

View file

@ -5,7 +5,9 @@ const { logger } = require('@librechat/data-schemas');
const {
agentCreateSchema,
agentUpdateSchema,
refreshListAvatars,
mergeAgentOcrConversion,
MAX_AVATAR_REFRESH_AGENTS,
convertOcrToContextInPlace,
} = require('@librechat/api');
const {
@ -56,46 +58,6 @@ const systemTools = {
const MAX_SEARCH_LEN = 100;
/** Escape RegExp metacharacters so `str` can be embedded verbatim in a dynamic RegExp. */
const escapeRegex = (str = '') => {
  // '$&' re-inserts the matched character, preceded by a literal backslash.
  return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
};
/**
 * Opportunistically refreshes S3-backed avatars for agent list responses.
 * Only list responses are refreshed because they're the highest-traffic surface and
 * the avatar URLs have a short-lived TTL. The refresh is cached per-user for 30 minutes
 * via {@link CacheKeys.S3_EXPIRY_INTERVAL} so we refresh once per interval at most.
 * @param {Array} agents - Agents being enriched with S3-backed avatars (mutated in place)
 * @param {string} userId - User identifier used for the cache refresh key
 */
const refreshListAvatars = async (agents, userId) => {
  if (!agents?.length) {
    return;
  }
  const store = getLogStores(CacheKeys.S3_EXPIRY_INTERVAL);
  const refreshKey = `${userId}:agents_list`;
  if (await store.get(refreshKey)) {
    // Already refreshed within the current interval; skip the S3 round-trips.
    return;
  }
  /** Refresh a single agent's avatar URL; failures are logged and swallowed. */
  const refreshOne = async (agent) => {
    const avatar = agent?.avatar;
    if (avatar?.source !== FileSources.s3 || !avatar?.filepath) {
      return;
    }
    try {
      const newPath = await refreshS3Url(avatar);
      if (newPath && newPath !== avatar.filepath) {
        agent.avatar = { ...avatar, filepath: newPath };
      }
    } catch (err) {
      logger.debug('[/Agents] Avatar refresh error for list item', err);
    }
  };
  await Promise.all(agents.map(refreshOne));
  await store.set(refreshKey, true, Time.THIRTY_MINUTES);
};
/**
* Creates an Agent.
* @route POST /Agents
@ -119,7 +81,7 @@ const createAgentHandler = async (req, res) => {
agentData.author = userId;
agentData.tools = [];
const availableTools = await getCachedTools();
const availableTools = (await getCachedTools()) ?? {};
for (const tool of tools) {
if (availableTools[tool]) {
agentData.tools.push(tool);
@ -544,6 +506,35 @@ const getListAgentsHandler = async (req, res) => {
requiredPermissions: PermissionBits.VIEW,
});
/**
* Refresh all S3 avatars for this user's accessible agent set (not only the current page)
* This addresses page-size limits preventing refresh of agents beyond the first page
*/
const cache = getLogStores(CacheKeys.S3_EXPIRY_INTERVAL);
const refreshKey = `${userId}:agents_avatar_refresh`;
const alreadyChecked = await cache.get(refreshKey);
if (alreadyChecked) {
logger.debug('[/Agents] S3 avatar refresh already checked, skipping');
} else {
try {
const fullList = await getListAgentsByAccess({
accessibleIds,
otherParams: {},
limit: MAX_AVATAR_REFRESH_AGENTS,
after: null,
});
await refreshListAvatars({
agents: fullList?.data ?? [],
userId,
refreshS3Url,
updateAgent,
});
await cache.set(refreshKey, true, Time.THIRTY_MINUTES);
} catch (err) {
logger.error('[/Agents] Error refreshing avatars for full list: %o', err);
}
}
// Use the new ACL-aware function
const data = await getListAgentsByAccess({
accessibleIds,
@ -571,15 +562,9 @@ const getListAgentsHandler = async (req, res) => {
return agent;
});
// Opportunistically refresh S3 avatar URLs for list results with caching
try {
await refreshListAvatars(data.data, req.user.id);
} catch (err) {
logger.debug('[/Agents] Skipping avatar refresh for list', err);
}
return res.json(data);
} catch (error) {
logger.error('[/Agents] Error listing Agents', error);
logger.error('[/Agents] Error listing Agents: %o', error);
res.status(500).json({ error: error.message });
}
};

View file

@ -1,8 +1,9 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { nanoid } = require('nanoid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { v4: uuidv4 } = require('uuid');
const { agentSchema } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
// Only mock the dependencies that are not database-related
jest.mock('~/server/services/Config', () => ({
@ -54,6 +55,15 @@ jest.mock('~/models', () => ({
getCategoriesWithCounts: jest.fn(),
}));
// Mock cache for S3 avatar refresh tests
const mockCache = {
get: jest.fn(),
set: jest.fn(),
};
jest.mock('~/cache', () => ({
getLogStores: jest.fn(() => mockCache),
}));
const {
createAgent: createAgentHandler,
updateAgent: updateAgentHandler,
@ -65,6 +75,8 @@ const {
findPubliclyAccessibleResources,
} = require('~/server/services/PermissionService');
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
*/
@ -1207,4 +1219,349 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
expect(response.data[0].is_promoted).toBe(true);
});
});
describe('S3 Avatar Refresh', () => {
let userA, userB;
let agentWithS3Avatar, agentWithLocalAvatar, agentOwnedByOther;
beforeEach(async () => {
await Agent.deleteMany({});
jest.clearAllMocks();
// Reset cache mock
mockCache.get.mockResolvedValue(false);
mockCache.set.mockResolvedValue(undefined);
userA = new mongoose.Types.ObjectId();
userB = new mongoose.Types.ObjectId();
// Create agent with S3 avatar owned by userA
agentWithS3Avatar = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent with S3 Avatar',
description: 'Has S3 avatar',
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: FileSources.s3,
filepath: 'old-s3-path.jpg',
},
versions: [
{
name: 'Agent with S3 Avatar',
description: 'Has S3 avatar',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
// Create agent with local avatar owned by userA
agentWithLocalAvatar = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent with Local Avatar',
description: 'Has local avatar',
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: 'local',
filepath: 'local-path.jpg',
},
versions: [
{
name: 'Agent with Local Avatar',
description: 'Has local avatar',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
// Create agent with S3 avatar owned by userB
agentOwnedByOther = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent Owned By Other',
description: 'Owned by userB',
provider: 'openai',
model: 'gpt-4',
author: userB,
avatar: {
source: FileSources.s3,
filepath: 'other-s3-path.jpg',
},
versions: [
{
name: 'Agent Owned By Other',
description: 'Owned by userB',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
});
// A cache hit on the per-user refresh key means a refresh already ran this
// interval; the handler must not touch S3 at all.
test('should skip avatar refresh if cache hit', async () => {
  mockCache.get.mockResolvedValue(true);
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  const mockReq = {
    user: { id: userA.toString(), role: 'USER' },
    query: {},
  };
  const mockRes = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler(mockReq, mockRes);
  // Should not call refreshS3Url when cache hit
  expect(refreshS3Url).not.toHaveBeenCalled();
});
// On a cache miss the handler must refresh S3 URLs, set the interval cache key,
// and still return the list response to the client.
test('should refresh and persist S3 avatars on cache miss', async () => {
  mockCache.get.mockResolvedValue(false);
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockResolvedValue('new-s3-path.jpg');
  const mockReq = {
    user: { id: userA.toString(), role: 'USER' },
    query: {},
  };
  const mockRes = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  await getListAgentsHandler(mockReq, mockRes);
  // Verify S3 URL was refreshed
  expect(refreshS3Url).toHaveBeenCalled();
  // Verify cache was set
  expect(mockCache.set).toHaveBeenCalled();
  // Verify response was returned
  expect(mockRes.json).toHaveBeenCalled();
});
test('should refresh avatars for all accessible agents (VIEW permission)', async () => {
mockCache.get.mockResolvedValue(false);
// User A has access to both their own agent and userB's agent
findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id, agentOwnedByOther._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
refreshS3Url.mockResolvedValue('new-path.jpg');
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Should be called for both agents - any user with VIEW access can refresh
expect(refreshS3Url).toHaveBeenCalledTimes(2);
});
test('should skip non-S3 avatars', async () => {
mockCache.get.mockResolvedValue(false);
findAccessibleResources.mockResolvedValue([agentWithLocalAvatar._id, agentWithS3Avatar._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
refreshS3Url.mockResolvedValue('new-path.jpg');
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Should only be called for S3 avatar agent
expect(refreshS3Url).toHaveBeenCalledTimes(1);
});
test('should not update if S3 URL unchanged', async () => {
mockCache.get.mockResolvedValue(false);
findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
// Return the same path - no update needed
refreshS3Url.mockResolvedValue('old-s3-path.jpg');
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Verify refreshS3Url was called
expect(refreshS3Url).toHaveBeenCalled();
// Response should still be returned
expect(mockRes.json).toHaveBeenCalled();
});
// An S3 failure during the opportunistic refresh must never break the list
// endpoint: the handler swallows the error and still responds.
test('should handle S3 refresh errors gracefully', async () => {
  mockCache.get.mockResolvedValue(false);
  findAccessibleResources.mockResolvedValue([agentWithS3Avatar._id]);
  findPubliclyAccessibleResources.mockResolvedValue([]);
  refreshS3Url.mockRejectedValue(new Error('S3 error'));
  const mockReq = {
    user: { id: userA.toString(), role: 'USER' },
    query: {},
  };
  const mockRes = {
    status: jest.fn().mockReturnThis(),
    json: jest.fn().mockReturnThis(),
  };
  // Should not throw - handles error gracefully
  await expect(getListAgentsHandler(mockReq, mockRes)).resolves.not.toThrow();
  // Response should still be returned
  expect(mockRes.json).toHaveBeenCalled();
});
test('should process agents in batches', async () => {
mockCache.get.mockResolvedValue(false);
// Create 25 agents (should be processed in batches of 20)
const manyAgents = [];
for (let i = 0; i < 25; i++) {
const agent = await Agent.create({
id: `agent_${nanoid(12)}`,
name: `Agent ${i}`,
description: `Agent ${i} description`,
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: FileSources.s3,
filepath: `path${i}.jpg`,
},
versions: [
{
name: `Agent ${i}`,
description: `Agent ${i} description`,
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
manyAgents.push(agent);
}
const allAgentIds = manyAgents.map((a) => a._id);
findAccessibleResources.mockResolvedValue(allAgentIds);
findPubliclyAccessibleResources.mockResolvedValue([]);
refreshS3Url.mockImplementation((avatar) =>
Promise.resolve(avatar.filepath.replace('.jpg', '-new.jpg')),
);
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// All 25 should be processed
expect(refreshS3Url).toHaveBeenCalledTimes(25);
});
test('should skip agents without id or author', async () => {
mockCache.get.mockResolvedValue(false);
// Create agent without proper id field (edge case)
const agentWithoutId = await Agent.create({
id: `agent_${nanoid(12)}`,
name: 'Agent without ID field',
description: 'Testing',
provider: 'openai',
model: 'gpt-4',
author: userA,
avatar: {
source: FileSources.s3,
filepath: 'test-path.jpg',
},
versions: [
{
name: 'Agent without ID field',
description: 'Testing',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
findAccessibleResources.mockResolvedValue([agentWithoutId._id, agentWithS3Avatar._id]);
findPubliclyAccessibleResources.mockResolvedValue([]);
refreshS3Url.mockResolvedValue('new-path.jpg');
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Should still complete without errors
expect(mockRes.json).toHaveBeenCalled();
});
test('should use MAX_AVATAR_REFRESH_AGENTS limit for full list query', async () => {
mockCache.get.mockResolvedValue(false);
findAccessibleResources.mockResolvedValue([]);
findPubliclyAccessibleResources.mockResolvedValue([]);
const mockReq = {
user: { id: userA.toString(), role: 'USER' },
query: {},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
await getListAgentsHandler(mockReq, mockRes);
// Verify that the handler completed successfully
expect(mockRes.json).toHaveBeenCalled();
});
});
});

View file

@ -31,7 +31,7 @@ const createAssistant = async (req, res) => {
delete assistantData.conversation_starters;
delete assistantData.append_current_datetime;
const toolDefinitions = await getCachedTools();
const toolDefinitions = (await getCachedTools()) ?? {};
assistantData.tools = tools
.map((tool) => {
@ -136,7 +136,7 @@ const patchAssistant = async (req, res) => {
...updateData
} = req.body;
const toolDefinitions = await getCachedTools();
const toolDefinitions = (await getCachedTools()) ?? {};
updateData.tools = (updateData.tools ?? [])
.map((tool) => {

View file

@ -28,7 +28,7 @@ const createAssistant = async (req, res) => {
delete assistantData.conversation_starters;
delete assistantData.append_current_datetime;
const toolDefinitions = await getCachedTools();
const toolDefinitions = (await getCachedTools()) ?? {};
assistantData.tools = tools
.map((tool) => {
@ -125,7 +125,7 @@ const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {
let hasFileSearch = false;
for (const tool of updateData.tools ?? []) {
const toolDefinitions = await getCachedTools();
const toolDefinitions = (await getCachedTools()) ?? {};
let actualTool = typeof tool === 'string' ? toolDefinitions[tool] : tool;
if (!actualTool && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {

View file

@ -1,16 +1,16 @@
const { ResourceType } = require('librechat-data-provider');
const { canAccessResource } = require('./canAccessResource');
const { findMCPServerById } = require('~/models');
const { findMCPServerByServerName } = require('~/models');
/**
* MCP Server ID resolver function
* Resolves custom MCP server ID (e.g., "mcp_abc123") to MongoDB ObjectId
* MCP Server name resolver function
* Resolves MCP server name (e.g., "my-mcp-server") to MongoDB ObjectId
*
* @param {string} mcpServerCustomId - Custom MCP server ID from route parameter
* @param {string} serverName - Server name from route parameter
* @returns {Promise<Object|null>} MCP server document with _id field, or null if not found
*/
const resolveMCPServerId = async (mcpServerCustomId) => {
return await findMCPServerById(mcpServerCustomId);
const resolveMCPServerName = async (serverName) => {
return await findMCPServerByServerName(serverName);
};
/**
@ -52,7 +52,7 @@ const canAccessMCPServerResource = (options) => {
resourceType: ResourceType.MCPSERVER,
requiredPermission,
resourceIdParam,
idResolver: resolveMCPServerId,
idResolver: resolveMCPServerName,
});
};

View file

@ -545,7 +545,7 @@ describe('canAccessMCPServerResource middleware', () => {
describe('error handling', () => {
test('should handle server returning null gracefully (treated as not found)', async () => {
// When an MCP server is not found, findMCPServerById returns null
// When an MCP server is not found, findMCPServerByServerName returns null
// which the middleware correctly handles as a 404
req.params.serverName = 'definitely-non-existent-server';

View file

@ -11,7 +11,7 @@ const {
const { requireJwtAuth, checkBan, uaParser, canAccessResource } = require('~/server/middleware');
const { checkPeoplePickerAccess } = require('~/server/middleware/checkPeoplePickerAccess');
const { checkSharePublicAccess } = require('~/server/middleware/checkSharePublicAccess');
const { findMCPServerById } = require('~/models');
const { findMCPServerByObjectId } = require('~/models');
const router = express.Router();
@ -64,7 +64,7 @@ const checkResourcePermissionAccess = (requiredPermission) => (req, res, next) =
resourceType: ResourceType.MCPSERVER,
requiredPermission,
resourceIdParam: 'resourceId',
idResolver: findMCPServerById,
idResolver: findMCPServerByObjectId,
});
} else {
return res.status(400).json({

View file

@ -32,7 +32,7 @@ jest.mock('~/server/middleware/checkPeoplePickerAccess', () => ({
// Import actual middleware to get canAccessResource
const { canAccessResource } = require('~/server/middleware');
const { findMCPServerById } = require('~/models');
const { findMCPServerByObjectId } = require('~/models');
/**
* Security Tests for SBA-ADV-20251203-02
@ -151,7 +151,7 @@ describe('Access Permissions Routes - Security Tests (SBA-ADV-20251203-02)', ()
resourceType: ResourceType.MCPSERVER,
requiredPermission,
resourceIdParam: 'resourceId',
idResolver: findMCPServerById,
idResolver: findMCPServerByObjectId,
});
} else {
return res.status(400).json({

View file

@ -79,7 +79,7 @@ async function processRequiredActions(client, requiredActions) {
requiredActions,
);
const appConfig = client.req.config;
const toolDefinitions = await getCachedTools();
const toolDefinitions = (await getCachedTools()) ?? {};
const seenToolkits = new Set();
const tools = requiredActions
.map((action) => {

View file

@ -5,7 +5,7 @@ const { Calculator } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Tools, ImageVisionTool } = require('librechat-data-provider');
const { getToolkitKey, oaiToolkit, ytToolkit, geminiToolkit } = require('@librechat/api');
const { getToolkitKey, oaiToolkit, geminiToolkit } = require('@librechat/api');
const { toolkits } = require('~/app/clients/tools/manifest');
/**
@ -83,7 +83,6 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
const basicToolInstances = [
new Calculator(),
...Object.values(oaiToolkit),
...Object.values(ytToolkit),
...Object.values(geminiToolkit),
];
for (const toolInstance of basicToolInstances) {

View file

@ -13,7 +13,7 @@ export default function MessagesView({
const localize = useLocalize();
const [currentEditId, setCurrentEditId] = useState<number | string | null>(-1);
return (
<div className="flex-1 pb-[50px]">
<div className="min-h-0 flex-1 overflow-hidden pb-[50px]">
<div className="dark:gpt-dark-gray relative h-full">
<div
style={{

View file

@ -130,7 +130,7 @@ function SharedView() {
const mainContent = (
<div className="transition-width relative flex h-full w-full flex-1 flex-col items-stretch overflow-hidden pt-0 dark:bg-surface-secondary">
<div className="flex h-full flex-col text-text-primary" role="presentation">
<div className="flex h-full min-h-0 flex-col text-text-primary" role="presentation">
{content}
{footer}
</div>
@ -150,7 +150,7 @@ function SharedView() {
return (
<ShareContext.Provider value={{ isSharedConvo: true }}>
<div className="relative flex min-h-screen w-full dark:bg-surface-secondary">
<div className="relative flex h-screen w-full overflow-hidden dark:bg-surface-secondary">
<main className="relative flex w-full grow overflow-hidden dark:bg-surface-secondary">
{artifactsContainer}
</main>

View file

@ -1,5 +1,5 @@
import React from 'react';
import { Pencil, PlugZap, SlidersHorizontal, RefreshCw, X } from 'lucide-react';
import { Pencil, PlugZap, SlidersHorizontal, RefreshCw, X, Trash2 } from 'lucide-react';
import { Spinner, TooltipAnchor } from '@librechat/client';
import type { MCPServerStatus } from 'librechat-data-provider';
import { useLocalize } from '~/hooks';
@ -17,6 +17,7 @@ interface MCPCardActionsProps {
onConfigClick: (e: React.MouseEvent) => void;
onInitialize: () => void;
onCancel: (e: React.MouseEvent) => void;
onRevoke?: () => void;
}
/**
@ -26,6 +27,7 @@ interface MCPCardActionsProps {
* - Pencil: Edit server definition (Settings panel only)
* - PlugZap: Connect/Authenticate (for disconnected/error servers)
* - SlidersHorizontal: Configure custom variables (for connected servers with vars)
* - Trash2: Revoke OAuth access (for connected OAuth servers)
* - RefreshCw: Reconnect/Refresh (for connected servers)
* - Spinner: Loading state (with X on hover for cancel)
*/
@ -41,6 +43,7 @@ export default function MCPCardActions({
onConfigClick,
onInitialize,
onCancel,
onRevoke,
}: MCPCardActionsProps) {
const localize = useLocalize();
@ -162,6 +165,20 @@ export default function MCPCardActions({
<RefreshCw className="size-3.5" aria-hidden="true" />
</TooltipAnchor>
)}
{/* Revoke button - for OAuth servers (available regardless of connection state) */}
{serverStatus?.requiresOAuth && onRevoke && (
<TooltipAnchor
description={localize('com_ui_revoke')}
side="top"
className={cn(buttonBaseClass, 'text-red-500 hover:text-red-600')}
aria-label={localize('com_ui_revoke')}
role="button"
onClick={onRevoke}
>
<Trash2 className="size-3.5" aria-hidden="true" />
</TooltipAnchor>
)}
</div>
);
}

View file

@ -30,7 +30,7 @@ export default function MCPServerCard({
}: MCPServerCardProps) {
const localize = useLocalize();
const triggerRef = useRef<HTMLDivElement>(null);
const { initializeServer } = useMCPServerManager();
const { initializeServer, revokeOAuthForServer } = useMCPServerManager();
const [dialogOpen, setDialogOpen] = useState(false);
const statusIconProps = getServerStatusIconProps(server.serverName);
@ -50,9 +50,20 @@ export default function MCPServerCard({
const canEdit = canCreateEditMCPs && canEditThisServer;
const handleInitialize = () => {
/** If server has custom user vars and is not already connected, show config dialog first
* This ensures users can enter credentials before initialization attempts
*/
if (hasCustomUserVars && serverStatus?.connectionState !== 'connected') {
onConfigClick({ stopPropagation: () => {}, preventDefault: () => {} } as React.MouseEvent);
return;
}
initializeServer(server.serverName);
};
const handleRevoke = () => {
revokeOAuthForServer(server.serverName);
};
const handleEditClick = (e: React.MouseEvent) => {
e.stopPropagation();
e.preventDefault();
@ -130,6 +141,7 @@ export default function MCPServerCard({
onConfigClick={onConfigClick}
onInitialize={handleInitialize}
onCancel={onCancel}
onRevoke={handleRevoke}
/>
</div>
</div>

View file

@ -1,5 +1,5 @@
import { useMutation } from '@tanstack/react-query';
import { request } from 'librechat-data-provider';
import { apiBaseUrl, request } from 'librechat-data-provider';
export interface AbortStreamParams {
/** The stream ID to abort (if known) */
@ -23,7 +23,10 @@ export interface AbortStreamResponse {
*/
export const abortStream = async (params: AbortStreamParams): Promise<AbortStreamResponse> => {
console.log('[abortStream] Calling abort endpoint with params:', params);
const result = (await request.post('/api/agents/chat/abort', params)) as AbortStreamResponse;
const result = (await request.post(
`${apiBaseUrl()}/api/agents/chat/abort`,
params,
)) as AbortStreamResponse;
console.log('[abortStream] Abort response:', result);
return result;
};

View file

@ -1,5 +1,5 @@
import { useEffect, useMemo, useState } from 'react';
import { QueryKeys, request, dataService } from 'librechat-data-provider';
import { apiBaseUrl, QueryKeys, request, dataService } from 'librechat-data-provider';
import { useQuery, useQueries, useQueryClient } from '@tanstack/react-query';
import type { Agents, TConversation } from 'librechat-data-provider';
import { updateConvoInAllQueries } from '~/utils';
@ -16,7 +16,9 @@ export interface StreamStatusResponse {
export const streamStatusQueryKey = (conversationId: string) => ['streamStatus', conversationId];
export const fetchStreamStatus = async (conversationId: string): Promise<StreamStatusResponse> => {
return request.get<StreamStatusResponse>(`/api/agents/chat/status/${conversationId}`);
return request.get<StreamStatusResponse>(
`${apiBaseUrl()}/api/agents/chat/status/${conversationId}`,
);
};
export function useStreamStatus(conversationId: string | undefined, enabled = true) {

View file

@ -94,8 +94,20 @@ export function useMCPServerManager({ conversationId }: { conversationId?: strin
const cancelOAuthMutation = useCancelMCPOAuthMutation();
const updateUserPluginsMutation = useUpdateUserPluginsMutation({
onSuccess: async () => {
showToast({ message: localize('com_nav_mcp_vars_updated'), status: 'success' });
onSuccess: async (_data, variables) => {
const isRevoke = variables.action === 'uninstall';
const message = isRevoke
? localize('com_nav_mcp_access_revoked')
: localize('com_nav_mcp_vars_updated');
showToast({ message, status: 'success' });
/** Deselect server from mcpValues when revoking access */
if (isRevoke && variables.pluginKey?.startsWith(Constants.mcp_prefix)) {
const serverName = variables.pluginKey.replace(Constants.mcp_prefix, '');
const currentValues = mcpValuesRef.current ?? [];
const filteredValues = currentValues.filter((name) => name !== serverName);
setMCPValues(filteredValues);
}
await Promise.all([
queryClient.invalidateQueries([QueryKeys.mcpServers]),
@ -491,13 +503,10 @@ export function useMCPServerManager({ conversationId }: { conversationId?: strin
auth: {},
};
updateUserPluginsMutation.mutate(payload);
const currentValues = mcpValues ?? [];
const filteredValues = currentValues.filter((name) => name !== targetName);
setMCPValues(filteredValues);
/** Deselection is now handled centrally in updateUserPluginsMutation.onSuccess */
}
},
[selectedToolForConfig, updateUserPluginsMutation, mcpValues, setMCPValues],
[selectedToolForConfig, updateUserPluginsMutation],
);
/** Standalone revoke function for OAuth servers - doesn't require selectedToolForConfig */

View file

@ -7,7 +7,10 @@ import {
request,
Constants,
QueryKeys,
ErrorTypes,
apiBaseUrl,
createPayload,
ViolationTypes,
LocalStorageKeys,
removeNullishValues,
} from 'librechat-data-provider';
@ -144,7 +147,7 @@ export default function useResumableSSE(
let { userMessage } = currentSubmission;
let textIndex: number | null = null;
const baseUrl = `/api/agents/chat/stream/${encodeURIComponent(currentStreamId)}`;
const baseUrl = `${apiBaseUrl()}/api/agents/chat/stream/${encodeURIComponent(currentStreamId)}`;
const url = isResume ? `${baseUrl}?resume=true` : baseUrl;
console.log('[ResumableSSE] Subscribing to stream:', url, { isResume });
@ -333,8 +336,11 @@ export default function useResumableSSE(
});
/**
* Error event - fired on actual network failures (non-200, connection lost, etc.)
* This should trigger reconnection with exponential backoff, except for 404 errors.
* Error event handler - handles BOTH:
* 1. HTTP-level errors (responseCode present) - 404, 401, network failures
* 2. Server-sent error events (event: error with data) - known errors like ViolationTypes/ErrorTypes
*
* Order matters: check responseCode first since HTTP errors may also include data
*/
sse.addEventListener('error', async (e: MessageEvent) => {
(startupConfig?.balance?.enabled ?? false) && balanceQuery.refetch();
@ -346,7 +352,6 @@ export default function useResumableSSE(
if (responseCode === 404) {
console.log('[ResumableSSE] Stream not found (404) - job completed or expired');
sse.close();
// Optimistically remove from active jobs since job is gone
removeActiveJob(currentStreamId);
setIsSubmitting(false);
setShowStopButton(false);
@ -355,8 +360,6 @@ export default function useResumableSSE(
return;
}
console.log('[ResumableSSE] Stream error (network failure) - will attempt reconnect');
// Check for 401 and try to refresh token (same pattern as useSSE)
if (responseCode === 401) {
try {
@ -365,7 +368,6 @@ export default function useResumableSSE(
if (!newToken) {
throw new Error('Token refresh failed.');
}
// Update headers on same SSE instance and retry (like useSSE)
sse.headers = {
Authorization: `Bearer ${newToken}`,
};
@ -377,6 +379,64 @@ export default function useResumableSSE(
}
}
/**
* Server-sent error event (event: error with data) - no responseCode.
* These are known errors (ErrorTypes, ViolationTypes) that should be displayed to user.
* Only check e.data if there's no HTTP responseCode, since HTTP errors may also have body data.
*/
if (!responseCode && e.data) {
console.log('[ResumableSSE] Server-sent error event received:', e.data);
sse.close();
removeActiveJob(currentStreamId);
try {
const errorData = JSON.parse(e.data);
const errorString = errorData.error ?? errorData.message ?? JSON.stringify(errorData);
// Check if it's a known error type (ViolationTypes or ErrorTypes)
let isKnownError = false;
try {
const parsed =
typeof errorString === 'string' ? JSON.parse(errorString) : errorString;
const errorType = parsed?.type ?? parsed?.code;
if (errorType) {
const violationValues = Object.values(ViolationTypes) as string[];
const errorTypeValues = Object.values(ErrorTypes) as string[];
isKnownError =
violationValues.includes(errorType) || errorTypeValues.includes(errorType);
}
} catch {
// Not JSON or parsing failed - treat as generic error
}
console.log('[ResumableSSE] Error type check:', { isKnownError, errorString });
// Display the error to user via errorHandler
errorHandler({
data: { text: errorString } as unknown as Parameters<typeof errorHandler>[0]['data'],
submission: currentSubmission as EventSubmission,
});
} catch (parseError) {
console.error('[ResumableSSE] Failed to parse server error:', parseError);
errorHandler({
data: { text: e.data } as unknown as Parameters<typeof errorHandler>[0]['data'],
submission: currentSubmission as EventSubmission,
});
}
setIsSubmitting(false);
setShowStopButton(false);
setStreamId(null);
reconnectAttemptRef.current = 0;
return;
}
// Network failure or unknown HTTP error - attempt reconnection with backoff
console.log('[ResumableSSE] Stream error (network failure) - will attempt reconnect', {
responseCode,
hasData: !!e.data,
});
if (reconnectAttemptRef.current < MAX_RETRIES) {
// Increment counter BEFORE close() so abort handler knows we're reconnecting
reconnectAttemptRef.current++;

View file

@ -533,6 +533,7 @@
"com_nav_log_out": "Log out",
"com_nav_long_audio_warning": "Longer texts will take longer to process.",
"com_nav_maximize_chat_space": "Maximize chat space",
"com_nav_mcp_access_revoked": "MCP server access revoked successfully.",
"com_nav_mcp_configure_server": "Configure {{0}}",
"com_nav_mcp_connect": "Connect",
"com_nav_mcp_connect_server": "Connect {{0}}",

View file

@ -8,6 +8,7 @@
"com_agents_all": "すべてのエージェント",
"com_agents_all_category": "全て",
"com_agents_all_description": "すべてのカテゴリの共有エージェントを参照",
"com_agents_avatar_upload_error": "エージェントのアバターをアップロードできませんでした",
"com_agents_by_librechat": "LibreChatより",
"com_agents_category_aftersales": "アフターセールス",
"com_agents_category_aftersales_description": "販売後のサポート、メンテナンス、顧客サービスに特化したエージェント",
@ -34,6 +35,7 @@
"com_agents_copy_link": "リンクをコピー",
"com_agents_create_error": "エージェントの作成中にエラーが発生しました。",
"com_agents_created_by": "by",
"com_agents_description_card": "説明: {{description}}",
"com_agents_description_placeholder": "オプション: エージェントの説明を入力してください",
"com_agents_empty_state_heading": "エージェントが見つかりません",
"com_agents_enable_file_search": "ファイル検索を有効にする",
@ -142,6 +144,7 @@
"com_assistants_update_actions_success": "アクションが作成または更新されました",
"com_assistants_update_error": "アシスタントの更新中にエラーが発生しました。",
"com_assistants_update_success": "アップデートに成功しました",
"com_assistants_update_success_name": "正常に更新されました {{name}}",
"com_auth_already_have_account": "既にアカウントがある場合はこちら",
"com_auth_apple_login": "Appleでサインイン",
"com_auth_back_to_login": "ログイン画面に戻る",
@ -311,6 +314,7 @@
"com_endpoint_preset_default_removed": "が無効化されました。",
"com_endpoint_preset_delete_confirm": "本当にこのプリセットを削除しますか?",
"com_endpoint_preset_delete_error": "プリセットの削除に失敗しました。もう一度お試し下さい。",
"com_endpoint_preset_delete_success": "プリセットを削除しました",
"com_endpoint_preset_import": "プリセットのインポートが完了しました",
"com_endpoint_preset_import_error": "プリセットのインポートに失敗しました。もう一度お試し下さい。",
"com_endpoint_preset_name": "プリセット名",
@ -377,6 +381,7 @@
"com_files_no_results": "結果がありません。",
"com_files_number_selected": "{{0}} of {{1}} ファイルが選択されました",
"com_files_preparing_download": "ダウンロードの準備...",
"com_files_results_found": "{{count}} 件の結果が見つかりました",
"com_files_sharepoint_picker_title": "ファイルを選択",
"com_files_table": "ここに何かを入れる必要があります。空でした",
"com_files_upload_local_machine": "ローカルコンピュータから",
@ -427,6 +432,7 @@
"com_nav_chat_commands": "チャットコマンド",
"com_nav_chat_commands_info": "メッセージの先頭に特定の文字を入力することで、これらのコマンドが有効になります。各コマンドは、決められた文字(プレフィックス)で起動します。メッセージの先頭にこれらの文字をよく使用する場合は、コマンド機能を無効にすることができます。",
"com_nav_chat_direction": "チャットの方向",
"com_nav_chat_direction_selected": "チャットの方向: {{direction}}",
"com_nav_clear_all_chats": "すべてのチャットを削除する",
"com_nav_clear_cache_confirm_message": "キャッシュを削除してもよろしいですか?",
"com_nav_clear_conversation": "会話を削除する",
@ -434,9 +440,11 @@
"com_nav_close_sidebar": "サイドバーを閉じる",
"com_nav_commands": "Commands",
"com_nav_confirm_clear": "削除を確定",
"com_nav_control_panel": "コントロールパネル",
"com_nav_conversation_mode": "会話モード",
"com_nav_convo_menu_options": "会話メニューオプション",
"com_nav_db_sensitivity": "デシベル感度",
"com_nav_default_temporary_chat": "一時チャットをデフォルトにする",
"com_nav_delete_account": "アカウントを削除",
"com_nav_delete_account_button": "アカウントを完全に削除する",
"com_nav_delete_account_confirm": "アカウントを削除しますか?",
@ -470,6 +478,7 @@
"com_nav_info_code_artifacts": "チャットの横に実験的なコード アーティファクトの表示を有効にします",
"com_nav_info_code_artifacts_agent": "このエージェントのコードアーティファクトの使用を有効にします。デフォルトでは、\"カスタムプロンプトモード\" が有効になっていない限り、アーティファクトの使用に特化した追加の指示が追加されます。",
"com_nav_info_custom_prompt_mode": "有効にすると、デフォルトのアーティファクト システム プロンプトは含まれません。このモードでは、アーティファクト生成指示をすべて手動で提供する必要があります。",
"com_nav_info_default_temporary_chat": "有効にすると、デフォルトで一時チャットモードが有効な状態で新規チャットが開始されます。一時的なチャットは履歴に保存されません。",
"com_nav_info_enter_to_send": "有効になっている場合、 `ENTER` キーを押すとメッセージが送信されます。無効になっている場合、Enterキーを押すと新しい行が追加され、 `CTRL + ENTER` / `⌘ + ENTER` キーを押してメッセージを送信する必要があります。",
"com_nav_info_fork_change_default": "`表示メッセージのみ` は、選択したメッセージへの直接パスのみが含まれます。 `関連ブランチを含める` は、パスに沿ったブランチを追加します。 `すべてを対象に含める` は、接続されているすべてのメッセージとブランチを含みます。",
"com_nav_info_fork_split_target_setting": "有効になっている場合、選択した動作に従って、対象メッセージから会話内の最新メッセージまで分岐が開始されます。",
@ -524,6 +533,7 @@
"com_nav_long_audio_warning": "長いテキストの処理には時間がかかります。",
"com_nav_maximize_chat_space": "チャット画面を最大化",
"com_nav_mcp_configure_server": "{{0}}を設定",
"com_nav_mcp_status_connected": "接続済み",
"com_nav_mcp_status_connecting": "{{0}} - 接続中",
"com_nav_mcp_vars_update_error": "MCP カスタム ユーザー変数の更新中にエラーが発生しました",
"com_nav_mcp_vars_updated": "MCP カスタムユーザー変数が正常に更新されました。",
@ -563,6 +573,7 @@
"com_nav_theme_dark": "ダーク",
"com_nav_theme_light": "ライト",
"com_nav_theme_system": "システム",
"com_nav_toggle_sidebar": "サイドバーの切り替え",
"com_nav_tool_dialog": "アシスタントツール",
"com_nav_tool_dialog_agents": "エージェントツール",
"com_nav_tool_dialog_description": "ツールの選択を維持するには、アシスタントを保存する必要があります。",
@ -613,14 +624,22 @@
"com_ui_action_button": "アクションボタン",
"com_ui_active": "有効化",
"com_ui_add": "追加",
"com_ui_add_code_interpreter_api_key": "Code Interpreter APIキーを追加",
"com_ui_add_first_bookmark": "チャットを追加するにはクリックしてください",
"com_ui_add_first_mcp_server": "最初のMCPサーバーを作成して始めましょう",
"com_ui_add_first_prompt": "始めるにはプロンプトを作成してください",
"com_ui_add_mcp": "MCPの追加",
"com_ui_add_mcp_server": "MCPサーバーの追加",
"com_ui_add_model_preset": "追加の応答のためのモデルまたはプリセットを追加する",
"com_ui_add_multi_conversation": "複数のチャットを追加",
"com_ui_add_special_variables": "特別な変数を追加",
"com_ui_add_web_search_api_keys": "Web検索APIキーを追加する",
"com_ui_adding_details": "詳細を追加する",
"com_ui_additional_details": "追加の詳細",
"com_ui_admin": "管理者",
"com_ui_admin_access_warning": "管理者アクセスをこの機能で無効にすると、予期せぬUI上の問題が発生し、画面の再読み込みが必要になる場合があります。設定を保存した場合、元に戻すには librechat.yaml の設定ファイルを直接編集する必要があり、この変更はすべての権限に影響します。",
"com_ui_admin_settings": "管理者設定",
"com_ui_admin_settings_section": "管理者設定 - {{section}}",
"com_ui_advanced": "高度",
"com_ui_advanced_settings": "詳細設定",
"com_ui_agent": "エージェント",
@ -741,6 +760,10 @@
"com_ui_bookmarks_title": "タイトル",
"com_ui_bookmarks_update_error": "ブックマークの更新中にエラーが発生しました",
"com_ui_bookmarks_update_success": "ブックマークが正常に更新されました",
"com_ui_branch_created": "ブランチが正常に作成されました",
"com_ui_branch_error": "分岐の作成に失敗しました",
"com_ui_branch_message": "この応答から分岐を作成する",
"com_ui_by_author": "by {{0}}",
"com_ui_callback_url": "コールバックURL",
"com_ui_cancel": "キャンセル",
"com_ui_cancelled": "キャンセル",
@ -748,21 +771,31 @@
"com_ui_change_version": "バージョン変更",
"com_ui_chat": "チャット",
"com_ui_chat_history": "チャット履歴",
"com_ui_chats": "チャット",
"com_ui_check_internet": "インターネット接続を確認してください",
"com_ui_clear": "削除する",
"com_ui_clear_all": "すべてクリア",
"com_ui_clear_browser_cache": "ブラウザのキャッシュをクリアして下さい",
"com_ui_clear_presets": "プリセットをクリア",
"com_ui_clear_search": "検索をクリア",
"com_ui_click_to_close": "クリックして閉じる",
"com_ui_click_to_view_var": "クリックして表示 {{0}}",
"com_ui_client_id": "クライアントID",
"com_ui_client_secret": "クライアントシークレット",
"com_ui_close": "閉じる",
"com_ui_close_menu": "メニューを閉じる",
"com_ui_close_settings": "設定を閉じる",
"com_ui_close_var": "閉じる {{0}}",
"com_ui_close_window": "ウィンドウを閉じる",
"com_ui_code": "コード",
"com_ui_collapse": "折りたたむ",
"com_ui_collapse_chat": "チャットを折りたたむ",
"com_ui_collapse_thoughts": "思考を折りたたむ",
"com_ui_command_placeholder": "オプション:プロンプトのコマンドまたは名前を入力",
"com_ui_command_usage_placeholder": "コマンドまたは名前でプロンプトを選択してください",
"com_ui_complete_setup": "セットアップ完了",
"com_ui_concise": "簡潔",
"com_ui_configure": "設定",
"com_ui_configure_mcp_variables_for": "{{0}}の変数を設定",
"com_ui_confirm": "確認",
"com_ui_confirm_action": "実行する",
@ -770,13 +803,20 @@
"com_ui_confirm_change": "変更の確認",
"com_ui_connecting": "接続中",
"com_ui_context": "コンテキスト",
"com_ui_context_filter_sort": "コンテキストによるフィルターと並べ替え",
"com_ui_continue": "続ける",
"com_ui_continue_oauth": "OAuthで続行",
"com_ui_control_bar": "コントロールバー",
"com_ui_controls": "管理",
"com_ui_conversation": "会話",
"com_ui_conversation_label": "{{title}} 会話",
"com_ui_conversations": "会話",
"com_ui_convo_archived": "会話はアーカイブされました",
"com_ui_convo_delete_error": "会話の削除に失敗しました",
"com_ui_convo_delete_success": "会話の削除に成功",
"com_ui_copied": "コピーしました!",
"com_ui_copied_to_clipboard": "コピーしました",
"com_ui_copy": "コピー",
"com_ui_copy_code": "コードをコピーする",
"com_ui_copy_link": "リンクをコピー",
"com_ui_copy_stack_trace": "スタックトレースをコピーする",
@ -784,15 +824,19 @@
"com_ui_copy_to_clipboard": "クリップボードへコピー",
"com_ui_copy_url_to_clipboard": "URLをクリップボードにコピー",
"com_ui_create": "作成",
"com_ui_create_assistant": "アシスタントを作成",
"com_ui_create_link": "リンクを作成する",
"com_ui_create_memory": "メモリを作成します",
"com_ui_create_new_agent": "新しいエージェントを作成",
"com_ui_create_prompt": "プロンプトを作成する",
"com_ui_create_prompt_page": "新しいプロンプト設定ページ",
"com_ui_creating_image": "画像を作成しています。しばらく時間がかかる場合があります",
"com_ui_current": "現在",
"com_ui_currently_production": "現在生産中",
"com_ui_custom": "カスタム",
"com_ui_custom_header_name": "カスタムヘッダー名",
"com_ui_custom_prompt_mode": "カスタムプロンプトモード",
"com_ui_dark_theme_enabled": "ダークテーマを有効にする",
"com_ui_dashboard": "ダッシュボード",
"com_ui_date": "日付",
"com_ui_date_april": "4月",
@ -809,6 +853,7 @@
"com_ui_date_previous_30_days": "過去30日間",
"com_ui_date_previous_7_days": "過去7日間",
"com_ui_date_september": "9月",
"com_ui_date_sort": "日付順",
"com_ui_date_today": "今日",
"com_ui_date_yesterday": "昨日",
"com_ui_decline": "同意しません",
@ -816,15 +861,21 @@
"com_ui_delete": "削除",
"com_ui_delete_action": "アクションを削除",
"com_ui_delete_action_confirm": "このアクションを削除してもよろしいですか?",
"com_ui_delete_agent": "エージェントを削除",
"com_ui_delete_agent_confirm": "このエージェントを削除してもよろしいですか?",
"com_ui_delete_assistant": "アシスタントを削除",
"com_ui_delete_assistant_confirm": "このアシスタントを削除しますか? この操作は元に戻せません。",
"com_ui_delete_confirm": "このチャットは削除されます。",
"com_ui_delete_confirm_prompt_version_var": "これは、選択されたバージョンを \"{{0}}.\" から削除します。他のバージョンが存在しない場合、プロンプトが削除されます。",
"com_ui_delete_confirm_strong": "削除します <strong>{{title}}</strong>",
"com_ui_delete_conversation": "チャットを削除しますか?",
"com_ui_delete_memory": "メモリの削除",
"com_ui_delete_not_allowed": "削除操作は許可されていません",
"com_ui_delete_preset": "プリセットを削除しますか?",
"com_ui_delete_prompt": "プロンプトを消しますか?",
"com_ui_delete_prompt_name": "プロンプトの削除 - {{name}}",
"com_ui_delete_shared_link": "共有リンクを削除しますか?",
"com_ui_delete_shared_link_heading": "共有リンクを削除",
"com_ui_delete_success": "削除に成功",
"com_ui_delete_tool": "ツールを削除",
"com_ui_delete_tool_confirm": "このツールを削除してもよろしいですか?",
@ -837,6 +888,7 @@
"com_ui_deselect_all": "すべて選択解除",
"com_ui_detailed": "詳細",
"com_ui_disabling": "無効化...",
"com_ui_done": "完了",
"com_ui_download": "ダウンロード",
"com_ui_download_artifact": "アーティファクトをダウンロード",
"com_ui_download_backup": "バックアップコードをダウンロードする",
@ -847,13 +899,17 @@
"com_ui_dropdown_variables": "ドロップダウン変数:",
"com_ui_dropdown_variables_info": "プロンプトのカスタムドロップダウンメニューを作成します: `{{variable_name:option1|option2|option3}}`",
"com_ui_duplicate": "複製",
"com_ui_duplicate_agent": "エージェントの重複",
"com_ui_duplication_error": "会話の複製中にエラーが発生しました",
"com_ui_duplication_processing": "会話を複製中...",
"com_ui_duplication_success": "会話の複製が完了しました",
"com_ui_edit": "編集",
"com_ui_edit_editing_image": "画像編集",
"com_ui_edit_mcp_server": "MCPサーバーの編集",
"com_ui_edit_mcp_server_dialog_description": "一意のサーバー識別子: {{serverName}}",
"com_ui_edit_memory": "メモリ編集",
"com_ui_edit_preset_title": "プリセットの編集 - {{title}}",
"com_ui_edit_prompt_page": "プロンプトページを編集",
"com_ui_editable_message": "編集可能なメッセージ",
"com_ui_editor_instructions": "画像をドラッグして位置を変更 - ズームスライダーまたはボタンでサイズを調整",
"com_ui_empty_category": "-",
@ -861,22 +917,29 @@
"com_ui_endpoint_menu": "LLMエンドポイントメニュー",
"com_ui_enter": "入力",
"com_ui_enter_api_key": "APIキーを入力",
"com_ui_enter_description": "説明を入力(オプション)",
"com_ui_enter_key": "キーを入力",
"com_ui_enter_name": "名前を入力",
"com_ui_enter_openapi_schema": "OpenAPIスキーマを入力してください",
"com_ui_enter_value": "値を入力",
"com_ui_error": "エラー",
"com_ui_error_connection": "サーバーへの接続中にエラーが発生しました。ページを更新してください。",
"com_ui_error_message_prefix": "エラーメッセージ:",
"com_ui_error_save_admin_settings": "管理者設定の保存にエラーが発生しました。",
"com_ui_error_try_following_prefix": "次のいずれかを試してください",
"com_ui_error_unexpected": "予期しない事態が発生しました",
"com_ui_error_updating_preferences": "環境設定の更新エラー",
"com_ui_everyone_permission_level": "全員の許可レベル",
"com_ui_examples": "例",
"com_ui_expand": "展開",
"com_ui_expand_chat": "チャットを展開",
"com_ui_expand_thoughts": "思考を展開",
"com_ui_export_convo_modal": "エクスポート",
"com_ui_feedback_more": "もっと...",
"com_ui_feedback_more_information": "追加フィードバックの提供",
"com_ui_feedback_negative": "改善が必要",
"com_ui_feedback_placeholder": "その他、ご意見・ご感想がございましたら、こちらにご記入ください。",
"com_ui_feedback_positive": "スキ!",
"com_ui_feedback_positive": "いいね!",
"com_ui_feedback_tag_accurate_reliable": "正確で信頼できる",
"com_ui_feedback_tag_attention_to_detail": "細部へのこだわり",
"com_ui_feedback_tag_bad_style": "スタイルや口調が悪い",
@ -895,6 +958,7 @@
"com_ui_file_token_limit": "ファイル・トークンの制限",
"com_ui_file_token_limit_desc": "ファイル処理のトークン上限を設定し、コストとリソースの使用量を管理する。",
"com_ui_files": "ファイル",
"com_ui_filter_mcp_servers": "名前でMCPサーバーをフィルタリング",
"com_ui_filter_prompts": "フィルタープロンプト",
"com_ui_filter_prompts_name": "名前でプロンプトをフィルタ",
"com_ui_final_touch": "最後の仕上げ",
@ -918,6 +982,7 @@
"com_ui_fork_info_visible": "この設定は、ターゲットメッセージへの直接の経路のみを表示し、分岐は表示しません。つまり、表示メッセージのみを抽出して表示するということです。",
"com_ui_fork_more_details_about": "{{0}} 分岐オプションに関する追加情報と詳細を表示します",
"com_ui_fork_more_info_options": "すべての分岐オプションとその動作の詳細説明を見る",
"com_ui_fork_open_menu": "分岐オプションを開く",
"com_ui_fork_processing": "会話を分岐しています...",
"com_ui_fork_remember": "以前の会話内容を記憶する",
"com_ui_fork_remember_checked": "選択した内容は、次回の利用時にも記憶されます。設定から変更できます。",
@ -938,6 +1003,8 @@
"com_ui_group": "グループ",
"com_ui_handoff_instructions": "ハンドオフの指示",
"com_ui_happy_birthday": "初めての誕生日です!",
"com_ui_header_format": "ヘッダー形式",
"com_ui_hide_code": "コードを隠す",
"com_ui_hide_image_details": "画像の詳細を隠す",
"com_ui_hide_password": "パスワードを隠す",
"com_ui_hide_qr": "QRコードを非表示にする",
@ -955,6 +1022,7 @@
"com_ui_import_conversation_info": "JSONファイルから会話をインポートする",
"com_ui_import_conversation_success": "会話のインポートに成功しました",
"com_ui_import_conversation_upload_error": "ファイルのアップロードに失敗しました。もう一度お試しください。",
"com_ui_importing": "インポート中",
"com_ui_include_shadcnui": "shadcn/uiコンポーネントの指示を含める",
"com_ui_initializing": "初期化中...",
"com_ui_input": "入力",
@ -965,9 +1033,13 @@
"com_ui_latest_footer": "Every AI for Everyone.",
"com_ui_latest_production_version": "最新の製品バージョン",
"com_ui_latest_version": "最新バージョン",
"com_ui_leave_blank_to_keep": "既存を維持する場合は空白のままにして下さい",
"com_ui_librechat_code_api_key": "LibreChat コードインタープリター APIキーを取得",
"com_ui_librechat_code_api_subtitle": "セキュア。多言語対応。ファイル入出力。",
"com_ui_librechat_code_api_title": "AIコードを実行",
"com_ui_light_theme_enabled": "ライトテーマが有効",
"com_ui_link_copied": "リンクをコピーしました",
"com_ui_link_refreshed": "リンクを更新しました",
"com_ui_loading": "読み込み中...",
"com_ui_locked": "ロック",
"com_ui_logo": "{{0}}のロゴ",
@ -975,18 +1047,41 @@
"com_ui_manage": "管理",
"com_ui_marketplace": "マーケットプレイス",
"com_ui_marketplace_allow_use": "マーケットプレイスの利用を許可する",
"com_ui_max_favorites_reached": "ピン留めしたアイテムの最大数に達しました({{0}})。アイテムを追加するには、ピン留めを解除します。",
"com_ui_max_file_size": "PNG、JPGまたはJPEG最大 {{0}})",
"com_ui_max_tags": "最新の値を使用した場合、許可される最大数は {{0}} です。",
"com_ui_mcp_authenticated_success": "MCPサーバー{{0}}認証成功",
"com_ui_mcp_configure_server": "設定 {{0}}",
"com_ui_mcp_configure_server_description": "カスタム変数を設定する {{0}}",
"com_ui_mcp_dialog_title": "変数を設定する {{serverName}}. サーバーステータス: {{status}}",
"com_ui_mcp_domain_not_allowed": "MCPサーバードメインが許可ドメインリストにありません。管理者に連絡してください。",
"com_ui_mcp_enter_var": "{{0}}の値を入力する。",
"com_ui_mcp_init_failed": "MCPサーバーの初期化に失敗しました",
"com_ui_mcp_initialize": "初期化",
"com_ui_mcp_initialized_success": "MCPサーバー{{0}}初期化に成功",
"com_ui_mcp_invalid_url": "有効な URL を入力してください",
"com_ui_mcp_oauth_cancelled": "OAuthログインがキャンセルされた {{0}}",
"com_ui_mcp_oauth_timeout": "OAuthログインがタイムアウトしました。 {{0}}",
"com_ui_mcp_server": "MCP サーバー",
"com_ui_mcp_server_connection_failed": "指定されたMCPサーバーへの接続に失敗しました。URL、サーバーの種類、および認証設定が正しいことを確認してから、もう一度お試しください。また、URLにアクセスできることを確認してください。",
"com_ui_mcp_server_created": "MCP サーバーが正常に作成されました",
"com_ui_mcp_server_delete_confirm": "この MCP サーバーを削除してもよろしいですか?",
"com_ui_mcp_server_deleted": "MCP サーバーが正常に削除されました",
"com_ui_mcp_server_role_editor": "MCPサーバーエディター",
"com_ui_mcp_server_role_editor_desc": "MCP サーバーを表示、使用、編集できます",
"com_ui_mcp_server_role_owner": "MCP サーバー所有者",
"com_ui_mcp_server_role_owner_desc": "MCP サーバーを完全に制御",
"com_ui_mcp_server_role_viewer": "MCP サーバー ビューアー",
"com_ui_mcp_server_role_viewer_desc": "MCPサーバーの表示と使用が可能",
"com_ui_mcp_server_updated": "MCPサーバーが正常に更新されました",
"com_ui_mcp_server_url_placeholder": "https://mcp.example.com",
"com_ui_mcp_servers": "MCP サーバー",
"com_ui_mcp_servers_allow_create": "ユーザにMCPサーバーを作成許可する",
"com_ui_mcp_servers_allow_share": "ユーザーにMCPサーバーを共有許可する",
"com_ui_mcp_servers_allow_use": "ユーザーに MCP サーバーの使用を許可する",
"com_ui_mcp_title_invalid": "タイトルに使用できるのは、アルファベット、数字、スペースのみです。",
"com_ui_mcp_type_sse": "SSE",
"com_ui_mcp_type_streamable_http": "ストリーミング可能なHTTPS",
"com_ui_mcp_update_var": "{{0}}を更新",
"com_ui_mcp_url": "MCPサーバーURL",
"com_ui_medium": "中",
@ -1004,13 +1099,18 @@
"com_ui_memory_deleted_items": "削除されたメモリ",
"com_ui_memory_error": "メモリエラー",
"com_ui_memory_key_exists": "このキーを持つメモリはすでに存在します。別のキーを使用してください。",
"com_ui_memory_key_hint": "小文字とアンダースコアのみを使用してください",
"com_ui_memory_key_validation": "メモリー・キーには小文字とアンダースコアのみを使用する。",
"com_ui_memory_storage_full": "メモリストレージがいっぱいです",
"com_ui_memory_updated": "保存されたメモリを更新しました",
"com_ui_memory_updated_items": "更新されたメモリ",
"com_ui_memory_would_exceed": "保存できません - 制限を超えています {{tokens}} トークン。既存のメモリを削除してスペースを確保します。",
"com_ui_mention": "エンドポイント、アシスタント、またはプリセットを素早く切り替えるには、それらを言及してください。",
"com_ui_mermaid": "マーメイド",
"com_ui_mermaid_failed": "図のレンダリングに失敗しました :",
"com_ui_mermaid_source": "ソースコード:",
"com_ui_message_input": "メッセージ入力",
"com_ui_microphone_unavailable": "マイクを使用できません",
"com_ui_min_tags": "これ以上の値を削除できません。少なくとも {{0}} が必要です。",
"com_ui_minimal": "最小限",
"com_ui_misc": "その他",
@ -1019,18 +1119,27 @@
"com_ui_more_info": "詳細",
"com_ui_my_prompts": "マイ プロンプト",
"com_ui_name": "名前",
"com_ui_name_sort": "名前順",
"com_ui_new": "New",
"com_ui_new_chat": "新規チャット",
"com_ui_new_conversation_title": "新しい会話タイトル",
"com_ui_next": "次",
"com_ui_no": "いいえ",
"com_ui_no_auth": "認証なし",
"com_ui_no_bookmarks": "ブックマークがまだないようです。チャットをクリックして新しいブックマークを追加してください",
"com_ui_no_bookmarks_match": "検索に一致するブックマークはありません",
"com_ui_no_bookmarks_title": "ブックマークはありません",
"com_ui_no_categories": "カテゴリーなし",
"com_ui_no_category": "カテゴリなし",
"com_ui_no_changes": "変更なし",
"com_ui_no_individual_access": "個々のユーザーやグループがこのエージェントにアクセスすることはできません。",
"com_ui_no_memories": "記憶はない。手動で作成するか、AIに何かを記憶するよう促す",
"com_ui_no_mcp_servers": "MCPサーバーはまだありません",
"com_ui_no_mcp_servers_match": "フィルターに一致するMCPサーバーはありません",
"com_ui_no_memories": "メモリ登録はありません。新規に作成するか、AIに覚えるよう指示してください。",
"com_ui_no_memories_match": "検索に一致するメモリはありません",
"com_ui_no_memories_title": "まだメモリがありません",
"com_ui_no_personalization_available": "現在、パーソナライズオプションはありません",
"com_ui_no_prompts_title": "まだプロンプトはありません",
"com_ui_no_read_access": "メモリを見る権限がありません",
"com_ui_no_results_found": "結果は見つかりませんでした",
"com_ui_no_terms_content": "表示する利用規約の内容はありません",
@ -1051,7 +1160,11 @@
"com_ui_off": "オフ",
"com_ui_offline": "オフライン",
"com_ui_on": "オン",
"com_ui_open_source_chat_new_tab": "新しいタブでチャットを開く",
"com_ui_open_source_chat_new_tab_title": "新しいタブでチャットを開く- {{title}}",
"com_ui_open_var": "開く {{0}}",
"com_ui_openai": "OpenAI",
"com_ui_opens_new_tab": "(新しいタブで開く)",
"com_ui_optional": "(任意)",
"com_ui_page": "ページ",
"com_ui_people": "人々",
@ -1062,12 +1175,15 @@
"com_ui_permissions_failed_load": "アクセス許可の読み込みに失敗しました。再試行してください。",
"com_ui_permissions_failed_update": "権限の更新に失敗しました。再試行してください。",
"com_ui_permissions_updated_success": "パーミッションの更新に成功",
"com_ui_pin": "ピン留め",
"com_ui_preferences_updated": "環境設定が正常に更新されました",
"com_ui_prev": "前",
"com_ui_preview": "プレビュー",
"com_ui_privacy_policy": "プライバシーポリシー",
"com_ui_privacy_policy_url": "プライバシーポリシーURL",
"com_ui_prompt": "プロンプト",
"com_ui_prompt_group_button": "{{name}} プロンプト、 {{category}} カテゴリ",
"com_ui_prompt_group_button_no_category": "{{name}} プロンプト",
"com_ui_prompt_groups": "プロンプトグループリスト",
"com_ui_prompt_input": "入力を促す",
"com_ui_prompt_input_field": "プロンプト・テキスト入力フィールド",
@ -1084,6 +1200,7 @@
"com_ui_provider": "プロバイダ",
"com_ui_quality": "品質",
"com_ui_read_aloud": "読み上げる",
"com_ui_redirect_uri": "リダイレクトURI",
"com_ui_redirecting_to_provider": "{{0}}にリダイレクト、 お待ちください...",
"com_ui_reference_saved_memories": "保存されたメモリを参照",
"com_ui_reference_saved_memories_description": "アシスタントが応答する際に、保存したメモリを参照し、使用できるようにする。",
@ -1101,6 +1218,7 @@
"com_ui_rename_conversation": "会話の名前を変更する",
"com_ui_rename_failed": "会話の名前を変更できませんでした",
"com_ui_rename_prompt": "プロンプトの名前を変更します",
"com_ui_rename_prompt_name": "プロンプトの名前変更 - {{name}}",
"com_ui_requires_auth": "認証が必要です",
"com_ui_reset": "リセット",
"com_ui_reset_adjustments": "調整をリセットする",
@ -1109,6 +1227,9 @@
"com_ui_resource": "リソース",
"com_ui_response": "応答",
"com_ui_result": "結果",
"com_ui_result_found": "{{count}} 件の結果が見つかりました",
"com_ui_results_found": "{{count}} 件の結果が見つかりました",
"com_ui_retry": "リトライ",
"com_ui_revoke": "無効にする",
"com_ui_revoke_info": "ユーザへ発行した認証情報をすべて無効にする。",
"com_ui_revoke_key_confirm": "この認証情報を無効にしてもよろしいですか?",
@ -1152,6 +1273,7 @@
"com_ui_seconds": "秒",
"com_ui_secret_key": "秘密鍵",
"com_ui_select": "選択",
"com_ui_select_agent": "エージェントを選択",
"com_ui_select_all": "すべて選択",
"com_ui_select_file": "ファイルを選択",
"com_ui_select_model": "モデル選択",
@ -1160,6 +1282,7 @@
"com_ui_select_provider": "プロバイダーを選択してください",
"com_ui_select_provider_first": "最初にプロバイダーを選択してください",
"com_ui_select_region": "地域を選択",
"com_ui_select_row": "行を選択",
"com_ui_select_search_model": "名前でモデルを検索",
"com_ui_select_search_provider": "プロバイダー名で検索",
"com_ui_select_search_region": "地域名で検索",
@ -1169,7 +1292,7 @@
"com_ui_share_delete_error": "共有リンクの削除中にエラーが発生しました。",
"com_ui_share_error": "チャットの共有リンクの共有中にエラーが発生しました",
"com_ui_share_everyone": "みんなと共有する",
"com_ui_share_everyone_description_var": "これ {{resource}} 誰でもご利用いただけます。 {{resource}} 本当はみんなで共有するべきものです。データの取り扱いにはご注意ください。",
"com_ui_share_everyone_description_var": "この {{resource}} は全員が利用できるようになります。この {{resource}} が本当に全員と共有することを意図したものか、必ず確認してください。データの取り扱いには注意してください。",
"com_ui_share_link_to_chat": "チャットへの共有リンク",
"com_ui_share_qr_code_description": "この会話リンクを共有するためのQRコード",
"com_ui_share_update_message": "あなたの名前、カスタム指示、共有リンクを作成した後のメッセージは、共有されません。",
@ -1179,22 +1302,30 @@
"com_ui_shared_prompts": "共有されたプロンプト",
"com_ui_shop": "買い物",
"com_ui_show_all": "すべて表示",
"com_ui_show_code": "コード表示",
"com_ui_show_image_details": "画像の詳細を表示",
"com_ui_show_password": "パスワードを表示する",
"com_ui_show_qr": "QR コードを表示",
"com_ui_sign_in_to_domain": "{{0}}にサインインする",
"com_ui_simple": "シンプル",
"com_ui_size": "サイズ",
"com_ui_size_sort": "サイズ順",
"com_ui_special_var_current_date": "現在の日付",
"com_ui_special_var_current_datetime": "現在の日時",
"com_ui_special_var_current_user": "現在のユーザー",
"com_ui_special_var_iso_datetime": "UTC ISO 日時",
"com_ui_special_variables": "特殊変数:",
"com_ui_special_variables_more_info": "ドロップダウンから特別な変数を選択できます: `{{current_date}}` (今日の日付と曜日)、`{{current_datetime}}` (現地の日付と時刻)、`{{utc_iso_datetime}}` (UTC ISO 日時)、および `{{current_user}}` (アカウント名)。",
"com_ui_speech_not_supported": "お使いのブラウザは音声認識をサポートしていません",
"com_ui_speech_not_supported_use_external": "お使いのブラウザは音声認識をサポートしていません。[設定] > [音声]で[外部STT]に切り替えてみてください。",
"com_ui_speech_while_submitting": "応答の生成中は音声を送信できません",
"com_ui_sr_actions_menu": "{{0}}のアクションメニューを開く",
"com_ui_sr_global_prompt": "グローバルプロンプトグループ",
"com_ui_stack_trace": "スタックトレース",
"com_ui_status_prefix": "ステータス:",
"com_ui_stop": "止める",
"com_ui_storage": "ストレージ",
"com_ui_storage_filter_sort": "ストレージによるフィルタリングと並べ替え",
"com_ui_submit": "送信する",
"com_ui_support_contact": "サポート窓口",
"com_ui_support_contact_email": "電子メール",
@ -1209,16 +1340,22 @@
"com_ui_terms_of_service": "利用規約",
"com_ui_thinking": "考え中...",
"com_ui_thoughts": "推論",
"com_ui_toggle_theme": "テーマを切り替える",
"com_ui_token": "トークン",
"com_ui_token_exchange_method": "トークン交換方法",
"com_ui_token_url": "トークンURL",
"com_ui_tokens": "トークン",
"com_ui_tool_collection_prefix": "ツールのコレクション",
"com_ui_tool_list_collapse": "折りたたむ {{serverName}} ツールリスト",
"com_ui_tool_list_expand": "展開 {{serverName}} ツールリスト",
"com_ui_tools": "ツール",
"com_ui_tools_and_actions": "ツールとアクション",
"com_ui_transferred_to": "転送先",
"com_ui_travel": "旅行",
"com_ui_trust_app": "このアプリケーションを信頼している",
"com_ui_try_adjusting_search": "検索条件を調整する",
"com_ui_ui_resource_error": "UI リソース エラー ({{0}}",
"com_ui_ui_resource_not_found": "UI リソースが見つかりませんindex {{0}})",
"com_ui_ui_resources": "UIリソース",
"com_ui_unarchive": "アーカイブ解除",
"com_ui_unarchive_error": "アーカイブ解除に失敗しました。",
@ -1237,6 +1374,7 @@
"com_ui_upload_file_context": "ファイルコンテキストをアップロード",
"com_ui_upload_file_search": "ファイル検索用アップロード",
"com_ui_upload_files": "ファイルをアップロード",
"com_ui_upload_icon": "アイコン画像をアップロード",
"com_ui_upload_image": "画像をアップロード",
"com_ui_upload_image_input": "画像をアップロード",
"com_ui_upload_invalid": "アップロードに無効なファイルです。制限を超えない画像である必要があります。",
@ -1253,6 +1391,7 @@
"com_ui_used": "使用済み",
"com_ui_user": "ユーザー",
"com_ui_user_group_permissions": "ユーザーとグループの権限",
"com_ui_user_provides_key": "ユーザーは個人キーを登録する",
"com_ui_value": "値",
"com_ui_variables": "変数",
"com_ui_variables_info": "テキスト内で二重中括弧を使用して変数を定義します。例えば、`{{example variable}}`のようにすると、プロンプトを使用するときに後で値を埋め込むことができます。",
@ -1289,6 +1428,7 @@
"com_ui_weekend_morning": "楽しい週末を",
"com_ui_write": "執筆",
"com_ui_x_selected": "{{0}}が選択された",
"com_ui_xhigh": "エクストラ・ハイ",
"com_ui_yes": "はい",
"com_ui_zoom": "ズーム",
"com_ui_zoom_in": "ズームイン",

View file

@ -634,7 +634,9 @@
"com_ui_active": "Aktīvais",
"com_ui_add": "Pievienot",
"com_ui_add_code_interpreter_api_key": "Pievienot kodu tulkošanas API atslēgu",
"com_ui_add_first_bookmark": "Noklikšķiniet uz sarunas, lai to pievienotu",
"com_ui_add_first_mcp_server": "Izveidojiet savu pirmo MCP serveri, lai sāktu darbu",
"com_ui_add_first_prompt": "Izveidojiet savu pirmo uzvedni, lai sāktu darbu",
"com_ui_add_mcp": "Pievienot MCP",
"com_ui_add_mcp_server": "Pievienot MCP serveri",
"com_ui_add_model_preset": "Pievienot modeli vai iestatījumu papildu atbildei",
@ -697,6 +699,7 @@
"com_ui_agents": "Aģenti",
"com_ui_agents_allow_create": "Atļaut aģentu izveidi",
"com_ui_agents_allow_share": "Atļaut aģentu kopīgot",
"com_ui_agents_allow_share_public": "Atļaut koplietot aģentus publiski",
"com_ui_agents_allow_use": "Atļaut aģentu izmantošanu",
"com_ui_all": "visu",
"com_ui_all_proper": "Visi",
@ -1088,6 +1091,7 @@
"com_ui_mcp_servers": "MCP serveri",
"com_ui_mcp_servers_allow_create": "Atļaut lietotājiem izveidot MCP serverus",
"com_ui_mcp_servers_allow_share": "Atļaut lietotājiem koplietot MCP serverus",
"com_ui_mcp_servers_allow_share_public": "Ļaujiet lietotājiem publiski koplietot MCP serverus",
"com_ui_mcp_servers_allow_use": "Atļaut lietotājiem izmantot MCP serverus",
"com_ui_mcp_title_invalid": "Virsrakstā var būt tikai burti, cipari un atstarpes.",
"com_ui_mcp_transport": "Transports",
@ -1139,6 +1143,7 @@
"com_ui_no_auth": "Nav autorizācijas",
"com_ui_no_bookmarks": "Šķiet, ka jums vēl nav grāmatzīmju. Noklikšķiniet uz sarunas un pievienojiet jaunu.",
"com_ui_no_bookmarks_match": "Nav atbilstošu grāmatzīmju meklēšanas vaicājumam",
"com_ui_no_bookmarks_title": "Vēl nav nevienas grāmatzīmes",
"com_ui_no_categories": "Nav pieejamas nevienas kategorijas",
"com_ui_no_category": "Nav kategorijas",
"com_ui_no_changes": "Izmaiņas netika veiktas",
@ -1149,6 +1154,7 @@
"com_ui_no_memories_match": "Nav atmiņu, kas atbilstu jūsu meklēšanas vaicājumam",
"com_ui_no_memories_title": "Vēl nav atmiņu",
"com_ui_no_personalization_available": "Pašlaik nav pieejamas personalizācijas opcijas",
"com_ui_no_prompts_title": "Vēl nav uzvedņu",
"com_ui_no_read_access": "Jums nav atļaujas skatīt atmiņas",
"com_ui_no_results_found": "Nav atrastu rezultātu",
"com_ui_no_terms_content": "Nav noteikumu un nosacījumu satura, ko parādīt",
@ -1205,6 +1211,7 @@
"com_ui_prompts": "Uzvednes",
"com_ui_prompts_allow_create": "Atļaut uzvedņu izveidi",
"com_ui_prompts_allow_share": "Atļaut kopīgošanas uzvednes",
"com_ui_prompts_allow_share_public": "Atļaut kopīgot uzvednes publiski",
"com_ui_prompts_allow_use": "Atļaut izmantot uzvednes",
"com_ui_provider": "Pakalpojumu sniedzējs",
"com_ui_quality": "Kvalitāte",
@ -1385,6 +1392,7 @@
"com_ui_upload_file_context": "Augšupielādēt failu kā kontekstu",
"com_ui_upload_file_search": "Augšupielādēt vektorizētai meklēšanai",
"com_ui_upload_files": "Augšupielādēt failus",
"com_ui_upload_icon": "Augšupielādēt ikonas attēlu",
"com_ui_upload_image": "Augšupielādēt failu kā attēlu",
"com_ui_upload_image_input": "Augšupielādēt failu kā attēlu",
"com_ui_upload_invalid": "Nederīgs augšupielādējamais fails. Attēlam jābūt tādam, kas nepārsniedz ierobežojumu.",

View file

@ -10,9 +10,6 @@ export function getClient() {
/** @type {Anthropic.default.RequestOptions} */
const options = {
apiKey: process.env.ANTHROPIC_API_KEY,
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
};
return new Anthropic(options);

61
package-lock.json generated
View file

@ -57,10 +57,9 @@
"@azure/search-documents": "^12.0.0",
"@azure/storage-blob": "^12.27.0",
"@google/genai": "^1.19.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.66",
"@librechat/agents": "^3.0.77",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
@ -126,7 +125,6 @@
"undici": "^7.10.0",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^5.0.0",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {
@ -10739,18 +10737,6 @@
"node": ">=18.0.0"
}
},
"node_modules/@googleapis/youtube": {
"version": "20.0.0",
"resolved": "https://registry.npmjs.org/@googleapis/youtube/-/youtube-20.0.0.tgz",
"integrity": "sha512-wdt1J0JoKYhvpoS2XIRHX0g/9ul/B0fQeeJAhuuBIdYINuuLt6/oZYZZCBmkuhtkA3IllXgqgAXOjLtLRAnR2g==",
"license": "Apache-2.0",
"dependencies": {
"googleapis-common": "^7.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/@grpc/grpc-js": {
"version": "1.9.15",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.9.15.tgz",
@ -12660,9 +12646,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "3.0.66",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.66.tgz",
"integrity": "sha512-JpQo7w+/yLM3dJ46lyGrm4gPTjiHERwcpojw7drvpYWqOU4e2jmjK0JbNxQ0jP00q+nDhPG+mqJ2qQU7TVraOQ==",
"version": "3.0.77",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-3.0.77.tgz",
"integrity": "sha512-Wr9d8bjJAQSl03nEgnAPG6jBQT1fL3sNV3TFDN1FvFQt6WGfdok838Cbcn+/tSGXSPJcICTxNkMT7VN8P6bCPw==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.26",
@ -12686,6 +12672,7 @@
"https-proxy-agent": "^7.0.6",
"mathjs": "^15.1.0",
"nanoid": "^3.3.7",
"okapibm25": "^1.4.1",
"openai": "5.8.2"
},
"engines": {
@ -27689,22 +27676,6 @@
"node": ">=14"
}
},
"node_modules/googleapis-common": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/googleapis-common/-/googleapis-common-7.0.1.tgz",
"integrity": "sha512-mgt5zsd7zj5t5QXvDanjWguMdHAcJmmDrF9RkInCecNsyV7S7YtGqm5v2IWONNID88osb7zmx5FtrAP12JfD0w==",
"dependencies": {
"extend": "^3.0.2",
"gaxios": "^6.0.3",
"google-auth-library": "^9.0.0",
"qs": "^6.7.0",
"url-template": "^2.0.8",
"uuid": "^9.0.0"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
@ -34310,6 +34281,12 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/okapibm25": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/okapibm25/-/okapibm25-1.4.1.tgz",
"integrity": "sha512-UHmeH4MAtZXGFVncwbY7pfFvDVNxpsyM3W66aGPU0SHj1+ld59ty+9lJ0ifcrcnPUl1XdYoDgb06ObyCnpTs3g==",
"license": "MIT"
},
"node_modules/ollama": {
"version": "0.5.18",
"resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.18.tgz",
@ -41531,11 +41508,6 @@
"requires-port": "^1.0.0"
}
},
"node_modules/url-template": {
"version": "2.0.8",
"resolved": "https://registry.npmjs.org/url-template/-/url-template-2.0.8.tgz",
"integrity": "sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw=="
},
"node_modules/url/node_modules/punycode": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
@ -43088,15 +43060,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/youtube-transcript": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/youtube-transcript/-/youtube-transcript-1.2.1.tgz",
"integrity": "sha512-TvEGkBaajKw+B6y91ziLuBLsa5cawgowou+Bk0ciGpjELDfAzSzTGXaZmeSSkUeknCPpEr/WGApOHDwV7V+Y9Q==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/zod": {
"version": "3.25.67",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz",
@ -43169,7 +43132,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.66",
"@librechat/agents": "^3.0.77",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@smithy/node-http-handler": "^4.4.5",

View file

@ -88,7 +88,7 @@
"@google/genai": "^1.19.0",
"@keyv/redis": "^4.3.3",
"@langchain/core": "^0.3.80",
"@librechat/agents": "^3.0.66",
"@librechat/agents": "^3.0.77",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.25.2",
"@smithy/node-http-handler": "^4.4.5",

View file

@ -0,0 +1,228 @@
import { FileSources } from 'librechat-data-provider';
import type { Agent, AgentAvatar, AgentModelParameters } from 'librechat-data-provider';
import type { RefreshS3UrlFn, UpdateAgentFn } from './avatars';
import {
MAX_AVATAR_REFRESH_AGENTS,
AVATAR_REFRESH_BATCH_SIZE,
refreshListAvatars,
} from './avatars';
// Unit tests for refreshListAvatars: the opportunistic S3 avatar refresh pass
// applied to agent list responses. Both collaborators (refreshS3Url,
// updateAgent) are injected, so the tests drive them with jest mocks and
// assert on the returned RefreshStats counters.
describe('refreshListAvatars', () => {
  let mockRefreshS3Url: jest.MockedFunction<RefreshS3UrlFn>;
  let mockUpdateAgent: jest.MockedFunction<UpdateAgentFn>;
  const userId = 'user123';

  beforeEach(() => {
    // Fresh mocks per test so call counts/arguments don't leak between cases.
    mockRefreshS3Url = jest.fn();
    mockUpdateAgent = jest.fn();
  });

  // Builds a minimal Agent owned by `userId` with an S3-backed avatar;
  // individual tests tweak fields through `overrides`.
  const createAgent = (overrides: Partial<Agent> = {}): Agent => ({
    _id: 'obj1',
    id: 'agent1',
    name: 'Test Agent',
    author: userId,
    description: 'Test',
    created_at: Date.now(),
    avatar: {
      source: FileSources.s3,
      filepath: 'old-path.jpg',
    },
    instructions: null,
    provider: 'openai',
    model: 'gpt-4',
    model_parameters: {} as AgentModelParameters,
    ...overrides,
  });

  it('should return empty stats for empty agents array', async () => {
    const stats = await refreshListAvatars({
      agents: [],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.updated).toBe(0);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
    expect(mockUpdateAgent).not.toHaveBeenCalled();
  });

  it('should skip non-S3 avatars', async () => {
    const agent = createAgent({
      avatar: { source: 'local', filepath: 'local-path.jpg' } as AgentAvatar,
    });

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.not_s3).toBe(1);
    expect(stats.updated).toBe(0);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
  });

  it('should skip agents without id', async () => {
    const agent = createAgent({ id: '' });

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.no_id).toBe(1);
    expect(mockRefreshS3Url).not.toHaveBeenCalled();
  });

  // Ownership is not required: any viewer of the agent may trigger a refresh.
  it('should refresh avatars for agents owned by other users (VIEW access)', async () => {
    const agent = createAgent({ author: 'otherUser' });
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.updated).toBe(1);
    expect(mockRefreshS3Url).toHaveBeenCalled();
    expect(mockUpdateAgent).toHaveBeenCalled();
  });

  // Happy path: a changed URL is persisted via updateAgent with
  // skipVersioning so the refresh does not create a new agent version.
  it('should refresh and persist S3 avatars', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.updated).toBe(1);
    expect(mockRefreshS3Url).toHaveBeenCalledWith(agent.avatar);
    expect(mockUpdateAgent).toHaveBeenCalledWith(
      { id: 'agent1' },
      { avatar: { filepath: 'new-path.jpg', source: FileSources.s3 } },
      { updatingUserId: userId, skipVersioning: true },
    );
  });

  it('should not update if S3 URL unchanged', async () => {
    const agent = createAgent();
    // Same path as the stored avatar -> no write should happen.
    mockRefreshS3Url.mockResolvedValue('old-path.jpg');

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.no_change).toBe(1);
    expect(stats.updated).toBe(0);
    expect(mockUpdateAgent).not.toHaveBeenCalled();
  });

  it('should handle S3 refresh errors gracefully', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockRejectedValue(new Error('S3 error'));

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.s3_error).toBe(1);
    expect(stats.updated).toBe(0);
  });

  it('should handle database persist errors gracefully', async () => {
    const agent = createAgent();
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockRejectedValue(new Error('DB error'));

    const stats = await refreshListAvatars({
      agents: [agent],
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.persist_error).toBe(1);
    expect(stats.updated).toBe(0);
  });

  // 25 agents with a batch size of 20 forces at least two batches; every
  // agent must still be refreshed exactly once.
  it('should process agents in batches', async () => {
    const agents = Array.from({ length: 25 }, (_, i) =>
      createAgent({
        _id: `obj${i}`,
        id: `agent${i}`,
        avatar: { source: FileSources.s3, filepath: `path${i}.jpg` },
      }),
    );
    mockRefreshS3Url.mockImplementation((avatar) =>
      Promise.resolve(avatar.filepath.replace('.jpg', '-new.jpg')),
    );
    mockUpdateAgent.mockResolvedValue({});

    const stats = await refreshListAvatars({
      agents,
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.updated).toBe(25);
    expect(mockRefreshS3Url).toHaveBeenCalledTimes(25);
    expect(mockUpdateAgent).toHaveBeenCalledTimes(25);
  });

  // One run mixing every early-exit path with successful refreshes; each
  // counter should reflect exactly one classification per agent.
  it('should track mixed statistics correctly', async () => {
    const agents = [
      createAgent({ id: 'agent1' }),
      createAgent({ id: 'agent2', author: 'otherUser' }),
      createAgent({
        id: 'agent3',
        avatar: { source: 'local', filepath: 'local.jpg' } as AgentAvatar,
      }),
      createAgent({ id: '' }), // no id
    ];
    mockRefreshS3Url.mockResolvedValue('new-path.jpg');
    mockUpdateAgent.mockResolvedValue({});

    const stats = await refreshListAvatars({
      agents,
      userId,
      refreshS3Url: mockRefreshS3Url,
      updateAgent: mockUpdateAgent,
    });

    expect(stats.updated).toBe(2); // agent1 and agent2 (other user's agent now refreshed)
    expect(stats.not_s3).toBe(1); // agent3
    expect(stats.no_id).toBe(1); // agent with empty id
  });
});
describe('Constants', () => {
it('should export MAX_AVATAR_REFRESH_AGENTS as 1000', () => {
expect(MAX_AVATAR_REFRESH_AGENTS).toBe(1000);
});
it('should export AVATAR_REFRESH_BATCH_SIZE as 20', () => {
expect(AVATAR_REFRESH_BATCH_SIZE).toBe(20);
});
});

View file

@ -0,0 +1,122 @@
import { logger } from '@librechat/data-schemas';
import { FileSources } from 'librechat-data-provider';
import type { Agent, AgentAvatar } from 'librechat-data-provider';
const MAX_AVATAR_REFRESH_AGENTS = 1000;
const AVATAR_REFRESH_BATCH_SIZE = 20;
export { MAX_AVATAR_REFRESH_AGENTS, AVATAR_REFRESH_BATCH_SIZE };
export type RefreshS3UrlFn = (avatar: AgentAvatar) => Promise<string | undefined>;
export type UpdateAgentFn = (
searchParams: { id: string },
updateData: { avatar: AgentAvatar },
options: { updatingUserId: string; skipVersioning: boolean },
) => Promise<unknown>;
export type RefreshListAvatarsParams = {
agents: Agent[];
userId: string;
refreshS3Url: RefreshS3UrlFn;
updateAgent: UpdateAgentFn;
};
export type RefreshStats = {
updated: number;
not_s3: number;
no_id: number;
no_change: number;
s3_error: number;
persist_error: number;
};
/**
* Opportunistically refreshes S3-backed avatars for agent list responses.
* Processes agents in batches to prevent database connection pool exhaustion.
* Only list responses are refreshed because they're the highest-traffic surface and
* the avatar URLs have a short-lived TTL. The refresh is cached per-user for 30 minutes
* so we refresh once per interval at most.
*
* Any user with VIEW access to an agent can refresh its avatar URL. This ensures
* avatars remain accessible even when the owner hasn't logged in recently.
* The agents array should already be filtered to only include agents the user can access.
*/
export const refreshListAvatars = async ({
agents,
userId,
refreshS3Url,
updateAgent,
}: RefreshListAvatarsParams): Promise<RefreshStats> => {
const stats: RefreshStats = {
updated: 0,
not_s3: 0,
no_id: 0,
no_change: 0,
s3_error: 0,
persist_error: 0,
};
if (!agents?.length) {
return stats;
}
logger.debug('[refreshListAvatars] Refreshing S3 avatars for agents: %d', agents.length);
for (let i = 0; i < agents.length; i += AVATAR_REFRESH_BATCH_SIZE) {
const batch = agents.slice(i, i + AVATAR_REFRESH_BATCH_SIZE);
await Promise.all(
batch.map(async (agent) => {
if (agent?.avatar?.source !== FileSources.s3 || !agent?.avatar?.filepath) {
stats.not_s3++;
return;
}
if (!agent?.id) {
logger.debug(
'[refreshListAvatars] Skipping S3 avatar refresh for agent: %s, ID is not set',
agent._id,
);
stats.no_id++;
return;
}
try {
logger.debug('[refreshListAvatars] Refreshing S3 avatar for agent: %s', agent._id);
const newPath = await refreshS3Url(agent.avatar);
if (newPath && newPath !== agent.avatar.filepath) {
try {
await updateAgent(
{ id: agent.id },
{
avatar: {
filepath: newPath,
source: agent.avatar.source,
},
},
{
updatingUserId: userId,
skipVersioning: true,
},
);
stats.updated++;
} catch (persistErr) {
logger.error('[refreshListAvatars] Avatar refresh persist error: %o', persistErr);
stats.persist_error++;
}
} else {
stats.no_change++;
}
} catch (err) {
logger.error('[refreshListAvatars] S3 avatar refresh error: %o', err);
stats.s3_error++;
}
}),
);
}
logger.info('[refreshListAvatars] Avatar refresh summary: %o', stats);
return stats;
};

View file

@ -1,3 +1,4 @@
export * from './avatars';
export * from './chain';
export * from './edges';
export * from './initialize';

View file

@ -42,30 +42,19 @@ function getClaudeHeaders(
if (/claude-3[-.]5-sonnet/.test(model)) {
return {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
};
} else if (/claude-3[-.]7/.test(model)) {
return {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
'anthropic-beta': 'token-efficient-tools-2025-02-19,output-128k-2025-02-19',
};
} else if (/claude-sonnet-4/.test(model)) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
};
} else if (
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
} else {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
'anthropic-beta': 'context-1m-2025-08-07',
};
}
return undefined;
}
/**

View file

@ -87,7 +87,7 @@ describe('getLLMConfig', () => {
expect(result.llmConfig.thinking).toHaveProperty('budget_tokens', 2000);
});
it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model', () => {
it('should add "context-1m" beta header and promptCache boolean for claude-sonnet-4 model', () => {
const modelOptions = {
model: 'claude-sonnet-4-20250514',
promptCache: true,
@ -97,12 +97,11 @@ describe('getLLMConfig', () => {
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31,context-1m-2025-08-07',
);
expect(defaultHeaders['anthropic-beta']).toBe('context-1m-2025-08-07');
expect(result.llmConfig.promptCache).toBe(true);
});
it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model formats', () => {
it('should add "context-1m" beta header and promptCache boolean for claude-sonnet-4 model formats', () => {
const modelVariations = [
'claude-sonnet-4-20250514',
'claude-sonnet-4-latest',
@ -116,26 +115,23 @@ describe('getLLMConfig', () => {
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31,context-1m-2025-08-07',
);
expect(defaultHeaders['anthropic-beta']).toBe('context-1m-2025-08-07');
expect(result.llmConfig.promptCache).toBe(true);
});
});
it('should add "prompt-caching" beta header for claude-opus-4-5 model', () => {
it('should pass promptCache boolean for claude-opus-4-5 model (no beta header needed)', () => {
const modelOptions = {
model: 'claude-opus-4-5',
promptCache: true,
};
const result = getLLMConfig('test-key', { modelOptions });
const clientOptions = result.llmConfig.clientOptions;
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe('prompt-caching-2024-07-31');
expect(clientOptions?.defaultHeaders).toBeUndefined();
expect(result.llmConfig.promptCache).toBe(true);
});
it('should add "prompt-caching" beta header for claude-opus-4-5 model formats', () => {
it('should pass promptCache boolean for claude-opus-4-5 model formats (no beta header needed)', () => {
const modelVariations = [
'claude-opus-4-5',
'claude-opus-4-5-20250420',
@ -147,10 +143,8 @@ describe('getLLMConfig', () => {
const modelOptions = { model, promptCache: true };
const result = getLLMConfig('test-key', { modelOptions });
const clientOptions = result.llmConfig.clientOptions;
expect(clientOptions?.defaultHeaders).toBeDefined();
expect(clientOptions?.defaultHeaders).toHaveProperty('anthropic-beta');
const defaultHeaders = clientOptions?.defaultHeaders as Record<string, string>;
expect(defaultHeaders['anthropic-beta']).toBe('prompt-caching-2024-07-31');
expect(clientOptions?.defaultHeaders).toBeUndefined();
expect(result.llmConfig.promptCache).toBe(true);
});
});
@ -309,10 +303,11 @@ describe('getLLMConfig', () => {
},
});
// claude-3-5-sonnet supports prompt caching and should get the appropriate headers
// claude-3-5-sonnet supports prompt caching and should get the max-tokens header and promptCache boolean
expect(result.llmConfig.clientOptions?.defaultHeaders).toEqual({
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
});
expect(result.llmConfig.promptCache).toBe(true);
});
it('should handle thinking and thinkingBudget options', () => {
@ -520,9 +515,10 @@ describe('getLLMConfig', () => {
expect(result.llmConfig).not.toHaveProperty('topK');
// Should have appropriate headers for Claude-3.7 with prompt cache
expect(result.llmConfig.clientOptions?.defaultHeaders).toEqual({
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
'anthropic-beta': 'token-efficient-tools-2025-02-19,output-128k-2025-02-19',
});
// Should pass promptCache boolean
expect(result.llmConfig.promptCache).toBe(true);
});
it('should handle web search functionality like production', () => {
@ -1170,21 +1166,67 @@ describe('getLLMConfig', () => {
it('should handle prompt cache support logic for different models', () => {
const testCases = [
// Models that support prompt cache
{ model: 'claude-3-5-sonnet', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3.5-sonnet-20241022', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3-7-sonnet', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3.7-sonnet-20250109', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3-opus', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-sonnet-4-20250514', promptCache: true, shouldHaveHeaders: true },
// Models that support prompt cache (and have other beta headers)
{
model: 'claude-3-5-sonnet',
promptCache: true,
shouldHaveHeaders: true,
shouldHavePromptCache: true,
},
{
model: 'claude-3.5-sonnet-20241022',
promptCache: true,
shouldHaveHeaders: true,
shouldHavePromptCache: true,
},
{
model: 'claude-3-7-sonnet',
promptCache: true,
shouldHaveHeaders: true,
shouldHavePromptCache: true,
},
{
model: 'claude-3.7-sonnet-20250109',
promptCache: true,
shouldHaveHeaders: true,
shouldHavePromptCache: true,
},
{
model: 'claude-sonnet-4-20250514',
promptCache: true,
shouldHaveHeaders: true,
shouldHavePromptCache: true,
},
// Models that support prompt cache but have no additional beta headers needed
{
model: 'claude-3-opus',
promptCache: true,
shouldHaveHeaders: false,
shouldHavePromptCache: true,
},
// Models that don't support prompt cache
{ model: 'claude-3-5-sonnet-latest', promptCache: true, shouldHaveHeaders: false },
{ model: 'claude-3.5-sonnet-latest', promptCache: true, shouldHaveHeaders: false },
{
model: 'claude-3-5-sonnet-latest',
promptCache: true,
shouldHaveHeaders: false,
shouldHavePromptCache: false,
},
{
model: 'claude-3.5-sonnet-latest',
promptCache: true,
shouldHaveHeaders: false,
shouldHavePromptCache: false,
},
// Prompt cache disabled
{ model: 'claude-3-5-sonnet', promptCache: false, shouldHaveHeaders: false },
{
model: 'claude-3-5-sonnet',
promptCache: false,
shouldHaveHeaders: false,
shouldHavePromptCache: false,
},
];
testCases.forEach(({ model, promptCache, shouldHaveHeaders }) => {
testCases.forEach(({ model, promptCache, shouldHaveHeaders, shouldHavePromptCache }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model, promptCache },
});
@ -1193,12 +1235,16 @@ describe('getLLMConfig', () => {
if (shouldHaveHeaders) {
expect(headers).toBeDefined();
expect((headers as Record<string, string>)['anthropic-beta']).toContain(
'prompt-caching',
);
expect((headers as Record<string, string>)['anthropic-beta']).toBeDefined();
} else {
expect(headers).toBeUndefined();
}
if (shouldHavePromptCache) {
expect(result.llmConfig.promptCache).toBe(true);
} else {
expect(result.llmConfig.promptCache).toBeUndefined();
}
});
});
});

View file

@ -155,6 +155,12 @@ function getLLMConfig(
const supportsCacheControl =
systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model ?? '');
/** Pass promptCache boolean for downstream cache_control application */
if (supportsCacheControl) {
(requestOptions as Record<string, unknown>).promptCache = true;
}
const headers = getClaudeHeaders(requestOptions.model ?? '', supportsCacheControl);
if (headers && requestOptions.clientOptions) {
requestOptions.clientOptions.defaultHeaders = headers;

View file

@ -79,7 +79,7 @@ export function isAnthropicVertexCredentials(credentials: AnthropicCredentials):
/**
* Filters anthropic-beta header values to only include those supported by Vertex AI.
* Vertex AI rejects prompt-caching-2024-07-31 but we use 'prompt-caching-vertex' as a
* Vertex AI handles caching differently and we use 'prompt-caching-vertex' as a
* marker to trigger cache_control application in the agents package.
*/
function filterVertexHeaders(headers?: Record<string, string>): Record<string, string> | undefined {

View file

@ -39,12 +39,13 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
type: 'enabled',
budget_tokens: 2000,
},
promptCache: true,
},
},
configOptions: {
baseURL: 'http://host.docker.internal:4000/v1',
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
'anthropic-beta': 'context-1m-2025-08-07',
},
},
tools: [],
@ -87,13 +88,13 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
type: 'enabled',
budget_tokens: 3000,
},
promptCache: true,
},
},
configOptions: {
baseURL: 'http://localhost:4000/v1',
defaultHeaders: {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
'anthropic-beta': 'token-efficient-tools-2025-02-19,output-128k-2025-02-19',
},
},
tools: [],
@ -135,13 +136,13 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
user_id: 'user123',
},
topK: 50,
promptCache: true,
},
},
configOptions: {
baseURL: 'http://localhost:4000/v1',
defaultHeaders: {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
'anthropic-beta': 'token-efficient-tools-2025-02-19,output-128k-2025-02-19',
},
},
tools: [],
@ -177,19 +178,20 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
metadata: {
user_id: 'user456',
},
promptCache: true,
},
},
configOptions: {
baseURL: 'https://api.anthropic.proxy.com/v1',
defaultHeaders: {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
},
},
tools: [],
});
});
it('should apply anthropic-beta headers based on model pattern', () => {
it('should apply custom headers and promptCache for models that support caching', () => {
const apiKey = 'sk-custom';
const endpoint = 'Anthropic (via LiteLLM)';
const options = {
@ -220,6 +222,7 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
metadata: {
user_id: undefined,
},
promptCache: true,
},
},
configOptions: {
@ -227,7 +230,6 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
defaultHeaders: {
'Custom-Header': 'custom-value',
Authorization: 'Bearer custom-token',
'anthropic-beta': 'prompt-caching-2024-07-31',
},
},
tools: [],
@ -303,15 +305,15 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
stream: true,
topP: 0.9,
maxTokens: 2048,
modelKwargs: {
promptCache: true,
},
// temperature is dropped
// modelKwargs.topK is dropped
// modelKwargs.metadata is dropped completely
},
configOptions: {
baseURL: 'http://proxy.litellm/v1',
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
},
tools: [],
});
@ -385,13 +387,11 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
metadata: {
user_id: 'searchUser',
},
promptCache: true,
},
},
configOptions: {
baseURL: 'http://litellm/v1',
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
},
tools: [
{
@ -434,13 +434,11 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
user_id: 'testUser',
},
topK: 40,
promptCache: true,
},
},
configOptions: {
baseURL: 'http://litellm/v1',
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
},
tools: [],
});
@ -482,15 +480,13 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
metadata: {
user_id: 'addUser',
},
promptCache: true,
customParam1: 'value1', // Unknown params added to modelKwargs
customParam2: 42,
},
},
configOptions: {
baseURL: 'http://litellm/v1',
defaultHeaders: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
},
tools: [],
});
@ -534,6 +530,7 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
metadata: {
user_id: 'bothUser',
},
promptCache: true,
customParam: 'customValue',
// topK is dropped
},
@ -541,7 +538,7 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
configOptions: {
baseURL: 'http://litellm/v1',
defaultHeaders: {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
},
},
tools: [],

View file

@ -134,7 +134,7 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
);
}
const existingServer = await this._dbMethods.findMCPServerById(serverName);
const existingServer = await this._dbMethods.findMCPServerByServerName(serverName);
let configToSave: ParsedServerConfig = { ...config };
// Transform user-provided API key config (adds customUserVars and headers)
@ -204,7 +204,7 @@ export class ServerConfigsDB implements IServerConfigsRepositoryInterface {
* @returns The parsed server config or undefined if not found. If accessed via agent, consumeOnly will be true.
*/
public async get(serverName: string, userId?: string): Promise<ParsedServerConfig | undefined> {
const server = await this._dbMethods.findMCPServerById(serverName);
const server = await this._dbMethods.findMCPServerByServerName(serverName);
if (!server) return undefined;
// Check public access if no userId

View file

@ -584,8 +584,7 @@ describe('GenerationJobManager Integration Tests', () => {
};
GenerationJobManager.emitDone(streamId, finalEventData as never);
// Wait for async Redis update
await new Promise((resolve) => setTimeout(resolve, 50));
await new Promise((resolve) => setTimeout(resolve, 200));
// Verify finalEvent is in Redis
const jobStore = services.jobStore;

View file

@ -1,4 +1,3 @@
export * from './gemini';
export * from './imageContext';
export * from './oai';
export * from './yt';

View file

@ -1,61 +0,0 @@
import { z } from 'zod';
export const ytToolkit = {
youtube_search: {
name: 'youtube_search' as const,
description: `Search for YouTube videos by keyword or phrase.
- Required: query (search terms to find videos)
- Optional: maxResults (number of videos to return, 1-50, default: 5)
- Returns: List of videos with titles, descriptions, and URLs
- Use for: Finding specific videos, exploring content, research
Example: query="cooking pasta tutorials" maxResults=3` as const,
schema: z.object({
query: z.string().describe('Search query terms'),
maxResults: z.number().int().min(1).max(50).optional().describe('Number of results (1-50)'),
}),
},
youtube_info: {
name: 'youtube_info' as const,
description: `Get detailed metadata and statistics for a specific YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Video title, description, view count, like count, comment count
- Use for: Getting video metrics and basic metadata
- DO NOT USE FOR VIDEO SUMMARIES, USE TRANSCRIPTS FOR COMPREHENSIVE ANALYSIS
- Accepts both full URLs and video IDs
Example: url="https://youtube.com/watch?v=abc123" or url="abc123"` as const,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
} as const,
youtube_comments: {
name: 'youtube_comments',
description: `Retrieve top-level comments from a YouTube video.
- Required: url (full YouTube URL or video ID)
- Optional: maxResults (number of comments, 1-50, default: 10)
- Returns: Comment text, author names, like counts
- Use for: Sentiment analysis, audience feedback, engagement review
Example: url="abc123" maxResults=20`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
maxResults: z
.number()
.int()
.min(1)
.max(50)
.optional()
.describe('Number of comments to retrieve'),
}),
} as const,
youtube_transcript: {
name: 'youtube_transcript',
description: `Fetch and parse the transcript/captions of a YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Full video transcript as plain text
- Use for: Content analysis, summarization, translation reference
- This is the "Go-to" tool for analyzing actual video content
- Attempts to fetch English first, then German, then any available language
Example: url="https://youtube.com/watch?v=abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
} as const,
} as const;

View file

@ -32,11 +32,11 @@ export type FavoriteItem = {
};
export function getFavorites(): Promise<FavoriteItem[]> {
return request.get('/api/user/settings/favorites');
return request.get(`${endpoints.apiBaseUrl()}/api/user/settings/favorites`);
}
export function updateFavorites(favorites: FavoriteItem[]): Promise<FavoriteItem[]> {
return request.post('/api/user/settings/favorites', { favorites });
return request.post(`${endpoints.apiBaseUrl()}/api/user/settings/favorites`, { favorites });
}
export function getSharedMessages(shareId: string): Promise<t.TSharedMessagesResponse> {

View file

@ -228,22 +228,22 @@ describe('MCPServer Model Tests', () => {
});
});
describe('findMCPServerById', () => {
describe('findMCPServerByServerName', () => {
test('should find server by serverName', async () => {
const created = await methods.createMCPServer({
config: createSSEConfig('Find By Id Test'),
config: createSSEConfig('Find By Name Test'),
author: authorId,
});
const found = await methods.findMCPServerById(created.serverName);
const found = await methods.findMCPServerByServerName(created.serverName);
expect(found).toBeDefined();
expect(found?.serverName).toBe('find-by-id-test');
expect(found?.config.title).toBe('Find By Id Test');
expect(found?.serverName).toBe('find-by-name-test');
expect(found?.config.title).toBe('Find By Name Test');
});
test('should return null when server not found', async () => {
const found = await methods.findMCPServerById('non-existent-server');
const found = await methods.findMCPServerByServerName('non-existent-server');
expect(found).toBeNull();
});
@ -254,7 +254,7 @@ describe('MCPServer Model Tests', () => {
author: authorId,
});
const found = await methods.findMCPServerById('lean-test');
const found = await methods.findMCPServerByServerName('lean-test');
// Lean documents don't have mongoose methods
expect(found).toBeDefined();
@ -621,7 +621,7 @@ describe('MCPServer Model Tests', () => {
expect(deleted?.serverName).toBe('delete-test');
// Verify it's actually deleted
const found = await methods.findMCPServerById('delete-test');
const found = await methods.findMCPServerByServerName('delete-test');
expect(found).toBeNull();
});

View file

@ -134,10 +134,10 @@ export function createMCPServerMethods(mongoose: typeof import('mongoose')) {
/**
* Find an MCP server by serverName
* @param serverName - The MCP server ID
* @param serverName - The unique server name identifier
* @returns The MCP server document or null
*/
async function findMCPServerById(serverName: string): Promise<MCPServerDocument | null> {
async function findMCPServerByServerName(serverName: string): Promise<MCPServerDocument | null> {
const MCPServer = mongoose.models.MCPServer as Model<MCPServerDocument>;
return await MCPServer.findOne({ serverName }).lean();
}
@ -311,7 +311,7 @@ export function createMCPServerMethods(mongoose: typeof import('mongoose')) {
return {
createMCPServer,
findMCPServerById,
findMCPServerByServerName,
findMCPServerByObjectId,
findMCPServersByAuthor,
getListMCPServersByIds,

View file

@ -129,4 +129,139 @@ describe('Meilisearch Mongoose plugin', () => {
expect(mockAddDocuments).not.toHaveBeenCalled();
});
// Integration tests (in-memory mongoose) covering syncWithMeili's use of
// estimatedDocumentCount for progress tracking: the estimate may exceed the
// number of documents actually eligible for indexing (expiredAt: null), and
// the sync must still complete and index only the eligible ones.
describe('estimatedDocumentCount usage in syncWithMeili', () => {
  test('syncWithMeili completes successfully with estimatedDocumentCount', async () => {
    // Clear any previous documents
    const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
    await conversationModel.deleteMany({});

    // Create test documents
    await conversationModel.create({
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      title: 'Test Conversation 1',
      endpoint: EModelEndpoint.openAI,
    });
    await conversationModel.create({
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      title: 'Test Conversation 2',
      endpoint: EModelEndpoint.openAI,
    });

    // Trigger sync - should use estimatedDocumentCount internally
    await expect(conversationModel.syncWithMeili()).resolves.not.toThrow();

    // Verify documents were processed
    expect(mockAddDocuments).toHaveBeenCalled();
  });

  test('syncWithMeili handles empty collection correctly', async () => {
    const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
    await messageModel.deleteMany({});

    // Verify collection is empty
    const count = await messageModel.estimatedDocumentCount();
    expect(count).toBe(0);

    // Sync should complete without error even with 0 estimated documents
    await expect(messageModel.syncWithMeili()).resolves.not.toThrow();
  });

  test('estimatedDocumentCount returns count for non-empty collection', async () => {
    const conversationModel = createConversationModel(mongoose) as SchemaWithMeiliMethods;
    await conversationModel.deleteMany({});

    // Create documents
    await conversationModel.create({
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      title: 'Test 1',
      endpoint: EModelEndpoint.openAI,
    });
    await conversationModel.create({
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      title: 'Test 2',
      endpoint: EModelEndpoint.openAI,
    });

    // >= rather than === because the estimate is approximate by contract.
    const estimatedCount = await conversationModel.estimatedDocumentCount();
    expect(estimatedCount).toBeGreaterThanOrEqual(2);
  });

  test('estimatedDocumentCount is available on model', async () => {
    const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;

    // Verify the method exists and is callable
    expect(typeof messageModel.estimatedDocumentCount).toBe('function');

    // Should be able to call it
    const result = await messageModel.estimatedDocumentCount();
    expect(typeof result).toBe('number');
    expect(result).toBeGreaterThanOrEqual(0);
  });

  test('syncWithMeili handles mix of syncable and TTL documents correctly', async () => {
    const messageModel = createMessageModel(mongoose) as SchemaWithMeiliMethods;
    await messageModel.deleteMany({});
    mockAddDocuments.mockClear();

    // Create syncable documents (expiredAt: null)
    await messageModel.create({
      messageId: new mongoose.Types.ObjectId(),
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      isCreatedByUser: true,
      expiredAt: null,
    });
    await messageModel.create({
      messageId: new mongoose.Types.ObjectId(),
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      isCreatedByUser: false,
      expiredAt: null,
    });

    // Create TTL documents (expiredAt set to a date)
    await messageModel.create({
      messageId: new mongoose.Types.ObjectId(),
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      isCreatedByUser: true,
      expiredAt: new Date(),
    });
    await messageModel.create({
      messageId: new mongoose.Types.ObjectId(),
      conversationId: new mongoose.Types.ObjectId(),
      user: new mongoose.Types.ObjectId(),
      isCreatedByUser: false,
      expiredAt: new Date(),
    });

    // estimatedDocumentCount should count all documents (both syncable and TTL)
    const estimatedCount = await messageModel.estimatedDocumentCount();
    expect(estimatedCount).toBe(4);

    // Actual syncable documents (expiredAt: null)
    const syncableCount = await messageModel.countDocuments({ expiredAt: null });
    expect(syncableCount).toBe(2);

    // Sync should complete successfully even though estimated count is higher than processed count
    await expect(messageModel.syncWithMeili()).resolves.not.toThrow();

    // Only syncable documents should be indexed (2 documents, not 4)
    // The mock should be called once per batch, and we have 2 documents
    expect(mockAddDocuments).toHaveBeenCalled();

    // Verify that only 2 documents were indexed (the syncable ones)
    const indexedCount = await messageModel.countDocuments({ _meiliIndex: true });
    expect(indexedCount).toBe(2);
  });
});
});

View file

@ -189,8 +189,10 @@ const createMeiliMongooseModel = ({
query._id = { $gt: options.resumeFromId };
}
// Get total count for progress tracking
const totalCount = await this.countDocuments(query);
// Get approximate total count for progress tracking
const approxTotalCount = await this.estimatedDocumentCount();
logger.info(`[syncWithMeili] Approximate total number of documents to sync: ${approxTotalCount}`);
let processedCount = 0;
// First, handle documents that need to be removed from Meili
@ -239,8 +241,11 @@ const createMeiliMongooseModel = ({
updateOps = [];
// Log progress
const progress = Math.round((processedCount / totalCount) * 100);
logger.info(`[syncWithMeili] Progress: ${progress}% (${processedCount}/${totalCount})`);
// A percentage computed from the approximate total count can occasionally exceed 100%;
// the discrepancy is tiny and acceptable for progress tracking, so it is capped at 100 below
const percent = Math.round((processedCount / approxTotalCount) * 100);
const progress = Math.min(percent, 100);
logger.info(`[syncWithMeili] Progress: ${progress}% (count: ${processedCount})`);
// Add delay to prevent overwhelming resources
if (delayMs > 0) {