Merge branch 'main' into refactor/package-auth

This commit is contained in:
Cha 2025-06-17 18:26:25 +08:00
commit 02b9c9d447
340 changed files with 18559 additions and 14872 deletions

View file

@ -5,6 +5,7 @@ export default {
testResultsProcessor: 'jest-junit',
moduleNameMapper: {
'^@src/(.*)$': '<rootDir>/src/$1',
'~/(.*)': '<rootDir>/src/$1',
},
// coverageThreshold: {
// global: {

View file

@ -1,6 +1,6 @@
{
"name": "librechat-mcp",
"version": "1.2.2",
"name": "@librechat/api",
"version": "1.2.3",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",
@ -47,9 +47,11 @@
"@rollup/plugin-replace": "^5.0.5",
"@rollup/plugin-terser": "^0.4.4",
"@rollup/plugin-typescript": "^12.1.2",
"@types/bun": "^1.2.15",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/multer": "^1.4.13",
"@types/node": "^20.3.0",
"@types/react": "^18.2.18",
"@types/winston": "^2.4.4",
@ -66,13 +68,18 @@
"publishConfig": {
"registry": "https://registry.npmjs.org/"
},
"dependencies": {
"peerDependencies": {
"@librechat/agents": "^2.4.37",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.11.2",
"axios": "^1.8.2",
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^4.21.2"
},
"peerDependencies": {
"keyv": "^5.3.2"
"express": "^4.21.2",
"keyv": "^5.3.2",
"librechat-data-provider": "*",
"node-fetch": "2.7.0",
"tiktoken": "^1.0.15",
"zod": "^3.22.4"
}
}

View file

@ -1,5 +1,6 @@
// rollup.config.js
import { readFileSync } from 'fs';
import json from '@rollup/plugin-json';
import terser from '@rollup/plugin-terser';
import replace from '@rollup/plugin-replace';
import commonjs from '@rollup/plugin-commonjs';
@ -29,6 +30,7 @@ const plugins = [
inlineSourceMap: true,
}),
terser(),
json(),
];
const cjsBuild = {

View file

@ -0,0 +1,24 @@
import { EModelEndpoint, agentsEndpointSchema } from 'librechat-data-provider';
import type { TCustomConfig, TAgentsEndpoint } from 'librechat-data-provider';
/**
 * Sets up the Agents configuration from the config (`librechat.yaml`) file.
 * If no agents config is defined, uses the provided defaults or parses empty object.
 *
 * @param config - The loaded custom configuration.
 * @param [defaultConfig] - Default configuration from getConfigDefaults.
 * @returns The Agents endpoint configuration.
 */
export function agentsConfigSetup(
  config: TCustomConfig,
  defaultConfig?: Partial<TAgentsEndpoint>,
): Partial<TAgentsEndpoint> {
  const agentsConfig = config?.endpoints?.[EModelEndpoint.agents];
  if (!agentsConfig) {
    // No agents section in the YAML: prefer caller-supplied defaults,
    // otherwise let the schema produce its own defaults from an empty object.
    return defaultConfig ?? agentsEndpointSchema.parse({});
  }
  const parsedConfig = agentsEndpointSchema.parse(agentsConfig);
  return parsedConfig;
}

View file

@ -0,0 +1,4 @@
export * from './config';
export * from './memory';
export * from './resources';
export * from './run';

View file

@ -0,0 +1,468 @@
/** Memories */
import { z } from 'zod';
import { tool } from '@langchain/core/tools';
import { Tools } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import { Run, Providers, GraphEvents } from '@librechat/agents';
import type {
StreamEventData,
ToolEndCallback,
EventHandler,
ToolEndData,
LLMConfig,
} from '@librechat/agents';
import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
import type { ObjectId, MemoryMethods } from '@librechat/data-schemas';
import type { BaseMessage } from '@langchain/core/messages';
import type { Response as ServerResponse } from 'express';
import { Tokenizer } from '~/utils';
/** Subset of the data layer's memory methods this module requires. */
type RequiredMemoryMethods = Pick<
  MemoryMethods,
  'setMemory' | 'deleteMemory' | 'getFormattedMemories'
>;
/** Metadata forwarded with tool_end events; run/thread ids are used as message/conversation ids. */
type ToolEndMetadata = Record<string, unknown> & {
  run_id?: string;
  thread_id?: string;
};
/** Configuration options for the memory agent. */
export interface MemoryConfig {
  // When set, memory tools reject any key not in this list.
  validKeys?: string[];
  // Custom system instructions; defaults to getDefaultInstructions().
  instructions?: string;
  // Partial LLM config merged over the built-in default (see processMemory).
  llmConfig?: Partial<LLMConfig>;
  // Token budget applied per memory value and to total memory usage.
  tokenLimit?: number;
}
/** User-facing description of the automatic memory feature. */
export const memoryInstructions =
  'The system automatically stores important user information and can update or delete memories based on user requests, enabling dynamic memory management.';
/**
 * Builds the default system instructions for the memory agent.
 * When `validKeys` is provided, the prompt restricts storage to those keys;
 * when `tokenLimit` is provided, the prompt warns about the per-value budget.
 * The template text is part of the runtime prompt — do not reword casually.
 */
const getDefaultInstructions = (
  validKeys?: string[],
  tokenLimit?: number,
) => `Use the \`set_memory\` tool to save important information about the user, but ONLY when the user has explicitly provided this information. If there is nothing to note about the user specifically, END THE TURN IMMEDIATELY.
The \`delete_memory\` tool should only be used in two scenarios:
1. When the user explicitly asks to forget or remove specific information
2. When updating existing memories, use the \`set_memory\` tool instead of deleting and re-adding the memory.
${
  validKeys && validKeys.length > 0
    ? `CRITICAL INSTRUCTION: Only the following keys are valid for storing memories:
${validKeys.map((key) => `- ${key}`).join('\n ')}`
    : 'You can use any appropriate key to store memories about the user.'
}
${
  tokenLimit
    ? `⚠️ TOKEN LIMIT: Each memory value must not exceed ${tokenLimit} tokens. Be concise and store only essential information.`
    : ''
}
WARNING
DO NOT STORE ANY INFORMATION UNLESS THE USER HAS EXPLICITLY PROVIDED IT.
ONLY store information the user has EXPLICITLY shared.
NEVER guess or assume user information.
ALL memory values must be factual statements about THIS specific user.
If nothing needs to be stored, DO NOT CALL any memory tools.
If you're unsure whether to store something, DO NOT store it.
If nothing needs to be stored, END THE TURN IMMEDIATELY.`;
/**
 * Creates a memory tool instance with user context.
 *
 * The returned `set_memory` tool validates the key against `validKeys`
 * (when configured), enforces per-value and cumulative `tokenLimit` budgets,
 * then persists the value via `setMemory`. It uses the
 * 'content_and_artifact' response format: [message, artifact | undefined].
 */
const createMemoryTool = ({
  userId,
  setMemory,
  validKeys,
  tokenLimit,
  // Tokens already consumed by existing memories; feeds the cumulative budget check.
  totalTokens = 0,
}: {
  userId: string | ObjectId;
  setMemory: MemoryMethods['setMemory'];
  validKeys?: string[];
  tokenLimit?: number;
  totalTokens?: number;
}) => {
  return tool(
    async ({ key, value }) => {
      try {
        // Reject keys outside the configured whitelist.
        if (validKeys && validKeys.length > 0 && !validKeys.includes(key)) {
          logger.warn(
            `Memory Agent failed to set memory: Invalid key "${key}". Must be one of: ${validKeys.join(
              ', ',
            )}`,
          );
          return `Invalid key "${key}". Must be one of: ${validKeys.join(', ')}`;
        }
        const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
        // Per-value budget: a single memory may not exceed the limit on its own.
        if (tokenLimit && tokenCount > tokenLimit) {
          logger.warn(
            `Memory Agent failed to set memory: Value exceeds token limit. Value has ${tokenCount} tokens, but limit is ${tokenLimit}`,
          );
          return `Memory value too large: ${tokenCount} tokens exceeds limit of ${tokenLimit}`;
        }
        // Cumulative budget: existing usage plus this value may not exceed the limit.
        if (tokenLimit && totalTokens + tokenCount > tokenLimit) {
          const remainingCapacity = tokenLimit - totalTokens;
          logger.warn(
            `Memory Agent failed to set memory: Would exceed total token limit. Current usage: ${totalTokens}, new memory: ${tokenCount} tokens, limit: ${tokenLimit}`,
          );
          return `Cannot add memory: would exceed token limit. Current usage: ${totalTokens}/${tokenLimit} tokens. This memory requires ${tokenCount} tokens, but only ${remainingCapacity} tokens available.`;
        }
        // The artifact surfaces this update to the client as an attachment.
        const artifact: Record<Tools.memory, MemoryArtifact> = {
          [Tools.memory]: {
            key,
            value,
            tokenCount,
            type: 'update',
          },
        };
        const result = await setMemory({ userId, key, value, tokenCount });
        if (result.ok) {
          logger.debug(`Memory set for key "${key}" (${tokenCount} tokens) for user "${userId}"`);
          return [`Memory set for key "${key}" (${tokenCount} tokens)`, artifact];
        }
        logger.warn(`Failed to set memory for key "${key}" for user "${userId}"`);
        return [`Failed to set memory for key "${key}"`, undefined];
      } catch (error) {
        logger.error('Memory Agent failed to set memory', error);
        return [`Error setting memory for key "${key}"`, undefined];
      }
    },
    {
      name: 'set_memory',
      description: 'Saves important information about the user into memory.',
      responseFormat: 'content_and_artifact',
      schema: z.object({
        key: z
          .string()
          .describe(
            validKeys && validKeys.length > 0
              ? `The key of the memory value. Must be one of: ${validKeys.join(', ')}`
              : 'The key identifier for this memory',
          ),
        value: z
          .string()
          .describe(
            'Value MUST be a complete sentence that fully describes relevant user information.',
          ),
      }),
    },
  );
};
/**
 * Creates a delete memory tool instance with user context.
 * Returns a `delete_memory` tool that validates the key against `validKeys`
 * (when configured) and removes the stored memory via `deleteMemory`.
 */
const createDeleteMemoryTool = ({
  userId,
  deleteMemory,
  validKeys,
}: {
  userId: string | ObjectId;
  deleteMemory: MemoryMethods['deleteMemory'];
  validKeys?: string[];
}) => {
  // The schema description depends on whether a key whitelist is configured.
  const keyDescription =
    validKeys && validKeys.length > 0
      ? `The key of the memory to delete. Must be one of: ${validKeys.join(', ')}`
      : 'The key identifier of the memory to delete';
  return tool(
    async ({ key }) => {
      try {
        if (validKeys && validKeys.length > 0 && !validKeys.includes(key)) {
          logger.warn(
            `Memory Agent failed to delete memory: Invalid key "${key}". Must be one of: ${validKeys.join(
              ', ',
            )}`,
          );
          return `Invalid key "${key}". Must be one of: ${validKeys.join(', ')}`;
        }
        // Deletion artifact surfaced to the client as an attachment.
        const deletionArtifact: Record<Tools.memory, MemoryArtifact> = {
          [Tools.memory]: {
            key,
            type: 'delete',
          },
        };
        const outcome = await deleteMemory({ userId, key });
        if (!outcome.ok) {
          logger.warn(`Failed to delete memory for key "${key}" for user "${userId}"`);
          return [`Failed to delete memory for key "${key}"`, undefined];
        }
        logger.debug(`Memory deleted for key "${key}" for user "${userId}"`);
        return [`Memory deleted for key "${key}"`, deletionArtifact];
      } catch (error) {
        logger.error('Memory Agent failed to delete memory', error);
        return [`Error deleting memory for key "${key}"`, undefined];
      }
    },
    {
      name: 'delete_memory',
      description:
        'Deletes specific memory data about the user using the provided key. For updating existing memories, use the `set_memory` tool instead',
      responseFormat: 'content_and_artifact',
      schema: z.object({
        key: z.string().describe(keyDescription),
      }),
    },
  );
};
export class BasicToolEndHandler implements EventHandler {
private callback?: ToolEndCallback;
constructor(callback?: ToolEndCallback) {
this.callback = callback;
}
handle(
event: string,
data: StreamEventData | undefined,
metadata?: Record<string, unknown>,
): void {
if (!metadata) {
console.warn(`Graph or metadata not found in ${event} event`);
return;
}
const toolEndData = data as ToolEndData | undefined;
if (!toolEndData?.output) {
console.warn('No output found in tool_end event');
return;
}
this.callback?.(toolEndData, metadata);
}
}
/**
 * Runs the memory agent over the given messages and persists any memory
 * updates it decides to make.
 *
 * Builds `set_memory`/`delete_memory` tools bound to the user, composes a
 * memory-status prompt, and executes a single non-streaming agent run.
 * Tool artifacts are collected via a TOOL_END handler and may be streamed
 * to `res` as attachment events (see handleMemoryArtifact).
 *
 * @returns Attachments produced by memory tool calls, or `undefined` when
 *   processing fails (errors are logged, not rethrown).
 */
export async function processMemory({
  res,
  userId,
  setMemory,
  deleteMemory,
  messages,
  memory,
  messageId,
  conversationId,
  validKeys,
  instructions,
  llmConfig,
  tokenLimit,
  totalTokens = 0,
}: {
  res: ServerResponse;
  setMemory: MemoryMethods['setMemory'];
  deleteMemory: MemoryMethods['deleteMemory'];
  userId: string | ObjectId;
  memory: string;
  messageId: string;
  conversationId: string;
  messages: BaseMessage[];
  validKeys?: string[];
  instructions: string;
  tokenLimit?: number;
  totalTokens?: number;
  llmConfig?: Partial<LLMConfig>;
}): Promise<(TAttachment | null)[] | undefined> {
  try {
    const memoryTool = createMemoryTool({ userId, tokenLimit, setMemory, validKeys, totalTokens });
    const deleteMemoryTool = createDeleteMemoryTool({
      userId,
      validKeys,
      deleteMemory,
    });
    const currentMemoryTokens = totalTokens;
    // Prompt context: existing memories, plus a usage summary when a limit is set.
    let memoryStatus = `# Existing memory:\n${memory ?? 'No existing memories'}`;
    if (tokenLimit) {
      const remainingTokens = tokenLimit - currentMemoryTokens;
      memoryStatus = `# Memory Status:
Current memory usage: ${currentMemoryTokens} tokens
Token limit: ${tokenLimit} tokens
Remaining capacity: ${remainingTokens} tokens
# Existing memory:
${memory ?? 'No existing memories'}`;
    }
    const defaultLLMConfig: LLMConfig = {
      provider: Providers.OPENAI,
      model: 'gpt-4.1-mini',
      temperature: 0.4,
      streaming: false,
      disableStreaming: true,
    };
    // Caller overrides are applied over the defaults, but streaming stays off.
    const finalLLMConfig = {
      ...defaultLLMConfig,
      ...llmConfig,
      /**
       * Ensure streaming is always disabled for memory processing
       */
      streaming: false,
      disableStreaming: true,
    };
    // Collects attachment promises produced by TOOL_END events during the run.
    const artifactPromises: Promise<TAttachment | null>[] = [];
    const memoryCallback = createMemoryCallback({ res, artifactPromises });
    const customHandlers = {
      [GraphEvents.TOOL_END]: new BasicToolEndHandler(memoryCallback),
    };
    const run = await Run.create({
      runId: messageId,
      graphConfig: {
        type: 'standard',
        llmConfig: finalLLMConfig,
        tools: [memoryTool, deleteMemoryTool],
        instructions,
        additional_instructions: memoryStatus,
        toolEnd: true,
      },
      customHandlers,
      returnContent: true,
    });
    const config = {
      configurable: {
        provider: llmConfig?.provider,
        thread_id: `memory-run-${conversationId}`,
      },
      streamMode: 'values',
      version: 'v2',
    } as const;
    const inputs = {
      messages,
    };
    const content = await run.processStream(inputs, config);
    if (content) {
      logger.debug('Memory Agent processed memory successfully', content);
    } else {
      logger.warn('Memory Agent processed memory but returned no content');
    }
    // Resolve every artifact emitted during the run before returning.
    return await Promise.all(artifactPromises);
  } catch (error) {
    logger.error('Memory Agent failed to process memory', error);
  }
}
/**
 * Loads the user's formatted memories and returns them alongside a memory
 * processor bound to this request's context.
 *
 * @returns A tuple of [memories without keys, processor]. The processor runs
 *   processMemory over a message list; its errors are logged, not rethrown.
 */
export async function createMemoryProcessor({
  res,
  userId,
  messageId,
  memoryMethods,
  conversationId,
  config = {},
}: {
  res: ServerResponse;
  messageId: string;
  conversationId: string;
  userId: string | ObjectId;
  memoryMethods: RequiredMemoryMethods;
  config?: MemoryConfig;
}): Promise<[string, (messages: BaseMessage[]) => Promise<(TAttachment | null)[] | undefined>]> {
  const { validKeys, instructions, llmConfig, tokenLimit } = config;
  // Fall back to the generated default prompt when no instructions are given.
  const effectiveInstructions = instructions || getDefaultInstructions(validKeys, tokenLimit);
  const { withKeys, withoutKeys, totalTokens } = await memoryMethods.getFormattedMemories({
    userId,
  });
  const processor = async (
    messages: BaseMessage[],
  ): Promise<(TAttachment | null)[] | undefined> => {
    try {
      return await processMemory({
        res,
        userId,
        messages,
        validKeys,
        llmConfig,
        messageId,
        tokenLimit,
        conversationId,
        memory: withKeys,
        totalTokens: totalTokens || 0,
        instructions: effectiveInstructions,
        setMemory: memoryMethods.setMemory,
        deleteMemory: memoryMethods.deleteMemory,
      });
    } catch (error) {
      logger.error('Memory Agent failed to process memory', error);
    }
  };
  return [withoutKeys, processor];
}
/**
 * Builds a memory attachment from a tool_end payload; when response headers
 * have already been sent, also streams it to the client as an SSE
 * `attachment` event. Returns null when the payload carries no memory artifact.
 */
async function handleMemoryArtifact({
  res,
  data,
  metadata,
}: {
  res: ServerResponse;
  data: ToolEndData;
  metadata?: ToolEndMetadata;
}) {
  const output = data?.output;
  if (!output?.artifact) {
    return null;
  }
  const memoryArtifact = output.artifact[Tools.memory] as MemoryArtifact | undefined;
  if (!memoryArtifact) {
    return null;
  }
  const attachment: Partial<TAttachment> = {
    type: Tools.memory,
    toolCallId: output.tool_call_id,
    messageId: metadata?.run_id ?? '',
    conversationId: metadata?.thread_id ?? '',
    [Tools.memory]: memoryArtifact,
  };
  // SSE writes are only valid once headers have gone out.
  if (res.headersSent) {
    res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
  }
  return attachment;
}
/**
 * Creates a memory callback for handling memory artifacts
 * @param params - The parameters object
 * @param params.res - The server response object
 * @param params.artifactPromises - Array to collect artifact promises
 * @returns The memory callback function
 */
export function createMemoryCallback({
  res,
  artifactPromises,
}: {
  res: ServerResponse;
  artifactPromises: Promise<Partial<TAttachment> | null>[];
}): ToolEndCallback {
  return async (data: ToolEndData, metadata?: Record<string, unknown>) => {
    const memoryArtifact = data?.output?.artifact?.[Tools.memory] as MemoryArtifact;
    // Ignore tool_end events that carry no memory artifact.
    if (memoryArtifact == null) {
      return;
    }
    const pending = handleMemoryArtifact({ res, data, metadata }).catch((error) => {
      logger.error('Error processing memory artifact content:', error);
      return null;
    });
    artifactPromises.push(pending);
  };
}

View file

@ -0,0 +1,990 @@
import { primeResources } from './resources';
import { logger } from '@librechat/data-schemas';
import { EModelEndpoint, EToolResources, AgentCapabilities } from 'librechat-data-provider';
import type { Request as ServerRequest } from 'express';
import type { TFile } from 'librechat-data-provider';
import type { TGetFiles } from './resources';
// Mock logger
jest.mock('@librechat/data-schemas', () => ({
logger: {
error: jest.fn(),
},
}));
describe('primeResources', () => {
let mockReq: ServerRequest;
let mockGetFiles: jest.MockedFunction<TGetFiles>;
let requestFileSet: Set<string>;
beforeEach(() => {
// Reset mocks
jest.clearAllMocks();
// Setup mock request
mockReq = {
app: {
locals: {
[EModelEndpoint.agents]: {
capabilities: [AgentCapabilities.ocr],
},
},
},
} as unknown as ServerRequest;
// Setup mock getFiles function
mockGetFiles = jest.fn();
// Setup request file set
requestFileSet = new Set(['file1', 'file2', 'file3']);
});
describe('when OCR is enabled and tool_resources has OCR file_ids', () => {
it('should fetch OCR files and include them in attachments', async () => {
const mockOcrFiles: TFile[] = [
{
user: 'user1',
file_id: 'ocr-file-1',
filename: 'document.pdf',
filepath: '/uploads/document.pdf',
object: 'file',
type: 'application/pdf',
bytes: 1024,
embedded: false,
usage: 0,
},
];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['ocr-file-1'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments: undefined,
tool_resources,
});
expect(mockGetFiles).toHaveBeenCalledWith({ file_id: { $in: ['ocr-file-1'] } }, {}, {});
expect(result.attachments).toEqual(mockOcrFiles);
expect(result.tool_resources).toEqual(tool_resources);
});
});
describe('when OCR is disabled', () => {
it('should not fetch OCR files even if tool_resources has OCR file_ids', async () => {
(mockReq.app as ServerRequest['app']).locals[EModelEndpoint.agents].capabilities = [];
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['ocr-file-1'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments: undefined,
tool_resources,
});
expect(mockGetFiles).not.toHaveBeenCalled();
expect(result.attachments).toBeUndefined();
expect(result.tool_resources).toEqual(tool_resources);
});
});
describe('when attachments are provided', () => {
it('should process files with fileIdentifier as execute_code resources', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file1',
filename: 'script.py',
filepath: '/uploads/script.py',
object: 'file',
type: 'text/x-python',
bytes: 512,
embedded: false,
usage: 0,
metadata: {
fileIdentifier: 'python-script',
},
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toEqual(mockFiles);
expect(result.tool_resources?.[EToolResources.execute_code]?.files).toEqual(mockFiles);
});
it('should process embedded files as file_search resources', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file2',
filename: 'document.txt',
filepath: '/uploads/document.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: true,
usage: 0,
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toEqual(mockFiles);
expect(result.tool_resources?.[EToolResources.file_search]?.files).toEqual(mockFiles);
});
it('should process image files in requestFileSet as image_edit resources', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file1',
filename: 'image.png',
filepath: '/uploads/image.png',
object: 'file',
type: 'image/png',
bytes: 2048,
embedded: false,
usage: 0,
height: 800,
width: 600,
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toEqual(mockFiles);
expect(result.tool_resources?.[EToolResources.image_edit]?.files).toEqual(mockFiles);
});
it('should not process image files not in requestFileSet', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file-not-in-set',
filename: 'image.png',
filepath: '/uploads/image.png',
object: 'file',
type: 'image/png',
bytes: 2048,
embedded: false,
usage: 0,
height: 800,
width: 600,
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toEqual(mockFiles);
expect(result.tool_resources?.[EToolResources.image_edit]).toBeUndefined();
});
it('should not process image files without height and width', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file1',
filename: 'image.png',
filepath: '/uploads/image.png',
object: 'file',
type: 'image/png',
bytes: 2048,
embedded: false,
usage: 0,
// Missing height and width
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toEqual(mockFiles);
expect(result.tool_resources?.[EToolResources.image_edit]).toBeUndefined();
});
it('should filter out null files from attachments', async () => {
const mockFiles: Array<TFile | null> = [
{
user: 'user1',
file_id: 'file1',
filename: 'valid.txt',
filepath: '/uploads/valid.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
},
null,
{
user: 'user1',
file_id: 'file2',
filename: 'valid2.txt',
filepath: '/uploads/valid2.txt',
object: 'file',
type: 'text/plain',
bytes: 128,
embedded: false,
usage: 0,
},
];
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
expect(result.attachments).toHaveLength(2);
expect(result.attachments?.[0]?.file_id).toBe('file1');
expect(result.attachments?.[1]?.file_id).toBe('file2');
});
it('should merge existing tool_resources with new files', async () => {
const mockFiles: TFile[] = [
{
user: 'user1',
file_id: 'file1',
filename: 'script.py',
filepath: '/uploads/script.py',
object: 'file',
type: 'text/x-python',
bytes: 512,
embedded: false,
usage: 0,
metadata: {
fileIdentifier: 'python-script',
},
},
];
const existingToolResources = {
[EToolResources.execute_code]: {
files: [
{
user: 'user1',
file_id: 'existing-file',
filename: 'existing.py',
filepath: '/uploads/existing.py',
object: 'file' as const,
type: 'text/x-python',
bytes: 256,
embedded: false,
usage: 0,
},
],
},
};
const attachments = Promise.resolve(mockFiles);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: existingToolResources,
});
expect(result.tool_resources?.[EToolResources.execute_code]?.files).toHaveLength(2);
expect(result.tool_resources?.[EToolResources.execute_code]?.files?.[0]?.file_id).toBe(
'existing-file',
);
expect(result.tool_resources?.[EToolResources.execute_code]?.files?.[1]?.file_id).toBe(
'file1',
);
});
});
describe('when both OCR and attachments are provided', () => {
it('should include both OCR files and attachment files', async () => {
const mockOcrFiles: TFile[] = [
{
user: 'user1',
file_id: 'ocr-file-1',
filename: 'document.pdf',
filepath: '/uploads/document.pdf',
object: 'file',
type: 'application/pdf',
bytes: 1024,
embedded: false,
usage: 0,
},
];
const mockAttachmentFiles: TFile[] = [
{
user: 'user1',
file_id: 'file1',
filename: 'attachment.txt',
filepath: '/uploads/attachment.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
},
];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const attachments = Promise.resolve(mockAttachmentFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['ocr-file-1'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources,
});
expect(result.attachments).toHaveLength(2);
expect(result.attachments?.[0]?.file_id).toBe('ocr-file-1');
expect(result.attachments?.[1]?.file_id).toBe('file1');
});
it('should prevent duplicate files when same file exists in OCR and attachments', async () => {
const sharedFile: TFile = {
user: 'user1',
file_id: 'shared-file-id',
filename: 'document.pdf',
filepath: '/uploads/document.pdf',
object: 'file',
type: 'application/pdf',
bytes: 1024,
embedded: false,
usage: 0,
};
const mockOcrFiles: TFile[] = [sharedFile];
const mockAttachmentFiles: TFile[] = [
sharedFile,
{
user: 'user1',
file_id: 'unique-file',
filename: 'other.txt',
filepath: '/uploads/other.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
},
];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const attachments = Promise.resolve(mockAttachmentFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['shared-file-id'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources,
});
// Should only have 2 files, not 3 (no duplicate)
expect(result.attachments).toHaveLength(2);
expect(result.attachments?.filter((f) => f?.file_id === 'shared-file-id')).toHaveLength(1);
expect(result.attachments?.find((f) => f?.file_id === 'unique-file')).toBeDefined();
});
it('should still categorize duplicate files for tool_resources', async () => {
const sharedFile: TFile = {
user: 'user1',
file_id: 'shared-file-id',
filename: 'script.py',
filepath: '/uploads/script.py',
object: 'file',
type: 'text/x-python',
bytes: 512,
embedded: false,
usage: 0,
metadata: {
fileIdentifier: 'python-script',
},
};
const mockOcrFiles: TFile[] = [sharedFile];
const mockAttachmentFiles: TFile[] = [sharedFile];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const attachments = Promise.resolve(mockAttachmentFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['shared-file-id'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources,
});
// File should appear only once in attachments
expect(result.attachments).toHaveLength(1);
expect(result.attachments?.[0]?.file_id).toBe('shared-file-id');
// But should still be categorized in tool_resources
expect(result.tool_resources?.[EToolResources.execute_code]?.files).toHaveLength(1);
expect(result.tool_resources?.[EToolResources.execute_code]?.files?.[0]?.file_id).toBe(
'shared-file-id',
);
});
it('should handle multiple duplicate files', async () => {
const file1: TFile = {
user: 'user1',
file_id: 'file-1',
filename: 'doc1.pdf',
filepath: '/uploads/doc1.pdf',
object: 'file',
type: 'application/pdf',
bytes: 1024,
embedded: false,
usage: 0,
};
const file2: TFile = {
user: 'user1',
file_id: 'file-2',
filename: 'doc2.pdf',
filepath: '/uploads/doc2.pdf',
object: 'file',
type: 'application/pdf',
bytes: 2048,
embedded: false,
usage: 0,
};
const uniqueFile: TFile = {
user: 'user1',
file_id: 'unique-file',
filename: 'unique.txt',
filepath: '/uploads/unique.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
};
const mockOcrFiles: TFile[] = [file1, file2];
const mockAttachmentFiles: TFile[] = [file1, file2, uniqueFile];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const attachments = Promise.resolve(mockAttachmentFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['file-1', 'file-2'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources,
});
// Should have 3 files total (2 from OCR + 1 unique from attachments)
expect(result.attachments).toHaveLength(3);
// Each file should appear only once
const fileIds = result.attachments?.map((f) => f?.file_id);
expect(fileIds).toContain('file-1');
expect(fileIds).toContain('file-2');
expect(fileIds).toContain('unique-file');
// Check no duplicates
const uniqueFileIds = new Set(fileIds);
expect(uniqueFileIds.size).toBe(fileIds?.length);
});
it('should handle files without file_id gracefully', async () => {
const fileWithoutId: Partial<TFile> = {
user: 'user1',
filename: 'no-id.txt',
filepath: '/uploads/no-id.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
};
const normalFile: TFile = {
user: 'user1',
file_id: 'normal-file',
filename: 'normal.txt',
filepath: '/uploads/normal.txt',
object: 'file',
type: 'text/plain',
bytes: 512,
embedded: false,
usage: 0,
};
const mockOcrFiles: TFile[] = [normalFile];
const mockAttachmentFiles = [fileWithoutId as TFile, normalFile];
mockGetFiles.mockResolvedValue(mockOcrFiles);
const attachments = Promise.resolve(mockAttachmentFiles);
const tool_resources = {
[EToolResources.ocr]: {
file_ids: ['normal-file'],
},
};
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources,
});
// Should include file without ID and one instance of normal file
expect(result.attachments).toHaveLength(2);
expect(result.attachments?.filter((f) => f?.file_id === 'normal-file')).toHaveLength(1);
expect(result.attachments?.some((f) => !f?.file_id)).toBe(true);
});
it('should prevent duplicates from existing tool_resources', async () => {
const existingFile: TFile = {
user: 'user1',
file_id: 'existing-file',
filename: 'existing.py',
filepath: '/uploads/existing.py',
object: 'file',
type: 'text/x-python',
bytes: 512,
embedded: false,
usage: 0,
metadata: {
fileIdentifier: 'python-script',
},
};
const newFile: TFile = {
user: 'user1',
file_id: 'new-file',
filename: 'new.py',
filepath: '/uploads/new.py',
object: 'file',
type: 'text/x-python',
bytes: 256,
embedded: false,
usage: 0,
metadata: {
fileIdentifier: 'python-script',
},
};
const existingToolResources = {
[EToolResources.execute_code]: {
files: [existingFile],
},
};
const attachments = Promise.resolve([existingFile, newFile]);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: existingToolResources,
});
// Should only add the new file to attachments
expect(result.attachments).toHaveLength(1);
expect(result.attachments?.[0]?.file_id).toBe('new-file');
// Should not duplicate the existing file in tool_resources
expect(result.tool_resources?.[EToolResources.execute_code]?.files).toHaveLength(2);
const fileIds = result.tool_resources?.[EToolResources.execute_code]?.files?.map(
(f) => f.file_id,
);
expect(fileIds).toEqual(['existing-file', 'new-file']);
});
it('should handle duplicates within attachments array', async () => {
const duplicatedFile: TFile = {
user: 'user1',
file_id: 'dup-file',
filename: 'duplicate.txt',
filepath: '/uploads/duplicate.txt',
object: 'file',
type: 'text/plain',
bytes: 256,
embedded: false,
usage: 0,
};
const uniqueFile: TFile = {
user: 'user1',
file_id: 'unique-file',
filename: 'unique.txt',
filepath: '/uploads/unique.txt',
object: 'file',
type: 'text/plain',
bytes: 128,
embedded: false,
usage: 0,
};
// Same file appears multiple times in attachments
const attachments = Promise.resolve([
duplicatedFile,
duplicatedFile,
uniqueFile,
duplicatedFile,
]);
const result = await primeResources({
req: mockReq,
getFiles: mockGetFiles,
requestFileSet,
attachments,
tool_resources: {},
});
// Should only have 2 unique files
expect(result.attachments).toHaveLength(2);
const fileIds = result.attachments?.map((f) => f?.file_id);
expect(fileIds).toContain('dup-file');
expect(fileIds).toContain('unique-file');
// Verify no duplicates
expect(fileIds?.filter((id) => id === 'dup-file')).toHaveLength(1);
});
// A file already present in an existing tool_resources category (here:
// file_search, because embedded=true) must be re-added to neither the
// attachments result nor the resource's files array.
it('should prevent duplicates across different tool_resource categories', async () => {
  const multiPurposeFile: TFile = {
    user: 'user1',
    file_id: 'multi-file',
    filename: 'data.txt',
    filepath: '/uploads/data.txt',
    object: 'file',
    type: 'text/plain',
    bytes: 512,
    embedded: true, // Will be categorized as file_search
    usage: 0,
  };
  const existingToolResources = {
    [EToolResources.file_search]: {
      files: [multiPurposeFile],
    },
  };
  // Try to add the same file again
  const attachments = Promise.resolve([multiPurposeFile]);
  const result = await primeResources({
    req: mockReq,
    getFiles: mockGetFiles,
    requestFileSet,
    attachments,
    tool_resources: existingToolResources,
  });
  // Should not add to attachments (already exists)
  expect(result.attachments).toHaveLength(0);
  // Should not duplicate in file_search
  expect(result.tool_resources?.[EToolResources.file_search]?.files).toHaveLength(1);
  expect(result.tool_resources?.[EToolResources.file_search]?.files?.[0]?.file_id).toBe(
    'multi-file',
  );
});
// End-to-end de-duplication: files arriving via the OCR fetch, an existing
// tool_resources entry, AND the attachments promise (with internal repeats)
// must each appear exactly once in the final attachments list.
it('should handle complex scenario with OCR, existing tool_resources, and attachments', async () => {
  const ocrFile: TFile = {
    user: 'user1',
    file_id: 'ocr-file',
    filename: 'scan.pdf',
    filepath: '/uploads/scan.pdf',
    object: 'file',
    type: 'application/pdf',
    bytes: 2048,
    embedded: false,
    usage: 0,
  };
  const existingFile: TFile = {
    user: 'user1',
    file_id: 'existing-file',
    filename: 'code.py',
    filepath: '/uploads/code.py',
    object: 'file',
    type: 'text/x-python',
    bytes: 512,
    embedded: false,
    usage: 0,
    metadata: {
      // fileIdentifier routes this file to the execute_code resource
      fileIdentifier: 'python-script',
    },
  };
  const newFile: TFile = {
    user: 'user1',
    file_id: 'new-file',
    filename: 'image.png',
    filepath: '/uploads/image.png',
    object: 'file',
    type: 'image/png',
    bytes: 4096,
    embedded: false,
    usage: 0,
    height: 800,
    width: 600,
  };
  mockGetFiles.mockResolvedValue([ocrFile, existingFile]); // OCR returns both files
  const attachments = Promise.resolve([existingFile, ocrFile, newFile]); // Attachments has duplicates
  const existingToolResources = {
    [EToolResources.ocr]: {
      file_ids: ['ocr-file', 'existing-file'],
    },
    [EToolResources.execute_code]: {
      files: [existingFile],
    },
  };
  requestFileSet.add('new-file'); // Only new-file is in request set
  const result = await primeResources({
    req: mockReq,
    getFiles: mockGetFiles,
    requestFileSet,
    attachments,
    tool_resources: existingToolResources,
  });
  // Should have 3 unique files total
  expect(result.attachments).toHaveLength(3);
  const attachmentIds = result.attachments?.map((f) => f?.file_id).sort();
  expect(attachmentIds).toEqual(['existing-file', 'new-file', 'ocr-file']);
  // Check tool_resources: existing-file stays single in execute_code, and
  // new-file (an image in the request set, with dimensions) lands in image_edit
  expect(result.tool_resources?.[EToolResources.execute_code]?.files).toHaveLength(1);
  expect(result.tool_resources?.[EToolResources.image_edit]?.files).toHaveLength(1);
  expect(result.tool_resources?.[EToolResources.image_edit]?.files?.[0]?.file_id).toBe(
    'new-file',
  );
});
});
// Failure paths: primeResources must never throw — it logs and returns the
// safest approximation of the inputs instead.
describe('error handling', () => {
  // getFiles rejecting (the OCR fetch) is swallowed: the error is logged and
  // the original attachments/tool_resources are passed through untouched.
  it('should handle errors gracefully and log them', async () => {
    const mockFiles: TFile[] = [
      {
        user: 'user1',
        file_id: 'file1',
        filename: 'test.txt',
        filepath: '/uploads/test.txt',
        object: 'file',
        type: 'text/plain',
        bytes: 256,
        embedded: false,
        usage: 0,
      },
    ];
    const attachments = Promise.resolve(mockFiles);
    const error = new Error('Test error');
    // Mock getFiles to throw an error when called for OCR
    mockGetFiles.mockRejectedValue(error);
    const tool_resources = {
      [EToolResources.ocr]: {
        file_ids: ['ocr-file-1'],
      },
    };
    const result = await primeResources({
      req: mockReq,
      getFiles: mockGetFiles,
      requestFileSet,
      attachments,
      tool_resources,
    });
    expect(logger.error).toHaveBeenCalledWith('Error priming resources', error);
    expect(result.attachments).toEqual(mockFiles);
    expect(result.tool_resources).toEqual(tool_resources);
  });
  // A rejected attachments promise is caught twice (main flow + recovery
  // path) and degrades to an empty attachments array.
  it('should handle promise rejection in attachments', async () => {
    const error = new Error('Attachment error');
    const attachments = Promise.reject(error);
    // The function should now handle rejected attachment promises gracefully
    const result = await primeResources({
      req: mockReq,
      getFiles: mockGetFiles,
      requestFileSet,
      attachments,
      tool_resources: {},
    });
    // Should log both the main error and the attachment error
    expect(logger.error).toHaveBeenCalledWith('Error priming resources', error);
    expect(logger.error).toHaveBeenCalledWith(
      'Error resolving attachments in catch block',
      error,
    );
    // Should return empty array when attachments promise is rejected
    expect(result.attachments).toEqual([]);
    expect(result.tool_resources).toEqual({});
  });
});
// Boundary inputs: missing app.locals, undefined tool_resources, and an empty
// request file set.
describe('edge cases', () => {
  it('should handle missing app.locals gracefully', async () => {
    const reqWithoutLocals = {} as ServerRequest;
    const result = await primeResources({
      req: reqWithoutLocals,
      getFiles: mockGetFiles,
      requestFileSet,
      attachments: undefined,
      tool_resources: {
        [EToolResources.ocr]: {
          file_ids: ['ocr-file-1'],
        },
      },
    });
    expect(mockGetFiles).not.toHaveBeenCalled();
    // When app.locals is missing and there's an error accessing properties,
    // the function falls back to the catch block which returns an empty array
    expect(result.attachments).toEqual([]);
  });
  it('should handle undefined tool_resources', async () => {
    const result = await primeResources({
      req: mockReq,
      getFiles: mockGetFiles,
      requestFileSet,
      attachments: undefined,
      tool_resources: undefined,
    });
    // tool_resources defaults to {}; with no attachments promise the
    // attachments result is undefined (not an empty array)
    expect(result.tool_resources).toEqual({});
    expect(result.attachments).toBeUndefined();
  });
  it('should handle empty requestFileSet', async () => {
    const mockFiles: TFile[] = [
      {
        user: 'user1',
        file_id: 'file1',
        filename: 'image.png',
        filepath: '/uploads/image.png',
        object: 'file',
        type: 'image/png',
        bytes: 2048,
        embedded: false,
        usage: 0,
        height: 800,
        width: 600,
      },
    ];
    const attachments = Promise.resolve(mockFiles);
    const emptyRequestFileSet = new Set<string>();
    const result = await primeResources({
      req: mockReq,
      getFiles: mockGetFiles,
      requestFileSet: emptyRequestFileSet,
      attachments,
      tool_resources: {},
    });
    // Image is returned as an attachment but NOT categorized into image_edit,
    // because image_edit requires membership in the request file set
    expect(result.attachments).toEqual(mockFiles);
    expect(result.tool_resources?.[EToolResources.image_edit]).toBeUndefined();
  });
});
});

View file

@ -0,0 +1,282 @@
import { logger } from '@librechat/data-schemas';
import { EModelEndpoint, EToolResources, AgentCapabilities } from 'librechat-data-provider';
import type { AgentToolResources, TFile, AgentBaseResource } from 'librechat-data-provider';
import type { FilterQuery, QueryOptions, ProjectionType } from 'mongoose';
import type { IMongoFile } from '@librechat/data-schemas';
import type { Request as ServerRequest } from 'express';
/**
 * Function type for retrieving files from the database.
 * @param filter - MongoDB filter query for files
 * @param _sortOptions - Sorting options (currently unused; callers in this module pass `{}`)
 * @param selectFields - Field selection options
 * @returns Promise resolving to array of files
 */
// NOTE(review): `_sortOptions` is typed as ProjectionType and `selectFields`
// as QueryOptions — the generic choices look swapped relative to the names;
// confirm against the concrete implementation injected by callers.
export type TGetFiles = (
  filter: FilterQuery<IMongoFile>,
  _sortOptions: ProjectionType<IMongoFile> | null | undefined,
  selectFields: QueryOptions<IMongoFile> | null | undefined,
) => Promise<Array<TFile>>;
/**
 * Helper function to add a file to a specific tool resource category.
 * Prevents duplicate files within the same resource category.
 * @param params - Parameters object
 * @param params.file - The file to add to the resource
 * @param params.resourceType - The type of tool resource (e.g., execute_code, file_search, image_edit)
 * @param params.tool_resources - The agent's tool resources object to update (mutated in place)
 * @param params.processedResourceFiles - Set tracking processed files per resource type,
 *   keyed as "resourceType:fileId"
 */
const addFileToResource = ({
  file,
  resourceType,
  tool_resources,
  processedResourceFiles,
}: {
  file: TFile;
  resourceType: EToolResources;
  tool_resources: AgentToolResources;
  processedResourceFiles: Set<string>;
}): void => {
  // Files without an id cannot be de-duplicated, so they are never added.
  if (!file.file_id) {
    return;
  }
  const resourceKey = `${resourceType}:${file.file_id}`;
  if (processedResourceFiles.has(resourceKey)) {
    return;
  }
  const key = resourceType as keyof AgentToolResources;
  const resource = tool_resources[key] ?? {};
  // Lazily initialize the files array, preserving any other resource fields
  // (e.g. file_ids on the OCR resource).
  if (!resource.files) {
    (tool_resources[key] as AgentBaseResource) = {
      ...resource,
      files: [],
    };
  }
  const resourceFiles = tool_resources[key]?.files;
  // Check if already exists in the files array
  const alreadyExists = resourceFiles?.some((f: TFile) => f.file_id === file.file_id);
  if (!alreadyExists) {
    resourceFiles?.push(file);
  }
  // Record the key even when the file was already present, so subsequent
  // calls short-circuit on the Set instead of re-scanning the files array.
  processedResourceFiles.add(resourceKey);
};
/**
* Categorizes a file into the appropriate tool resource based on its properties
* Files are categorized as:
* - execute_code: Files with fileIdentifier metadata
* - file_search: Files marked as embedded
* - image_edit: Image files in the request file set with dimensions
* @param params - Parameters object
* @param params.file - The file to categorize
* @param params.tool_resources - The agent's tool resources to update
* @param params.requestFileSet - Set of file IDs from the current request
* @param params.processedResourceFiles - Set tracking processed files per resource type
*/
const categorizeFileForToolResources = ({
file,
tool_resources,
requestFileSet,
processedResourceFiles,
}: {
file: TFile;
tool_resources: AgentToolResources;
requestFileSet: Set<string>;
processedResourceFiles: Set<string>;
}): void => {
if (file.metadata?.fileIdentifier) {
addFileToResource({
file,
resourceType: EToolResources.execute_code,
tool_resources,
processedResourceFiles,
});
return;
}
if (file.embedded === true) {
addFileToResource({
file,
resourceType: EToolResources.file_search,
tool_resources,
processedResourceFiles,
});
return;
}
if (
requestFileSet.has(file.file_id) &&
file.type.startsWith('image') &&
file.height &&
file.width
) {
addFileToResource({
file,
resourceType: EToolResources.image_edit,
tool_resources,
processedResourceFiles,
});
}
};
/**
 * Primes resources for agent execution by processing attachments and tool resources
 * This function:
 * 1. Fetches OCR files if OCR is enabled
 * 2. Processes attachment files
 * 3. Categorizes files into appropriate tool resources
 * 4. Prevents duplicate files across all sources
 *
 * Never throws: on any error it logs and returns best-effort results.
 *
 * @param params - Parameters object
 * @param params.req - Express request object containing app configuration
 * @param params.getFiles - Function to retrieve files from database
 * @param params.requestFileSet - Set of file IDs from the current request
 * @param params.attachments - Promise resolving to array of attachment files
 * @param params.tool_resources - Existing tool resources for the agent
 * @returns Promise resolving to processed attachments and updated tool resources.
 *   Note the deliberate asymmetry (covered by tests): with no attachments
 *   promise, an empty result is `undefined`; with one, it is `[]`.
 */
export const primeResources = async ({
  req,
  getFiles,
  requestFileSet,
  attachments: _attachments,
  tool_resources: _tool_resources,
}: {
  req: ServerRequest;
  requestFileSet: Set<string>;
  attachments: Promise<Array<TFile | null>> | undefined;
  tool_resources: AgentToolResources | undefined;
  getFiles: TGetFiles;
}): Promise<{
  attachments: Array<TFile | undefined> | undefined;
  tool_resources: AgentToolResources | undefined;
}> => {
  try {
    /**
     * Array to collect all unique files that will be returned as attachments
     * Files are added from OCR results and attachment promises, with duplicates prevented
     */
    const attachments: Array<TFile> = [];
    /**
     * Set of file IDs already added to the attachments array
     * Used to prevent duplicate files from being added multiple times
     * Pre-populated with files from non-OCR tool_resources to prevent re-adding them
     */
    const attachmentFileIds = new Set<string>();
    /**
     * Set tracking which files have been added to specific tool resource categories
     * Format: "resourceType:fileId" (e.g., "execute_code:file123")
     * Prevents the same file from being added multiple times to the same resource
     */
    const processedResourceFiles = new Set<string>();
    /**
     * The agent's tool resources object that will be updated with categorized files
     * Initialized from input parameter or empty object if not provided
     */
    const tool_resources = _tool_resources ?? {};
    // Track existing files in tool_resources to prevent duplicates within resources
    for (const [resourceType, resource] of Object.entries(tool_resources)) {
      if (resource?.files && Array.isArray(resource.files)) {
        for (const file of resource.files) {
          if (file?.file_id) {
            processedResourceFiles.add(`${resourceType}:${file.file_id}`);
            // Files from non-OCR resources should not be added to attachments from _attachments
            if (resourceType !== EToolResources.ocr) {
              attachmentFileIds.add(file.file_id);
            }
          }
        }
      }
    }
    // NOTE: intentionally not optional-chained on `req.app` — a request with
    // no `app` throws here and is handled by the catch block below.
    const isOCREnabled = (req.app.locals?.[EModelEndpoint.agents]?.capabilities ?? []).includes(
      AgentCapabilities.ocr,
    );
    if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
      const context = await getFiles(
        {
          file_id: { $in: tool_resources.ocr.file_ids },
        },
        {},
        {},
      );
      for (const file of context) {
        if (!file?.file_id) {
          continue;
        }
        // Clear from attachmentFileIds if it was pre-added, so OCR files are
        // always surfaced in attachments even when they also appear in a
        // non-OCR resource
        attachmentFileIds.delete(file.file_id);
        // Add to attachments
        attachments.push(file);
        attachmentFileIds.add(file.file_id);
        // Categorize for tool resources
        categorizeFileForToolResources({
          file,
          tool_resources,
          requestFileSet,
          processedResourceFiles,
        });
      }
    }
    if (!_attachments) {
      // No attachments promise: empty result is reported as `undefined`
      return { attachments: attachments.length > 0 ? attachments : undefined, tool_resources };
    }
    const files = await _attachments;
    for (const file of files) {
      if (!file) {
        continue;
      }
      // Categorize first — even files skipped from attachments below may
      // still belong in a tool resource category
      categorizeFileForToolResources({
        file,
        tool_resources,
        requestFileSet,
        processedResourceFiles,
      });
      if (file.file_id && attachmentFileIds.has(file.file_id)) {
        continue;
      }
      attachments.push(file);
      if (file.file_id) {
        attachmentFileIds.add(file.file_id);
      }
    }
    // With an attachments promise present, empty result is `[]` (not undefined)
    return { attachments: attachments.length > 0 ? attachments : [], tool_resources };
  } catch (error) {
    logger.error('Error priming resources', error);
    // Safely try to get attachments without rethrowing
    let safeAttachments: Array<TFile | undefined> = [];
    if (_attachments) {
      try {
        const attachmentFiles = await _attachments;
        safeAttachments = (attachmentFiles?.filter((file) => !!file) ?? []) as Array<TFile>;
      } catch (attachmentError) {
        // If attachments promise is also rejected, just use empty array
        logger.error('Error resolving attachments in catch block', attachmentError);
        safeAttachments = [];
      }
    }
    return {
      attachments: safeAttachments,
      tool_resources: _tool_resources,
    };
  }
};

View file

@ -0,0 +1,90 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type { StandardGraphConfig, EventHandler, GraphEvents, IState } from '@librechat/agents';
import type { Agent } from 'librechat-data-provider';
import type * as t from '~/types';
/**
 * Providers whose OpenAI-compatible APIs mishandle the streamed `usage`
 * field; for these, streamUsage is disabled and `usage: true` is set instead
 * when building the run's LLM config.
 */
const customProviders = new Set([
  Providers.XAI,
  Providers.OLLAMA,
  Providers.DEEPSEEK,
  Providers.OPENROUTER,
]);
/**
 * Creates a new Run instance with custom handlers and configuration.
 *
 * @param options - The options for creating the Run instance.
 * @param options.agent - The agent for this run.
 * @param options.signal - The abort signal for this run.
 * @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
 * @param options.customHandlers - Custom event handlers.
 * @param options.streaming - Whether to use streaming.
 * @param options.streamUsage - Whether to stream usage information.
 * @returns {Promise<Run<IState>>} A promise that resolves to a new Run instance.
 */
export async function createRun({
  runId,
  agent,
  signal,
  customHandlers,
  streaming = true,
  streamUsage = true,
}: {
  agent: Agent;
  signal: AbortSignal;
  runId?: string;
  streaming?: boolean;
  streamUsage?: boolean;
  customHandlers?: Record<GraphEvents, EventHandler>;
}): Promise<Run<IState>> {
  // Map the agent's provider to its canonical endpoint name when one exists
  const provider =
    providerEndpointMap[agent.provider as keyof typeof providerEndpointMap] ?? agent.provider;
  // Model parameters override the base streaming/usage defaults
  const llmConfig: t.RunLLMConfig = Object.assign(
    {
      provider,
      streaming,
      streamUsage,
    },
    agent.model_parameters,
  );
  /** Resolves issues with new OpenAI usage field */
  if (
    customProviders.has(agent.provider) ||
    (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
  ) {
    llmConfig.streamUsage = false;
    llmConfig.usage = true;
  }
  // OpenRouter reports reasoning tokens under `reasoning` rather than
  // `reasoning_content`
  let reasoningKey: 'reasoning_content' | 'reasoning' | undefined;
  if (
    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
    (agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
  ) {
    reasoningKey = 'reasoning';
  }
  const graphConfig: StandardGraphConfig = {
    signal,
    llmConfig,
    reasoningKey,
    tools: agent.tools,
    instructions: agent.instructions,
    additional_instructions: agent.additional_instructions,
    // toolEnd: agent.end_after_tools,
  };
  // TEMPORARY FOR TESTING
  if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
    graphConfig.streamBuffer = 2000;
  }
  return Run.create({
    runId,
    graphConfig,
    customHandlers,
  });
}

View file

@ -0,0 +1 @@
export * from './openai';

View file

@ -0,0 +1,2 @@
export * from './llm';
export * from './initialize';

View file

@ -0,0 +1,176 @@
import {
ErrorTypes,
EModelEndpoint,
resolveHeaders,
mapModelToAzureConfig,
} from 'librechat-data-provider';
import type {
LLMConfigOptions,
UserKeyValues,
InitializeOpenAIOptionsParams,
OpenAIOptionsResult,
} from '~/types';
import { createHandleLLMNewToken } from '~/utils/generators';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { getOpenAIConfig } from './llm';
/**
 * Initializes OpenAI options for agent usage. This function always returns configuration
 * options and never creates a client instance (equivalent to optionsOnly=true behavior).
 *
 * @param params - Configuration parameters
 * @returns Promise resolving to OpenAI configuration options
 * @throws Error if the endpoint is missing, the API key is missing, or the user key has expired
 */
export const initializeOpenAI = async ({
  req,
  overrideModel,
  endpointOption,
  overrideEndpoint,
  getUserKeyValues,
  checkUserKeyExpiry,
}: InitializeOpenAIOptionsParams): Promise<OpenAIOptionsResult> => {
  const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
    process.env;
  const { key: expiresAt } = req.body;
  // Explicit overrides win over what the request body specifies
  const modelName = overrideModel ?? req.body.model;
  const endpoint = overrideEndpoint ?? req.body.endpoint;
  if (!endpoint) {
    throw new Error('Endpoint is required');
  }
  const credentials = {
    [EModelEndpoint.openAI]: OPENAI_API_KEY,
    [EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
  };
  const baseURLOptions = {
    [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
    [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
  };
  // "User provided" means the env var is a sentinel telling us the user
  // supplies the value at runtime rather than the server environment
  const userProvidesKey = isUserProvided(credentials[endpoint as keyof typeof credentials]);
  const userProvidesURL = isUserProvided(baseURLOptions[endpoint as keyof typeof baseURLOptions]);
  let userValues: UserKeyValues | null = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    // Throws if the stored user key has expired
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }
  let apiKey = userProvidesKey
    ? userValues?.apiKey
    : credentials[endpoint as keyof typeof credentials];
  const baseURL = userProvidesURL
    ? userValues?.baseURL
    : baseURLOptions[endpoint as keyof typeof baseURLOptions];
  const clientOptions: LLMConfigOptions = {
    proxy: PROXY ?? undefined,
    reverseProxyUrl: baseURL || undefined,
    streaming: true,
  };
  const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
  /** Azure config from `librechat.yaml`, when present */
  const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];
  if (isAzureOpenAI && azureConfig) {
    // Config-driven Azure: resolve the model's group to its deployment,
    // headers, and base URL
    const { modelGroupMap, groupMap } = azureConfig;
    const {
      azureOptions,
      baseURL: configBaseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName: modelName || '',
      modelGroupMap,
      groupMap,
    });
    clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
    clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
    const groupName = modelGroupMap[modelName || '']?.group;
    if (groupName && groupMap[groupName]) {
      clientOptions.addParams = groupMap[groupName]?.addParams;
      clientOptions.dropParams = groupMap[groupName]?.dropParams;
    }
    apiKey = azureOptions.azureOpenAIApiKey;
    clientOptions.azure = !serverless ? azureOptions : undefined;
    if (serverless === true) {
      // Serverless Azure deployments authenticate via the `api-key` header
      // and an `api-version` query param instead of the azure client options
      clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
        ? { 'api-version': azureOptions.azureOpenAIApiVersion }
        : undefined;
      if (!clientOptions.headers) {
        clientOptions.headers = {};
      }
      clientOptions.headers['api-key'] = apiKey;
    }
  } else if (isAzureOpenAI) {
    // Env-driven Azure: user-supplied keys are stored as serialized JSON
    clientOptions.azure =
      userProvidesKey && userValues?.apiKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
    apiKey = clientOptions.azure?.azureOpenAIApiKey;
  }
  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }
  if (!apiKey) {
    throw new Error(`${endpoint} API Key not provided.`);
  }
  const modelOptions = {
    ...endpointOption.model_parameters,
    model: modelName,
    user: req.user.id,
  };
  const finalClientOptions: LLMConfigOptions = {
    ...clientOptions,
    modelOptions,
  };
  const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
  const openAIConfig = req.app.locals[EModelEndpoint.openAI];
  const allConfig = req.app.locals.all;
  // Fallback Azure stream rate when the config doesn't specify one
  const azureRate = modelName?.includes('gpt-4') ? 30 : 17;
  // streamRate precedence: `all` config > endpoint config > azure default
  let streamRate: number | undefined;
  if (isAzureOpenAI && azureConfig) {
    streamRate = azureConfig.streamRate ?? azureRate;
  } else if (!isAzureOpenAI && openAIConfig) {
    streamRate = openAIConfig.streamRate;
  }
  if (allConfig?.streamRate) {
    streamRate = allConfig.streamRate;
  }
  if (streamRate) {
    // Throttle token emission via the new-token callback
    options.llmConfig.callbacks = [
      {
        handleLLMNewToken: createHandleLLMNewToken(streamRate),
      },
    ];
  }
  const result: OpenAIOptionsResult = {
    ...options,
    streamRate,
  };
  return result;
};

View file

@ -0,0 +1,156 @@
import { HttpsProxyAgent } from 'https-proxy-agent';
import { KnownEndpoints } from 'librechat-data-provider';
import type * as t from '~/types';
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
import { isEnabled } from '~/utils/common';
/**
 * Generates configuration options for creating a language model (LLM) instance.
 * @param apiKey - The API key for authentication.
 * @param options - Additional options for configuring the LLM.
 * @param endpoint - The endpoint name
 * @returns Configuration options for creating an LLM instance (`llmConfig` for
 *   the model itself, `configOptions` for the underlying HTTP client).
 */
export function getOpenAIConfig(
  apiKey: string,
  options: t.LLMConfigOptions = {},
  endpoint?: string | null,
): t.LLMConfigResult {
  const {
    modelOptions = {},
    reverseProxyUrl,
    defaultQuery,
    headers,
    proxy,
    azure,
    streaming = true,
    addParams,
    dropParams,
  } = options;
  const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
    {
      streaming,
      model: modelOptions.model ?? '',
    },
    modelOptions,
  );
  // addParams are merged in before dropParams are removed
  if (addParams && typeof addParams === 'object') {
    Object.assign(llmConfig, addParams);
  }
  // Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
  if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
    const searchExcludeParams = [
      'frequency_penalty',
      'presence_penalty',
      'temperature',
      'top_p',
      'top_k',
      'stop',
      'logit_bias',
      'seed',
      'response_format',
      'n',
      'logprobs',
      'user',
    ];
    const updatedDropParams = dropParams || [];
    const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
    combinedDropParams.forEach((param) => {
      if (param in llmConfig) {
        delete llmConfig[param as keyof t.ClientOptions];
      }
    });
  } else if (dropParams && Array.isArray(dropParams)) {
    dropParams.forEach((param) => {
      if (param in llmConfig) {
        delete llmConfig[param as keyof t.ClientOptions];
      }
    });
  }
  let useOpenRouter = false;
  const configOptions: t.OpenAIConfiguration = {};
  // OpenRouter detection: by reverse-proxy URL or by endpoint name
  if (
    (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
    (endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
  ) {
    useOpenRouter = true;
    llmConfig.include_reasoning = true;
    configOptions.baseURL = reverseProxyUrl;
    // OpenRouter attribution headers; caller-supplied headers may override
    configOptions.defaultHeaders = Object.assign(
      {
        'HTTP-Referer': 'https://librechat.ai',
        'X-Title': 'LibreChat',
      },
      headers,
    );
  } else if (reverseProxyUrl) {
    configOptions.baseURL = reverseProxyUrl;
    if (headers) {
      configOptions.defaultHeaders = headers;
    }
  }
  if (defaultQuery) {
    configOptions.defaultQuery = defaultQuery;
  }
  if (proxy) {
    const proxyAgent = new HttpsProxyAgent(proxy);
    configOptions.httpAgent = proxyAgent;
  }
  if (azure) {
    // Optionally use the model name as the Azure deployment name
    const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
    const updatedAzure = { ...azure };
    updatedAzure.azureOpenAIApiDeploymentName = useModelName
      ? sanitizeModelName(llmConfig.model || '')
      : azure.azureOpenAIApiDeploymentName;
    if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
      llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
    }
    if (configOptions.baseURL) {
      // Derive the Azure base path by truncating the constructed URL at the
      // deployment-name segment
      const azureURL = constructAzureURL({
        baseURL: configOptions.baseURL,
        azureOptions: updatedAzure,
      });
      updatedAzure.azureOpenAIBasePath = azureURL.split(
        `/${updatedAzure.azureOpenAIApiDeploymentName}`,
      )[0];
    }
    Object.assign(llmConfig, updatedAzure);
    // For Azure, the effective "model" is the deployment name
    llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
  } else {
    llmConfig.apiKey = apiKey;
  }
  // NOTE(review): this sets `organization` only when `azure` is set, but
  // OPENAI_ORGANIZATION is an OpenAI-platform concept — confirm the `&& azure`
  // gate is intended and not inverted.
  if (process.env.OPENAI_ORGANIZATION && azure) {
    configOptions.organization = process.env.OPENAI_ORGANIZATION;
  }
  // OpenRouter expects `reasoning: { effort }` instead of `reasoning_effort`
  if (useOpenRouter && llmConfig.reasoning_effort != null) {
    llmConfig.reasoning = {
      effort: llmConfig.reasoning_effort,
    };
    delete llmConfig.reasoning_effort;
  }
  // Translate snake_case max_tokens to the client's camelCase maxTokens
  if (llmConfig.max_tokens != null) {
    llmConfig.maxTokens = llmConfig.max_tokens;
    delete llmConfig.max_tokens;
  }
  return {
    llmConfig,
    configOptions,
  };
}

View file

@ -0,0 +1 @@
export * from './mistral/crud';

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,416 @@
import * as fs from 'fs';
import * as path from 'path';
import FormData from 'form-data';
import { logger } from '@librechat/data-schemas';
import {
FileSources,
envVarRegex,
extractEnvVariable,
extractVariableName,
} from 'librechat-data-provider';
import type { TCustomConfig } from 'librechat-data-provider';
import type { Request as ServerRequest } from 'express';
import type { AxiosError } from 'axios';
import type {
MistralFileUploadResponse,
MistralSignedUrlResponse,
MistralOCRUploadResult,
MistralOCRError,
OCRResultPage,
OCRResult,
OCRImage,
} from '~/types';
import { logAxiosError, createAxiosInstance } from '~/utils/axios';
// Shared axios instance for all Mistral OCR requests
const axios = createAxiosInstance();
// Defaults used when the OCR config supplies no base URL / model
const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1';
const DEFAULT_MISTRAL_MODEL = 'mistral-ocr-latest';
/** Helper type for auth configuration: resolved API key and base URL */
interface AuthConfig {
  apiKey: string;
  baseURL: string;
}
/** Helper type for OCR request context */
interface OCRContext {
  // Minimal request shape: user id plus the app-locals OCR config
  req: Pick<ServerRequest, 'user' | 'app'> & {
    user?: { id: string };
    app: {
      locals?: {
        ocr?: TCustomConfig['ocr'];
      };
    };
  };
  // The uploaded file (multer) being OCR'd
  file: Express.Multer.File;
  // Resolves env-var-backed auth values for the given user
  loadAuthValues: (params: {
    userId: string;
    authFields: string[];
    optional?: Set<string>;
  }) => Promise<Record<string, string | undefined>>;
}
/**
 * Uploads a document to Mistral API using file streaming to avoid loading the entire file into memory
 * @param params Upload parameters
 * @param params.filePath The path to the file on disk
 * @param params.fileName Optional filename to use (defaults to the name from filePath)
 * @param params.apiKey Mistral API key
 * @param params.baseURL Mistral API base URL
 * @returns The response from Mistral API
 * @throws Propagates any axios error from the upload request
 */
export async function uploadDocumentToMistral({
  apiKey,
  filePath,
  baseURL = DEFAULT_MISTRAL_BASE_URL,
  fileName = '',
}: {
  apiKey: string;
  filePath: string;
  baseURL?: string;
  fileName?: string;
}): Promise<MistralFileUploadResponse> {
  const form = new FormData();
  form.append('purpose', 'ocr');
  const actualFileName = fileName || path.basename(filePath);
  // Stream from disk so large documents are never buffered entirely in memory
  const fileStream = fs.createReadStream(filePath);
  form.append('file', fileStream, { filename: actualFileName });
  const response = await axios.post(`${baseURL}/files`, form, {
    headers: {
      Authorization: `Bearer ${apiKey}`,
      ...form.getHeaders(),
    },
    // Large uploads may exceed axios' default body/content size limits
    maxBodyLength: Infinity,
    maxContentLength: Infinity,
  });
  return response.data;
}
/**
 * Retrieves a time-limited signed download URL for a previously uploaded file.
 * Logs and rethrows any request failure.
 * @param params.apiKey - Mistral API key
 * @param params.fileId - ID of the uploaded file
 * @param params.expiry - URL validity in hours (default 24)
 * @param params.baseURL - Mistral API base URL
 */
export async function getSignedUrl({
  apiKey,
  fileId,
  expiry = 24,
  baseURL = DEFAULT_MISTRAL_BASE_URL,
}: {
  apiKey: string;
  fileId: string;
  expiry?: number;
  baseURL?: string;
}): Promise<MistralSignedUrlResponse> {
  const requestUrl = `${baseURL}/files/${fileId}/url?expiry=${expiry}`;
  try {
    const response = await axios.get(requestUrl, {
      headers: {
        Authorization: `Bearer ${apiKey}`,
      },
    });
    return response.data;
  } catch (error) {
    logger.error('Error fetching signed URL:', (error as Error).message);
    throw error;
  }
}
/**
 * Runs OCR on a document or image URL via the Mistral `/ocr` endpoint.
 * Logs and rethrows any request failure.
 * @param params.url - The document or image URL
 * @param params.apiKey - Mistral API key
 * @param params.model - OCR model name (defaults to DEFAULT_MISTRAL_MODEL)
 * @param params.baseURL - Mistral API base URL
 * @param params.documentType - 'document_url' or 'image_url' (default 'document_url')
 * @returns The OCR result payload
 */
export async function performOCR({
  url,
  apiKey,
  model = DEFAULT_MISTRAL_MODEL,
  baseURL = DEFAULT_MISTRAL_BASE_URL,
  documentType = 'document_url',
}: {
  url: string;
  apiKey: string;
  model?: string;
  baseURL?: string;
  documentType?: 'document_url' | 'image_url';
}): Promise<OCRResult> {
  try {
    const response = await axios.post(
      `${baseURL}/ocr`,
      {
        model,
        image_limit: 0,
        include_image_base64: false,
        document: {
          type: documentType,
          // The URL key must match the document type per the Mistral OCR API;
          // documentType is already constrained to the two valid key names.
          [documentType]: url,
        },
      },
      {
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${apiKey}`,
        },
      },
    );
    return response.data;
  } catch (error) {
    logger.error('Error performing OCR:', (error as Error).message);
    throw error;
  }
}
/**
 * Determines whether a config value must be resolved from the environment:
 * either it is an env-var reference (e.g. "${OCR_API_KEY}") or it is blank.
 */
function needsEnvLoad(value: string): boolean {
  if (envVarRegex.test(value)) {
    return true;
  }
  return value.trim() === '';
}
/**
 * Resolves the environment variable name for a config value: the referenced
 * variable when the value is an env-var expression, otherwise the default.
 */
function getEnvVarName(configValue: string, defaultName: string): string {
  const isEnvReference = envVarRegex.test(configValue);
  return isEnvReference ? extractVariableName(configValue) || defaultName : defaultName;
}
/**
 * Resolves a configuration value from either a hardcoded string or the
 * loaded environment/auth values, with an optional final fallback.
 */
async function resolveConfigValue(
  configValue: string,
  defaultEnvName: string,
  authValues: Record<string, string | undefined>,
  defaultValue?: string,
): Promise<string> {
  if (needsEnvLoad(configValue)) {
    // Env-var reference or blank: look it up in the loaded auth values
    const envVarName = getEnvVarName(configValue, defaultEnvName);
    return authValues[envVarName] || defaultValue || '';
  }
  // Hardcoded (non-env, non-empty) values are used verbatim
  return configValue;
}
/**
 * Loads authentication configuration (API key + base URL) from the OCR config.
 * Hardcoded values are used directly; env-var references and blanks are
 * resolved through `context.loadAuthValues` (OCR_BASEURL is optional there).
 */
async function loadAuthConfig(context: OCRContext): Promise<AuthConfig> {
  const ocrConfig = context.req.app.locals?.ocr;
  const apiKeyConfig = ocrConfig?.apiKey || '';
  const baseURLConfig = ocrConfig?.baseURL || '';
  // Fast path: both values hardcoded, no env lookup needed
  if (!needsEnvLoad(apiKeyConfig) && !needsEnvLoad(baseURLConfig)) {
    return {
      apiKey: apiKeyConfig,
      baseURL: baseURLConfig,
    };
  }
  // Collect only the env var names that actually need resolving
  const authFields: string[] = [];
  if (needsEnvLoad(baseURLConfig)) {
    authFields.push(getEnvVarName(baseURLConfig, 'OCR_BASEURL'));
  }
  if (needsEnvLoad(apiKeyConfig)) {
    authFields.push(getEnvVarName(apiKeyConfig, 'OCR_API_KEY'));
  }
  const authValues = await context.loadAuthValues({
    userId: context.req.user?.id || '',
    authFields,
    // Base URL may be absent; the Mistral default is applied below
    optional: new Set(['OCR_BASEURL']),
  });
  const apiKey = await resolveConfigValue(apiKeyConfig, 'OCR_API_KEY', authValues);
  const baseURL = await resolveConfigValue(
    baseURLConfig,
    'OCR_BASEURL',
    authValues,
    DEFAULT_MISTRAL_BASE_URL,
  );
  return { apiKey, baseURL };
}
/**
 * Resolves the OCR model name from config: blank falls back to the default,
 * env-var references are expanded, and literal values are trimmed.
 */
function getModelConfig(ocrConfig: TCustomConfig['ocr']): string {
  const modelConfig = ocrConfig?.mistralModel || '';
  if (modelConfig.trim() === '') {
    return DEFAULT_MISTRAL_MODEL;
  }
  // Note: the regex is tested against the untrimmed value, matching the
  // original resolution semantics
  return envVarRegex.test(modelConfig)
    ? extractEnvVariable(modelConfig) || DEFAULT_MISTRAL_MODEL
    : modelConfig.trim();
}
/**
 * Classifies an uploaded file as an image or document for the OCR request,
 * based on its MIME type or, failing that, its filename extension.
 */
function getDocumentType(file: Express.Multer.File): 'image_url' | 'document_url' {
  const mimetype = (file.mimetype || '').toLowerCase();
  if (mimetype.startsWith('image')) {
    return 'image_url';
  }
  const originalname = file.originalname || '';
  const hasImageExtension = /\.(png|jpe?g|gif|bmp|webp|tiff?)$/i.test(originalname);
  return hasImageExtension ? 'image_url' : 'document_url';
}
/**
 * Flattens an OCR result into aggregated markdown text and a list of
 * base64-encoded images. Page headers ("# PAGE n") are inserted only for
 * multi-page results.
 */
function processOCRResult(ocrResult: OCRResult): { text: string; images: string[] } {
  const textParts: string[] = [];
  const images: string[] = [];
  const isMultiPage = ocrResult.pages.length > 1;
  ocrResult.pages.forEach((page: OCRResultPage, index: number) => {
    const header = isMultiPage ? `# PAGE ${index + 1}\n` : '';
    textParts.push(`${header}${page.markdown}\n\n`);
    for (const image of page.images ?? []) {
      if (image.image_base64) {
        images.push(image.image_base64);
      }
    }
  });
  return { text: textParts.join(''), images };
}
/**
 * Builds an Error for a failed OCR operation: logs via logAxiosError using
 * the response `detail` (or the base message), and appends the response
 * `message` when present.
 */
function createOCRError(error: unknown, baseMessage: string): Error {
  const axiosError = error as AxiosError<MistralOCRError>;
  const responseData = axiosError?.response?.data;
  const errorLog = logAxiosError({
    error: axiosError,
    message: responseData?.detail || baseMessage,
  });
  const responseMessage = responseData?.message;
  return new Error(responseMessage ? `${errorLog} - ${responseMessage}` : errorLog);
}
/**
 * Uploads a file to the Mistral OCR API and processes the OCR result.
 *
 * Flow: resolve auth config → upload the raw file to Mistral's file store →
 * fetch a signed URL for it → run OCR against that URL → flatten the
 * per-page results into a single text/images payload.
 *
 * @param params - The params object.
 * @param params.req - The request object from Express. It should have a `user` property with an `id`
 *                    representing the user
 * @param params.file - The file object, which is part of the request. The file object should
 *                     have a `mimetype` property that tells us the file type
 * @param params.loadAuthValues - Function to load authentication values
 * @returns - The result object containing the processed `text` and `images` (not currently used),
 *            along with the `filename` and `bytes` properties.
 * @throws Error (via createOCRError) when any of the upstream API calls fail.
 */
export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRUploadResult> => {
  try {
    const { apiKey, baseURL } = await loadAuthConfig(context);
    const model = getModelConfig(context.req.app.locals?.ocr);
    // Upload the raw file first; Mistral returns a file id to reference.
    const mistralFile = await uploadDocumentToMistral({
      filePath: context.file.path,
      fileName: context.file.originalname,
      apiKey,
      baseURL,
    });
    // The OCR endpoint consumes a URL rather than a file id, so a signed
    // URL for the uploaded file is requested next.
    const signedUrlResponse = await getSignedUrl({
      apiKey,
      baseURL,
      fileId: mistralFile.id,
    });
    const documentType = getDocumentType(context.file);
    const ocrResult = await performOCR({
      apiKey,
      baseURL,
      model,
      url: signedUrlResponse.url,
      documentType,
    });
    // Process result
    const { text, images } = processOCRResult(ocrResult);
    return {
      filename: context.file.originalname,
      // NOTE(review): rough size estimate (4 bytes per character of OCR'd
      // text), not the actual document size — confirm consumers expect this.
      bytes: text.length * 4,
      filepath: FileSources.mistral_ocr,
      text,
      images,
    };
  } catch (error) {
    throw createOCRError(error, 'Error uploading document to Mistral OCR API');
  }
};
/**
 * Use Azure Mistral OCR API to process the OCR result.
 *
 * Unlike the plain Mistral flow, there is no file-upload/signed-URL step
 * here: the file is inlined into the OCR request as a base64 data URL.
 *
 * @param params - The params object.
 * @param params.req - The request object from Express. It should have a `user` property with an `id`
 *                    representing the user
 * @param params.file - The file object, which is part of the request. The file object should
 *                     have a `mimetype` property that tells us the file type
 * @param params.loadAuthValues - Function to load authentication values
 * @returns - The result object containing the processed `text` and `images` (not currently used),
 *            along with the `filename` and `bytes` properties.
 * @throws Error (via createOCRError) when the OCR call fails.
 */
export const uploadAzureMistralOCR = async (
  context: OCRContext,
): Promise<MistralOCRUploadResult> => {
  try {
    const { apiKey, baseURL } = await loadAuthConfig(context);
    const model = getModelConfig(context.req.app.locals?.ocr);
    // Read asynchronously: readFileSync in an async request handler would
    // block the event loop for the duration of a potentially large read.
    const buffer = await fs.promises.readFile(context.file.path);
    const base64 = buffer.toString('base64');
    /** Uses actual mimetype of the file, 'image/jpeg' as fallback since it seems to be accepted regardless of mismatch */
    const base64Prefix = `data:${context.file.mimetype || 'image/jpeg'};base64,`;
    const documentType = getDocumentType(context.file);
    const ocrResult = await performOCR({
      apiKey,
      baseURL,
      model,
      url: `${base64Prefix}${base64}`,
      documentType,
    });
    const { text, images } = processOCRResult(ocrResult);
    return {
      filename: context.file.originalname,
      // NOTE(review): rough size estimate (4 bytes per character of OCR'd
      // text), not the actual document size — confirm consumers expect this.
      bytes: text.length * 4,
      filepath: FileSources.azure_mistral_ocr,
      text,
      images,
    };
  } catch (error) {
    throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API');
  }
};

16
packages/api/src/index.ts Normal file
View file

@ -0,0 +1,16 @@
/* MCP */
export * from './mcp/manager';
/* Utilities */
export * from './mcp/utils';
export * from './utils';
/* Flow */
export * from './flow/manager';
/* Agents */
export * from './agents';
/* Endpoints */
export * from './endpoints';
/* Files */
export * from './files';
/* types */
export type * from './mcp/types';
export type * from './flow/types';

View file

@ -11,7 +11,7 @@ import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/
import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
import type { Logger } from 'winston';
import type * as t from './types/mcp.js';
import type * as t from './types';
function isStdioOptions(options: t.MCPOptions): options is t.StdioOptions {
return 'command' in options;
@ -87,8 +87,8 @@ export class MCPConnection extends EventEmitter {
this.lastPingTime = Date.now();
this.client = new Client(
{
name: 'librechat-mcp-client',
version: '1.2.2',
name: '@librechat/api-client',
version: '1.2.3',
},
{
capabilities: {},

View file

@ -1,14 +1,14 @@
import { CallToolResultSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import type { RequestOptions } from '@modelcontextprotocol/sdk/shared/protocol.js';
import type { JsonSchemaType, MCPOptions } from 'librechat-data-provider';
import type { JsonSchemaType, MCPOptions, TUser } from 'librechat-data-provider';
import type { Logger } from 'winston';
import type * as t from './types/mcp';
import type * as t from './types';
import { formatToolContent } from './parsers';
import { MCPConnection } from './connection';
import { CONSTANTS } from './enum';
export interface CallToolOptions extends RequestOptions {
userId?: string;
user?: TUser;
}
export class MCPManager {
@ -21,7 +21,9 @@ export class MCPManager {
private userLastActivity: Map<string, number> = new Map();
private readonly USER_CONNECTION_IDLE_TIMEOUT = 15 * 60 * 1000; // 15 minutes (TODO: make configurable)
private mcpConfigs: t.MCPServers = {};
private processMCPEnv?: (obj: MCPOptions, userId?: string) => MCPOptions; // Store the processing function
private processMCPEnv?: (obj: MCPOptions, user?: TUser) => MCPOptions; // Store the processing function
/** Store MCP server instructions */
private serverInstructions: Map<string, string> = new Map();
private logger: Logger;
private static getDefaultLogger(): Logger {
@ -75,6 +77,42 @@ export class MCPManager {
initializedServers.add(i);
this.connections.set(serverName, connection); // Store in app-level map
// Handle unified serverInstructions configuration
const configInstructions = config.serverInstructions;
if (configInstructions !== undefined) {
if (typeof configInstructions === 'string') {
// Custom instructions provided
this.serverInstructions.set(serverName, configInstructions);
this.logger.info(
`[MCP][${serverName}] Custom instructions stored for context inclusion: ${configInstructions}`,
);
} else if (configInstructions === true) {
// Use server-provided instructions
const serverInstructions = connection.client.getInstructions();
if (serverInstructions) {
this.serverInstructions.set(serverName, serverInstructions);
this.logger.info(
`[MCP][${serverName}] Server instructions stored for context inclusion: ${serverInstructions}`,
);
} else {
this.logger.info(
`[MCP][${serverName}] serverInstructions=true but no server instructions available`,
);
}
} else {
// configInstructions is false - explicitly disabled
this.logger.info(
`[MCP][${serverName}] Instructions explicitly disabled (serverInstructions=false)`,
);
}
} else {
this.logger.info(
`[MCP][${serverName}] Instructions not included (serverInstructions not configured)`,
);
}
const serverCapabilities = connection.client.getServerCapabilities();
this.logger.info(
`[MCP][${serverName}] Capabilities: ${JSON.stringify(serverCapabilities)}`,
@ -181,7 +219,12 @@ export class MCPManager {
}
/** Gets or creates a connection for a specific user */
public async getUserConnection(userId: string, serverName: string): Promise<MCPConnection> {
public async getUserConnection(serverName: string, user: TUser): Promise<MCPConnection> {
const userId = user.id;
if (!userId) {
throw new McpError(ErrorCode.InvalidRequest, `[MCP] User object missing id property`);
}
const userServerMap = this.userConnections.get(userId);
let connection = userServerMap?.get(serverName);
const now = Date.now();
@ -229,7 +272,7 @@ export class MCPManager {
}
if (this.processMCPEnv) {
config = { ...(this.processMCPEnv(config, userId) ?? {}) };
config = { ...(this.processMCPEnv(config, user) ?? {}) };
}
connection = new MCPConnection(serverName, config, this.logger, userId);
@ -424,14 +467,15 @@ export class MCPManager {
options?: CallToolOptions;
}): Promise<t.FormattedToolResponse> {
let connection: MCPConnection | undefined;
const { userId, ...callOptions } = options ?? {};
const { user, ...callOptions } = options ?? {};
const userId = user?.id;
const logPrefix = userId ? `[MCP][User: ${userId}][${serverName}]` : `[MCP][${serverName}]`;
try {
if (userId) {
if (userId && user) {
this.updateUserLastActivity(userId);
// Get or create user-specific connection
connection = await this.getUserConnection(userId, serverName);
connection = await this.getUserConnection(serverName, user);
} else {
// Use app-level connection
connection = this.connections.get(serverName);
@ -519,4 +563,61 @@ export class MCPManager {
logger.info('[MCP] Manager instance destroyed.');
}
}
/**
* Get instructions for MCP servers
* @param serverNames Optional array of server names. If not provided or empty, returns all servers.
* @returns Object mapping server names to their instructions
*/
public getInstructions(serverNames?: string[]): Record<string, string> {
const instructions: Record<string, string> = {};
if (!serverNames || serverNames.length === 0) {
// Return all instructions if no specific servers requested
for (const [serverName, serverInstructions] of this.serverInstructions.entries()) {
instructions[serverName] = serverInstructions;
}
} else {
// Return instructions for specific servers
for (const serverName of serverNames) {
const serverInstructions = this.serverInstructions.get(serverName);
if (serverInstructions) {
instructions[serverName] = serverInstructions;
}
}
}
return instructions;
}
/**
* Format MCP server instructions for injection into context
* @param serverNames Optional array of server names to include. If not provided, includes all servers.
* @returns Formatted instructions string ready for context injection
*/
public formatInstructionsForContext(serverNames?: string[]): string {
/** Instructions for specified servers or all stored instructions */
const instructionsToInclude = this.getInstructions(serverNames);
if (Object.keys(instructionsToInclude).length === 0) {
return '';
}
// Format instructions for context injection
const formattedInstructions = Object.entries(instructionsToInclude)
.map(([serverName, instructions]) => {
return `## ${serverName} MCP Server Instructions
${instructions}`;
})
.join('\n\n');
return `# MCP Server Instructions
The following MCP servers are available with their specific instructions:
${formattedInstructions}
Please follow these instructions when using tools from the respective MCP servers.`;
}
}

View file

@ -1,4 +1,4 @@
import type * as t from './types/mcp';
import type * as t from './types';
const RECOGNIZED_PROVIDERS = new Set([
'google',
'anthropic',

View file

@ -8,7 +8,6 @@ import {
StreamableHTTPOptionsSchema,
} from 'librechat-data-provider';
import type { JsonSchemaType, TPlugin } from 'librechat-data-provider';
import { ToolSchema, ListToolsResultSchema } from '@modelcontextprotocol/sdk/types.js';
import type * as t from '@modelcontextprotocol/sdk/types.js';
export type StdioOptions = z.infer<typeof StdioOptionsSchema>;
@ -45,8 +44,8 @@ export interface MCPPrompt {
export type ConnectionState = 'disconnected' | 'connecting' | 'connected' | 'error';
export type MCPTool = z.infer<typeof ToolSchema>;
export type MCPToolListResponse = z.infer<typeof ListToolsResultSchema>;
export type MCPTool = z.infer<typeof t.ToolSchema>;
export type MCPToolListResponse = z.infer<typeof t.ListToolsResultSchema>;
export type ToolContentPart = t.TextContent | t.ImageContent | t.EmbeddedResource | t.AudioContent;
export type ImageContent = Extract<ToolContentPart, { type: 'image' }>;
export type MCPToolCallResponse =

View file

@ -0,0 +1,19 @@
/**
* Azure OpenAI configuration interface
*/
export interface AzureOptions {
azureOpenAIApiKey?: string;
azureOpenAIApiInstanceName?: string;
azureOpenAIApiDeploymentName?: string;
azureOpenAIApiVersion?: string;
azureOpenAIBasePath?: string;
}
/**
* Client with azure property for setting deployment name
*/
export interface GenericClient {
azure: {
azureOpenAIApiDeploymentName?: string;
};
}

View file

@ -0,0 +1,4 @@
export type ServerSentEvent = {
data: string | Record<string, unknown>;
event?: string;
};

View file

@ -0,0 +1,5 @@
export * from './azure';
export * from './events';
export * from './mistral';
export * from './openai';
export * from './run';

View file

@ -0,0 +1,82 @@
/**
* Mistral OCR API Types
* Based on https://docs.mistral.ai/api/#tag/ocr/operation/ocr_v1_ocr_post
*/
export interface MistralFileUploadResponse {
id: string;
object: string;
bytes: number;
created_at: number;
filename: string;
purpose: string;
}
export interface MistralSignedUrlResponse {
url: string;
expires_at: number;
}
export interface OCRImage {
id: string;
top_left_x: number;
top_left_y: number;
bottom_right_x: number;
bottom_right_y: number;
image_base64: string;
image_annotation?: string;
}
export interface PageDimensions {
dpi: number;
height: number;
width: number;
}
export interface OCRResultPage {
index: number;
markdown: string;
images: OCRImage[];
dimensions: PageDimensions;
}
export interface OCRUsageInfo {
pages_processed: number;
doc_size_bytes: number;
}
export interface OCRResult {
pages: OCRResultPage[];
model: string;
document_annotation?: string | null;
usage_info: OCRUsageInfo;
}
export interface MistralOCRRequest {
model: string;
image_limit?: number;
include_image_base64?: boolean;
document: {
type: 'document_url' | 'image_url';
document_url?: string;
image_url?: string;
};
}
export interface MistralOCRError {
detail?: string;
message?: string;
error?: {
message?: string;
type?: string;
code?: string;
};
}
export interface MistralOCRUploadResult {
filename: string;
bytes: number;
filepath: string;
text: string;
images: string[];
}

View file

@ -0,0 +1,97 @@
import { z } from 'zod';
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
import type { TEndpointOption, TAzureConfig, TEndpoint } from 'librechat-data-provider';
import type { OpenAIClientOptions } from '@librechat/agents';
import type { AzureOptions } from './azure';
export type OpenAIParameters = z.infer<typeof openAISchema>;
/**
* Configuration options for the getLLMConfig function
*/
export interface LLMConfigOptions {
modelOptions?: Partial<OpenAIParameters>;
reverseProxyUrl?: string;
defaultQuery?: Record<string, string | undefined>;
headers?: Record<string, string>;
proxy?: string;
azure?: AzureOptions;
streaming?: boolean;
addParams?: Record<string, unknown>;
dropParams?: string[];
}
export type OpenAIConfiguration = OpenAIClientOptions['configuration'];
export type ClientOptions = OpenAIClientOptions & {
include_reasoning?: boolean;
};
/**
* Return type for getLLMConfig function
*/
export interface LLMConfigResult {
llmConfig: ClientOptions;
configOptions: OpenAIConfiguration;
}
/**
* Interface for user values retrieved from the database
*/
export interface UserKeyValues {
apiKey?: string;
baseURL?: string;
}
/**
* Request interface with only the properties we need (avoids Express typing conflicts)
*/
export interface RequestData {
user: {
id: string;
};
body: {
model?: string;
endpoint?: string;
key?: string;
};
app: {
locals: {
[EModelEndpoint.azureOpenAI]?: TAzureConfig;
[EModelEndpoint.openAI]?: TEndpoint;
all?: TEndpoint;
};
};
}
/**
* Function type for getting user key values
*/
export type GetUserKeyValuesFunction = (params: {
userId: string;
name: string;
}) => Promise<UserKeyValues>;
/**
* Function type for checking user key expiry
*/
export type CheckUserKeyExpiryFunction = (expiresAt: string, endpoint: string) => void;
/**
* Parameters for the initializeOpenAI function
*/
export interface InitializeOpenAIOptionsParams {
req: RequestData;
overrideModel?: string;
overrideEndpoint?: string;
endpointOption: Partial<TEndpointOption>;
getUserKeyValues: GetUserKeyValuesFunction;
checkUserKeyExpiry: CheckUserKeyExpiryFunction;
}
/**
* Extended LLM config result with stream rate handling
*/
export interface OpenAIOptionsResult extends LLMConfigResult {
streamRate?: number;
}

View file

@ -0,0 +1,10 @@
import type { AgentModelParameters, EModelEndpoint } from 'librechat-data-provider';
import type { OpenAIConfiguration } from './openai';
export type RunLLMConfig = {
provider: EModelEndpoint;
streaming: boolean;
streamUsage: boolean;
usage?: boolean;
configuration?: OpenAIConfiguration;
} & AgentModelParameters;

View file

@ -0,0 +1,131 @@
import axios from 'axios';
import { createAxiosInstance } from './axios';
jest.mock('axios', () => ({
interceptors: {
request: { use: jest.fn(), eject: jest.fn() },
response: { use: jest.fn(), eject: jest.fn() },
},
create: jest.fn().mockReturnValue({
defaults: {
proxy: null,
},
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
}),
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
reset: jest.fn().mockImplementation(function (this: {
get: jest.Mock;
post: jest.Mock;
put: jest.Mock;
delete: jest.Mock;
create: jest.Mock;
}) {
this.get.mockClear();
this.post.mockClear();
this.put.mockClear();
this.delete.mockClear();
this.create.mockClear();
}),
}));
describe('createAxiosInstance', () => {
const originalEnv = process.env;
beforeEach(() => {
// Reset mocks
jest.clearAllMocks();
// Create a clean copy of process.env
process.env = { ...originalEnv };
// Default: no proxy
delete process.env.proxy;
});
afterAll(() => {
// Restore original process.env
process.env = originalEnv;
});
test('creates an axios instance without proxy when no proxy env is set', () => {
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toBeNull();
});
test('configures proxy correctly with hostname and protocol', () => {
process.env.proxy = 'http://example.com';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'example.com',
protocol: 'http',
});
});
test('configures proxy correctly with hostname, protocol and port', () => {
process.env.proxy = 'https://proxy.example.com:8080';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'https',
port: 8080,
});
});
test('handles proxy URLs with authentication', () => {
process.env.proxy = 'http://user:pass@proxy.example.com:3128';
const instance = createAxiosInstance();
expect(axios.create).toHaveBeenCalledTimes(1);
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'http',
port: 3128,
// Note: The current implementation doesn't handle auth - if needed, add this functionality
});
});
test('throws error when proxy URL is invalid', () => {
process.env.proxy = 'invalid-url';
expect(() => createAxiosInstance()).toThrow('Invalid proxy URL');
expect(axios.create).toHaveBeenCalledTimes(1);
});
// If you want to test the actual URL parsing more thoroughly
test('handles edge case proxy URLs correctly', () => {
// IPv6 address
process.env.proxy = 'http://[::1]:8080';
let instance = createAxiosInstance();
expect(instance.defaults.proxy).toEqual({
host: '::1',
protocol: 'http',
port: 8080,
});
// URL with path (which should be ignored for proxy config)
process.env.proxy = 'http://proxy.example.com:8080/some/path';
instance = createAxiosInstance();
expect(instance.defaults.proxy).toEqual({
host: 'proxy.example.com',
protocol: 'http',
port: 8080,
});
});
});

View file

@ -0,0 +1,77 @@
import axios from 'axios';
import { logger } from '@librechat/data-schemas';
import type { AxiosInstance, AxiosProxyConfig, AxiosError } from 'axios';
/**
 * Logs Axios errors based on the error object and a custom message.
 * @param options - The options object.
 * @param options.message - The custom message to be logged.
 * @param options.error - The Axios error object.
 * @returns The log message.
 */
export const logAxiosError = ({ message, error }: { message: string; error: AxiosError }) => {
  let logMessage = message;
  try {
    const stack = error.stack || 'No stack trace available';
    if (error.response?.status) {
      // The server replied with an error status: log status/headers/body.
      const { status, headers, data } = error.response;
      logMessage = `${message} The server responded with status ${status}: ${error.message}`;
      logger.error(logMessage, { status, headers, data, stack });
    } else if (error.request) {
      // The request went out but no response ever came back.
      const { method, url } = error.config ?? {};
      const verb = method ? method.toUpperCase() : '';
      logMessage = `${message} No response received for ${verb} ${url || ''}: ${error.message}`;
      logger.error(logMessage, { requestInfo: { method, url }, stack });
    } else if (error?.message?.includes("Cannot read properties of undefined (reading 'status')")) {
      // Known symptom of a request that timed out or was aborted.
      logMessage = `${message} It appears the request timed out or was unsuccessful: ${error.message}`;
      logger.error(logMessage, { stack });
    } else {
      logMessage = `${message} An error occurred while setting up the request: ${error.message}`;
      logger.error(logMessage, { stack });
    }
  } catch (err: unknown) {
    // Never let the logger itself throw; report the secondary failure instead.
    logMessage = `Error in logAxiosError: ${(err as Error).message}`;
    logger.error(logMessage, { stack: (err as Error).stack || 'No stack trace available' });
  }
  return logMessage;
};
/**
 * Creates and configures an Axios instance with optional proxy settings.
 * The proxy is read from the `proxy` environment variable when present.
 * @returns A configured Axios instance
 * @throws If there's an issue creating the Axios instance or parsing the proxy URL
 */
export function createAxiosInstance(): AxiosInstance {
  const instance = axios.create();
  const proxyEnv = process.env.proxy;
  if (!proxyEnv) {
    return instance;
  }
  try {
    const parsed = new URL(proxyEnv);
    const proxyConfig: Partial<AxiosProxyConfig> = {
      // URL.hostname keeps the brackets around IPv6 literals; strip them.
      host: parsed.hostname.replace(/^\[|\]$/g, ''),
      protocol: parsed.protocol.replace(':', ''),
    };
    if (parsed.port) {
      proxyConfig.port = parseInt(parsed.port, 10);
    }
    instance.defaults.proxy = proxyConfig as AxiosProxyConfig;
  } catch (error) {
    console.error('Error parsing proxy URL:', error);
    throw new Error(`Invalid proxy URL: ${proxyEnv}`);
  }
  return instance;
}

View file

@ -0,0 +1,269 @@
import {
genAzureChatCompletion,
getAzureCredentials,
constructAzureURL,
sanitizeModelName,
genAzureEndpoint,
} from './azure';
import type { GenericClient } from '~/types';
describe('sanitizeModelName', () => {
test('removes periods from the model name', () => {
const sanitized = sanitizeModelName('model.name');
expect(sanitized).toBe('modelname');
});
test('leaves model name unchanged if no periods are present', () => {
const sanitized = sanitizeModelName('modelname');
expect(sanitized).toBe('modelname');
});
});
describe('genAzureEndpoint', () => {
test('generates correct endpoint URL', () => {
const url = genAzureEndpoint({
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
});
expect(url).toBe('https://instanceName.openai.azure.com/openai/deployments/deploymentName');
});
});
describe('genAzureChatCompletion', () => {
// Test with both deployment name and model name provided
test('prefers model name over deployment name when both are provided and feature enabled', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
},
'modelName',
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/modelName/chat/completions?api-version=v1',
);
});
// Test with only deployment name provided
test('uses deployment name when model name is not provided', () => {
const url = genAzureChatCompletion({
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
});
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/deploymentName/chat/completions?api-version=v1',
);
});
// Test with only model name provided
test('uses model name when deployment name is not provided and feature enabled', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiVersion: 'v1',
},
'modelName',
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/modelName/chat/completions?api-version=v1',
);
});
// Test with neither deployment name nor model name provided
test('throws error if neither deployment name nor model name is provided', () => {
expect(() => {
genAzureChatCompletion({
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiVersion: 'v1',
});
}).toThrow(
'Either a model name with the `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` setting or a deployment name must be provided if `AZURE_OPENAI_BASEURL` is omitted.',
);
});
// Test with feature disabled but model name provided
test('ignores model name and uses deployment name when feature is disabled', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'false';
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
},
'modelName',
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/deploymentName/chat/completions?api-version=v1',
);
});
// Test with sanitized model name
test('sanitizes model name when used in URL', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiVersion: 'v1',
},
'model.name',
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/modelname/chat/completions?api-version=v1',
);
});
// Test with client parameter and model name
test('updates client with sanitized model name when provided and feature enabled', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const clientMock = { azure: {} } as GenericClient;
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiVersion: 'v1',
},
'model.name',
clientMock,
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/modelname/chat/completions?api-version=v1',
);
expect(clientMock.azure.azureOpenAIApiDeploymentName).toBe('modelname');
});
// Test with client parameter but without model name
test('does not update client when model name is not provided', () => {
const clientMock = { azure: {} } as GenericClient;
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
},
undefined,
clientMock,
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/deploymentName/chat/completions?api-version=v1',
);
expect(clientMock.azure.azureOpenAIApiDeploymentName).toBeUndefined();
});
// Test with client parameter and deployment name when feature is disabled
test('does not update client when feature is disabled', () => {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'false';
const clientMock = { azure: {} } as GenericClient;
const url = genAzureChatCompletion(
{
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
},
'modelName',
clientMock,
);
expect(url).toBe(
'https://instanceName.openai.azure.com/openai/deployments/deploymentName/chat/completions?api-version=v1',
);
expect(clientMock.azure.azureOpenAIApiDeploymentName).toBeUndefined();
});
// Reset environment variable after tests
afterEach(() => {
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
});
});
describe('getAzureCredentials', () => {
beforeEach(() => {
process.env.AZURE_API_KEY = 'testApiKey';
process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'instanceName';
process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'deploymentName';
process.env.AZURE_OPENAI_API_VERSION = 'v1';
});
test('retrieves Azure OpenAI API credentials from environment variables', () => {
const credentials = getAzureCredentials();
expect(credentials).toEqual({
azureOpenAIApiKey: 'testApiKey',
azureOpenAIApiInstanceName: 'instanceName',
azureOpenAIApiDeploymentName: 'deploymentName',
azureOpenAIApiVersion: 'v1',
});
});
});
describe('constructAzureURL', () => {
test('replaces both placeholders when both properties are provided', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
azureOptions: {
azureOpenAIApiInstanceName: 'instance1',
azureOpenAIApiDeploymentName: 'deployment1',
},
});
expect(url).toBe('https://example.com/instance1/deployment1');
});
test('replaces only INSTANCE_NAME when only azureOpenAIApiInstanceName is provided', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
azureOptions: {
azureOpenAIApiInstanceName: 'instance2',
},
});
expect(url).toBe('https://example.com/instance2/');
});
test('replaces only DEPLOYMENT_NAME when only azureOpenAIApiDeploymentName is provided', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
azureOptions: {
azureOpenAIApiDeploymentName: 'deployment2',
},
});
expect(url).toBe('https://example.com//deployment2');
});
test('does not replace any placeholders when azure object is empty', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
azureOptions: {},
});
expect(url).toBe('https://example.com//');
});
test('returns baseURL as is when `azureOptions` object is not provided', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}',
});
expect(url).toBe('https://example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}');
});
test('returns baseURL as is when no placeholders are set', () => {
const url = constructAzureURL({
baseURL: 'https://example.com/my_custom_instance/my_deployment',
azureOptions: {
azureOpenAIApiInstanceName: 'instance1',
azureOpenAIApiDeploymentName: 'deployment1',
},
});
expect(url).toBe('https://example.com/my_custom_instance/my_deployment');
});
test('returns regular Azure OpenAI baseURL with placeholders set', () => {
const baseURL =
'https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}';
const url = constructAzureURL({
baseURL,
azureOptions: {
azureOpenAIApiInstanceName: 'instance1',
azureOpenAIApiDeploymentName: 'deployment1',
},
});
expect(url).toBe('https://instance1.openai.azure.com/openai/deployments/deployment1');
});
});

View file

@ -0,0 +1,120 @@
import { isEnabled } from './common';
import type { AzureOptions, GenericClient } from '~/types';
/**
* Sanitizes the model name to be used in the URL by removing or replacing disallowed characters.
* @param modelName - The model name to be sanitized.
* @returns The sanitized model name.
*/
export const sanitizeModelName = (modelName: string): string => {
// Replace periods with empty strings and other disallowed characters as needed.
return modelName.replace(/\./g, '');
};
/**
 * Builds the base Azure OpenAI deployment endpoint URL.
 * @param params - The parameters object.
 * @param params.azureOpenAIApiInstanceName - The Azure OpenAI API instance name.
 * @param params.azureOpenAIApiDeploymentName - The Azure OpenAI API deployment name.
 * @returns The complete endpoint URL for the Azure OpenAI API.
 */
export const genAzureEndpoint = ({
  azureOpenAIApiInstanceName,
  azureOpenAIApiDeploymentName,
}: {
  azureOpenAIApiInstanceName: string;
  azureOpenAIApiDeploymentName: string;
}): string => {
  const host = `${azureOpenAIApiInstanceName}.openai.azure.com`;
  return `https://${host}/openai/deployments/${azureOpenAIApiDeploymentName}`;
};
/**
* Generates the Azure OpenAI API chat completion endpoint URL with the API version.
* If both deploymentName and modelName are provided, modelName takes precedence.
* @param azureConfig - The Azure configuration object.
* @param azureConfig.azureOpenAIApiInstanceName - The Azure OpenAI API instance name.
* @param azureConfig.azureOpenAIApiDeploymentName - The Azure OpenAI API deployment name (optional).
* @param azureConfig.azureOpenAIApiVersion - The Azure OpenAI API version.
* @param modelName - The model name to be included in the deployment name (optional).
* @param client - The API Client class for optionally setting properties (optional).
* @returns The complete chat completion endpoint URL for the Azure OpenAI API.
* @throws Error if neither azureOpenAIApiDeploymentName nor modelName is provided.
*/
export const genAzureChatCompletion = (
{
azureOpenAIApiInstanceName,
azureOpenAIApiDeploymentName,
azureOpenAIApiVersion,
}: {
azureOpenAIApiInstanceName: string;
azureOpenAIApiDeploymentName?: string;
azureOpenAIApiVersion: string;
},
modelName?: string,
client?: GenericClient,
): string => {
// Determine the deployment segment of the URL based on provided modelName or azureOpenAIApiDeploymentName
let deploymentSegment: string;
if (isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME) && modelName) {
const sanitizedModelName = sanitizeModelName(modelName);
deploymentSegment = sanitizedModelName;
if (client && typeof client === 'object') {
client.azure.azureOpenAIApiDeploymentName = sanitizedModelName;
}
} else if (azureOpenAIApiDeploymentName) {
deploymentSegment = azureOpenAIApiDeploymentName;
} else if (!process.env.AZURE_OPENAI_BASEURL) {
throw new Error(
'Either a model name with the `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` setting or a deployment name must be provided if `AZURE_OPENAI_BASEURL` is omitted.',
);
} else {
deploymentSegment = '';
}
return `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${deploymentSegment}/chat/completions?api-version=${azureOpenAIApiVersion}`;
};
/**
 * Retrieves the Azure OpenAI API credentials from environment variables.
 * `AZURE_API_KEY` takes precedence over `AZURE_OPENAI_API_KEY`; any variable
 * that is unset yields `undefined` in the returned object.
 * @returns An object containing the Azure OpenAI API credentials.
 */
export const getAzureCredentials = (): AzureOptions => {
  const {
    AZURE_API_KEY,
    AZURE_OPENAI_API_KEY,
    AZURE_OPENAI_API_INSTANCE_NAME,
    AZURE_OPENAI_API_DEPLOYMENT_NAME,
    AZURE_OPENAI_API_VERSION,
  } = process.env;
  return {
    azureOpenAIApiKey: AZURE_API_KEY ?? AZURE_OPENAI_API_KEY,
    azureOpenAIApiInstanceName: AZURE_OPENAI_API_INSTANCE_NAME,
    azureOpenAIApiDeploymentName: AZURE_OPENAI_API_DEPLOYMENT_NAME,
    azureOpenAIApiVersion: AZURE_OPENAI_API_VERSION,
  };
};
/**
 * Constructs a URL by replacing placeholders in the baseURL with values from the azure object.
 * Looks for '${INSTANCE_NAME}' and '${DEPLOYMENT_NAME}' within the baseURL and
 * substitutes 'azureOpenAIApiInstanceName' / 'azureOpenAIApiDeploymentName'
 * respectively; a missing azure property substitutes an empty string. When no
 * azureOptions object is provided, the baseURL is returned untouched.
 *
 * @param params - The parameters object.
 * @param params.baseURL - The baseURL to inspect for replacement placeholders.
 * @param params.azureOptions - The azure options object containing the instance and deployment names.
 * @returns The complete baseURL with credentials injected for the Azure OpenAI API.
 */
export function constructAzureURL({
  baseURL,
  azureOptions,
}: {
  baseURL: string;
  azureOptions?: AzureOptions;
}): string {
  if (!azureOptions) {
    return baseURL;
  }
  // Each placeholder is replaced at most once (String.replace with a string
  // pattern), matching the original behavior.
  const substitutions: Array<[string, string]> = [
    ['${INSTANCE_NAME}', azureOptions.azureOpenAIApiInstanceName ?? ''],
    ['${DEPLOYMENT_NAME}', azureOptions.azureOpenAIApiDeploymentName ?? ''],
  ];
  return substitutions.reduce(
    (url, [placeholder, value]) => url.replace(placeholder, value),
    baseURL,
  );
}

View file

@ -0,0 +1,55 @@
/* eslint-disable @typescript-eslint/ban-ts-comment */
import { isEnabled } from './common';
// Exhaustive truth-table tests for `isEnabled`: only boolean `true` or a
// string that case-insensitively equals 'true' count as enabled; every other
// input (falsy values, whitespace, non-string/boolean types) is disabled.
describe('isEnabled', () => {
  test('should return true when input is "true"', () => {
    expect(isEnabled('true')).toBe(true);
  });
  test('should return true when input is "TRUE"', () => {
    expect(isEnabled('TRUE')).toBe(true);
  });
  test('should return true when input is true', () => {
    expect(isEnabled(true)).toBe(true);
  });
  test('should return false when input is "false"', () => {
    expect(isEnabled('false')).toBe(false);
  });
  test('should return false when input is false', () => {
    expect(isEnabled(false)).toBe(false);
  });
  test('should return false when input is null', () => {
    expect(isEnabled(null)).toBe(false);
  });
  test('should return false when input is undefined', () => {
    expect(isEnabled()).toBe(false);
  });
  test('should return false when input is an empty string', () => {
    expect(isEnabled('')).toBe(false);
  });
  test('should return false when input is a whitespace string', () => {
    expect(isEnabled(' ')).toBe(false);
  });
  // The cases below pass types outside the declared signature on purpose,
  // documenting runtime behavior for badly-typed callers.
  test('should return false when input is a number', () => {
    // @ts-expect-error
    expect(isEnabled(123)).toBe(false);
  });
  test('should return false when input is an object', () => {
    // @ts-expect-error
    expect(isEnabled({})).toBe(false);
  });
  test('should return false when input is an array', () => {
    // @ts-expect-error
    expect(isEnabled([])).toBe(false);
  });
});

View file

@ -0,0 +1,48 @@
/**
 * Checks if the given value is truthy by being either the boolean `true` or a
 * string that case-insensitively matches 'true' (surrounding whitespace ignored).
 *
 * @param value - The value to check.
 * @returns `true` for boolean `true` or a case-insensitive 'true' string,
 * otherwise `false`.
 * @example
 *
 * isEnabled("True");  // returns true
 * isEnabled("TRUE");  // returns true
 * isEnabled(true);    // returns true
 * isEnabled("false"); // returns false
 * isEnabled(false);   // returns false
 * isEnabled(null);    // returns false
 * isEnabled();        // returns false
 */
export function isEnabled(value?: string | boolean | null | undefined): boolean {
  switch (typeof value) {
    case 'boolean':
      return value;
    case 'string':
      return value.trim().toLowerCase() === 'true';
    default:
      return false;
  }
}
/**
 * Determines whether a config value indicates user-provided credentials.
 *
 * @param value - The value to check.
 * @returns True only when the value is exactly the string 'user_provided'.
 */
export const isUserProvided = (value?: string): boolean => {
  return value === 'user_provided';
};
/**
 * Returns the first argument that is neither `undefined`, `null`, nor an empty
 * string. When every argument is "empty" by that definition, the last argument
 * is returned as-is (which may itself be `undefined` or '').
 *
 * @param values - Candidate values, checked left to right.
 * @returns The first non-empty value, or the final value as a fallback.
 */
export function optionalChainWithEmptyCheck(
  ...values: (string | number | undefined)[]
): string | number | undefined {
  // `find` skips undefined/null/'' candidates; note 0 is a valid hit.
  const hit = values.find((value) => value !== undefined && value !== null && value !== '');
  return hit !== undefined ? hit : values[values.length - 1];
}

View file

@ -0,0 +1,16 @@
import type { Response as ServerResponse } from 'express';
import type { ServerSentEvent } from '~/types';
/**
 * Sends message data in Server Sent Events format.
 * Empty-string payloads are dropped rather than written to the stream.
 * @param res - The server response.
 * @param event - The message event.
 * @param event.event - The type of event.
 * @param event.data - The message to be sent.
 */
export function sendEvent(res: ServerResponse, event: ServerSentEvent): void {
  const hasEmptyStringData = typeof event.data === 'string' && event.data.length === 0;
  if (hasEmptyStringData) {
    return;
  }
  const payload = JSON.stringify(event);
  res.write(`event: message\ndata: ${payload}\n\n`);
}

View file

@ -0,0 +1,115 @@
import { sanitizeFilename } from './files';
// Make the random filename suffix deterministic ('abc123') so the length and
// shape assertions in the suite below are stable across runs.
jest.mock('node:crypto', () => {
  const actualModule = jest.requireActual('node:crypto');
  return {
    ...actualModule,
    randomBytes: jest.fn().mockReturnValue(Buffer.from('abc123', 'hex')),
  };
});
// Core sanitizeFilename behavior: path stripping, unsafe-character
// replacement, hidden-file guarding, and 255-char truncation with the
// (mocked) hex suffix.
describe('sanitizeFilename', () => {
  test('removes directory components (1/2)', () => {
    expect(sanitizeFilename('/path/to/file.txt')).toBe('file.txt');
  });
  test('removes directory components (2/2)', () => {
    expect(sanitizeFilename('../../../../file.txt')).toBe('file.txt');
  });
  test('replaces non-alphanumeric characters', () => {
    expect(sanitizeFilename('file name@#$.txt')).toBe('file_name___.txt');
  });
  test('preserves dots and hyphens', () => {
    expect(sanitizeFilename('file-name.with.dots.txt')).toBe('file-name.with.dots.txt');
  });
  test('prepends underscore to filenames starting with a dot', () => {
    expect(sanitizeFilename('.hiddenfile')).toBe('_.hiddenfile');
  });
  test('truncates long filenames', () => {
    const longName = 'a'.repeat(300) + '.txt';
    const result = sanitizeFilename(longName);
    expect(result.length).toBe(255);
    expect(result).toMatch(/^a+-abc123\.txt$/);
  });
  test('handles filenames with no extension', () => {
    const longName = 'a'.repeat(300);
    const result = sanitizeFilename(longName);
    expect(result.length).toBe(255);
    expect(result).toMatch(/^a+-abc123$/);
  });
  test('handles empty input', () => {
    expect(sanitizeFilename('')).toBe('_');
  });
  test('handles input with only special characters', () => {
    expect(sanitizeFilename('@#$%^&*')).toBe('_______');
  });
});
// The same truncation paths exercised with the real (unmocked) crypto module,
// verifying genuine hex suffixes and per-call uniqueness.
describe('sanitizeFilename with real crypto', () => {
  // Temporarily unmock crypto for these tests
  beforeAll(() => {
    jest.resetModules();
    jest.unmock('node:crypto');
  });
  // Re-install the deterministic mock so later suites see the mocked module.
  afterAll(() => {
    jest.resetModules();
    jest.mock('node:crypto', () => {
      const actualModule = jest.requireActual('node:crypto');
      return {
        ...actualModule,
        randomBytes: jest.fn().mockReturnValue(Buffer.from('abc123', 'hex')),
      };
    });
  });
  test('truncates long filenames with real crypto', async () => {
    const { sanitizeFilename: realSanitizeFilename } = await import('./files');
    const longName = 'b'.repeat(300) + '.pdf';
    const result = realSanitizeFilename(longName);
    expect(result.length).toBe(255);
    expect(result).toMatch(/^b+-[a-f0-9]{6}\.pdf$/);
    expect(result.endsWith('.pdf')).toBe(true);
  });
  test('handles filenames with no extension with real crypto', async () => {
    const { sanitizeFilename: realSanitizeFilename } = await import('./files');
    const longName = 'c'.repeat(300);
    const result = realSanitizeFilename(longName);
    expect(result.length).toBe(255);
    expect(result).toMatch(/^c+-[a-f0-9]{6}$/);
    expect(result).not.toContain('.');
  });
  test('generates unique suffixes for identical long filenames', async () => {
    const { sanitizeFilename: realSanitizeFilename } = await import('./files');
    const longName = 'd'.repeat(300) + '.doc';
    const result1 = realSanitizeFilename(longName);
    const result2 = realSanitizeFilename(longName);
    expect(result1.length).toBe(255);
    expect(result2.length).toBe(255);
    expect(result1).not.toBe(result2); // Should be different due to random suffix
    expect(result1.endsWith('.doc')).toBe(true);
    expect(result2.endsWith('.doc')).toBe(true);
  });
  test('real crypto produces valid hex strings', async () => {
    const { sanitizeFilename: realSanitizeFilename } = await import('./files');
    const longName = 'test'.repeat(100) + '.txt';
    const result = realSanitizeFilename(longName);
    const hexMatch = result.match(/-([a-f0-9]{6})\.txt$/);
    expect(hexMatch).toBeTruthy();
    expect(hexMatch![1]).toMatch(/^[a-f0-9]{6}$/);
  });
});

View file

@ -0,0 +1,33 @@
import path from 'path';
import crypto from 'node:crypto';
/**
 * Sanitize a filename: strip directory components, replace unsafe characters
 * with underscores, guard against hidden-file names, and cap the length at
 * 255 characters (over-long names get a short random hex suffix for uniqueness).
 * @param inputName - The raw, possibly unsafe filename.
 * @returns A filesystem-safe filename.
 */
export function sanitizeFilename(inputName: string): string {
  // Drop any directory components so only the base name remains.
  const base = path.basename(inputName);
  // Allow only alphanumerics, '.' and '-'; everything else becomes '_'.
  let safe = base.replace(/[^a-zA-Z0-9.-]/g, '_');
  // Avoid hidden files (leading dot) and fully-empty names.
  if (safe === '' || safe.startsWith('.')) {
    safe = `_${safe}`;
  }
  const MAX_LENGTH = 255;
  if (safe.length <= MAX_LENGTH) {
    return safe;
  }
  // Too long: keep the extension, truncate the stem, and append a random
  // hex tag ('-' plus 6 hex chars = 7 characters) to preserve uniqueness.
  const ext = path.extname(safe);
  const stem = path.basename(safe, ext);
  const suffix = crypto.randomBytes(3).toString('hex');
  return `${stem.slice(0, MAX_LENGTH - ext.length - 7)}-${suffix}${ext}`;
}

View file

@ -0,0 +1,75 @@
import fetch from 'node-fetch';
import { logger } from '@librechat/data-schemas';
import { GraphEvents, sleep } from '@librechat/agents';
import type { Response as ServerResponse } from 'express';
import type { ServerSentEvent } from '~/types';
import { sendEvent } from './events';
/**
 * Makes a function to make HTTP request and logs the process.
 * @param params
 * @param params.directEndpoint - When true, every request is sent to
 *   `reverseProxyUrl`, ignoring the URL passed by the caller.
 * @param params.reverseProxyUrl - The reverse proxy URL to use for the request.
 * @returns An async function with the same call shape as `fetch`.
 */
export function createFetch({
  directEndpoint = false,
  reverseProxyUrl = '',
}: {
  directEndpoint?: boolean;
  reverseProxyUrl?: string;
}) {
  /**
   * Makes an HTTP request and logs the process.
   * @param _url - The URL to make the request to. Can be a string or a Request object.
   * @param init - Optional init options for the request.
   * @returns A promise that resolves to the response of the fetch request.
   */
  return async function (
    _url: fetch.RequestInfo,
    init: fetch.RequestInit,
  ): Promise<fetch.Response> {
    // With a direct endpoint configured, the caller-supplied URL is ignored.
    const url = directEndpoint ? reverseProxyUrl : _url;
    logger.debug(`Making request to ${url}`);
    // NOTE: the previous version branched on `typeof Bun !== 'undefined'`,
    // but both branches performed the identical `fetch(url, init)` call, so
    // the dead duplicate branch is removed; behavior is unchanged.
    return await fetch(url, init);
  };
}
/**
 * Creates event handlers for stream events that don't capture client references.
 * Each handler forwards its event to the response via `sendEvent`, guarding
 * against a missing response object.
 * @param res - The response object to send events to.
 * @returns Object containing handler functions keyed by graph event name.
 */
export function createStreamEventHandlers(res: ServerResponse) {
  const forward = (event: ServerSentEvent): void => {
    if (res) {
      sendEvent(res, event);
    }
  };
  // Distinct wrapper per event key, mirroring the original per-key functions.
  return {
    [GraphEvents.ON_RUN_STEP]: (event: ServerSentEvent) => forward(event),
    [GraphEvents.ON_MESSAGE_DELTA]: (event: ServerSentEvent) => forward(event),
    [GraphEvents.ON_REASONING_DELTA]: (event: ServerSentEvent) => forward(event),
  };
}
/**
 * Creates a per-token callback that throttles streaming by sleeping
 * `streamRate` milliseconds per token. A zero/falsy rate disables throttling.
 * @param streamRate - Delay in milliseconds between tokens.
 */
export function createHandleLLMNewToken(streamRate: number) {
  return async function () {
    if (!streamRate) {
      return;
    }
    await sleep(streamRate);
  };
}

View file

@ -0,0 +1,8 @@
export * from './axios';
export * from './azure';
export * from './common';
export * from './events';
export * from './files';
export * from './generators';
export * from './openid';
export { default as Tokenizer } from './tokenizer';

View file

@ -0,0 +1,51 @@
/**
 * Helper function to safely log sensitive data when debug mode is enabled.
 * Values under secret-bearing keys ('client_secret', 'Authorization', or any
 * key containing 'token'/'password') are masked; oversized output is truncated.
 * @param obj - Object to stringify.
 * @param maxLength - Maximum length of the stringified output.
 * @returns Stringified object with sensitive data masked.
 */
export function safeStringify(obj: unknown, maxLength = 1000): string {
  const isSensitiveKey = (key: string): boolean => {
    const lower = key.toLowerCase();
    return (
      key === 'client_secret' ||
      key === 'Authorization' ||
      lower.includes('token') ||
      lower.includes('password')
    );
  };
  // Long strings keep their first/last 3 chars for recognizability.
  const mask = (value: unknown): string =>
    typeof value === 'string' && value.length > 6
      ? `${value.substring(0, 3)}...${value.substring(value.length - 3)}`
      : '***MASKED***';
  try {
    const str = JSON.stringify(obj, (key, value) => (isSensitiveKey(key) ? mask(value) : value));
    if (str && str.length > maxLength) {
      return `${str.substring(0, maxLength)}... (truncated)`;
    }
    return str;
  } catch (error) {
    return `[Error stringifying object: ${(error as Error).message}]`;
  }
}
/**
 * Helper to log headers without revealing sensitive information.
 * 'authorization' headers and any header whose name contains 'secret' are
 * masked; objects without a usable `entries` iterator yield a fallback string.
 * @param headers - Headers object to log.
 * @returns Stringified headers with sensitive data masked.
 */
export function logHeaders(headers: Headers | undefined | null): string {
  if (!headers || typeof headers.entries !== 'function') {
    return 'No headers available';
  }
  const sanitized: Record<string, string> = {};
  for (const [name, value] of headers.entries()) {
    const lower = name.toLowerCase();
    const sensitive = lower === 'authorization' || lower.includes('secret');
    sanitized[name] = sensitive ? '***MASKED***' : value;
  }
  return safeStringify(sanitized);
}

View file

@ -0,0 +1,143 @@
/**
* @file Tokenizer.spec.cjs
*
* Tests the real TokenizerSingleton (no mocking of `tiktoken`).
* Make sure to install `tiktoken` and have it configured properly.
*/
import { logger } from '@librechat/data-schemas';
import type { Tiktoken } from 'tiktoken';
import Tokenizer from './tokenizer';
// Replace the shared logger with spies; these suites assert on logger.error.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    error: jest.fn(),
  },
}));
// Integration-style tests against the real tiktoken-backed singleton:
// encoder caching, bulk free/reset, token counting with error recovery,
// and the automatic reset after 25 counting calls.
describe('Tokenizer', () => {
  it('should be a singleton (same instance)', async () => {
    const AnotherTokenizer = await import('./tokenizer'); // same path
    expect(Tokenizer).toBe(AnotherTokenizer.default);
  });
  describe('getTokenizer', () => {
    it('should create an encoder for an explicit model name (e.g., "gpt-4")', () => {
      // The real `encoding_for_model` will be called internally
      // as soon as we pass isModelName = true.
      const tokenizer = Tokenizer.getTokenizer('gpt-4', true);
      // Basic sanity checks
      expect(tokenizer).toBeDefined();
      // You can optionally check certain properties from `tiktoken` if they exist
      // e.g., expect(typeof tokenizer.encode).toBe('function');
    });
    it('should create an encoder for a known encoding (e.g., "cl100k_base")', () => {
      // The real `get_encoding` will be called internally
      // as soon as we pass isModelName = false.
      const tokenizer = Tokenizer.getTokenizer('cl100k_base', false);
      expect(tokenizer).toBeDefined();
      // e.g., expect(typeof tokenizer.encode).toBe('function');
    });
    it('should return cached tokenizer if previously fetched', () => {
      const tokenizer1 = Tokenizer.getTokenizer('cl100k_base', false);
      const tokenizer2 = Tokenizer.getTokenizer('cl100k_base', false);
      // Should be the exact same instance from the cache
      expect(tokenizer1).toBe(tokenizer2);
    });
  });
  describe('freeAndResetAllEncoders', () => {
    beforeEach(() => {
      jest.clearAllMocks();
    });
    it('should free all encoders and reset tokenizerCallsCount to 1', () => {
      // By creating two different encodings, we populate the cache
      Tokenizer.getTokenizer('cl100k_base', false);
      Tokenizer.getTokenizer('r50k_base', false);
      // Now free them
      Tokenizer.freeAndResetAllEncoders();
      // The internal cache is cleared
      expect(Tokenizer.tokenizersCache['cl100k_base']).toBeUndefined();
      expect(Tokenizer.tokenizersCache['r50k_base']).toBeUndefined();
      // tokenizerCallsCount is reset to 1
      expect(Tokenizer.tokenizerCallsCount).toBe(1);
    });
    it('should catch and log errors if freeing fails', () => {
      // Mock logger.error before the test
      const mockLoggerError = jest.spyOn(logger, 'error');
      // Set up a problematic tokenizer in the cache
      Tokenizer.tokenizersCache['cl100k_base'] = {
        free() {
          throw new Error('Intentional free error');
        },
      } as unknown as Tiktoken;
      // Should not throw uncaught errors
      Tokenizer.freeAndResetAllEncoders();
      // Verify logger.error was called with correct arguments
      expect(mockLoggerError).toHaveBeenCalledWith(
        '[Tokenizer] Free and reset encoders error',
        expect.any(Error),
      );
      // Clean up
      mockLoggerError.mockRestore();
      Tokenizer.tokenizersCache = {};
    });
  });
  describe('getTokenCount', () => {
    beforeEach(() => {
      jest.clearAllMocks();
      Tokenizer.freeAndResetAllEncoders();
    });
    it('should return the number of tokens in the given text', () => {
      const text = 'Hello, world!';
      const count = Tokenizer.getTokenCount(text, 'cl100k_base');
      expect(count).toBeGreaterThan(0);
    });
    it('should reset encoders if an error is thrown', () => {
      // We can simulate an error by temporarily overriding the selected tokenizer's `encode` method.
      const tokenizer = Tokenizer.getTokenizer('cl100k_base', false);
      const originalEncode = tokenizer.encode;
      tokenizer.encode = () => {
        throw new Error('Forced error');
      };
      // Despite the forced error, the code should catch and reset, then re-encode
      const count = Tokenizer.getTokenCount('Hello again', 'cl100k_base');
      expect(count).toBeGreaterThan(0);
      // Restore the original encode
      tokenizer.encode = originalEncode;
    });
    it('should reset tokenizers after 25 calls', () => {
      // Spy on freeAndResetAllEncoders
      const resetSpy = jest.spyOn(Tokenizer, 'freeAndResetAllEncoders');
      // Make 24 calls; should NOT reset yet
      for (let i = 0; i < 24; i++) {
        Tokenizer.getTokenCount('test text', 'cl100k_base');
      }
      expect(resetSpy).not.toHaveBeenCalled();
      // 25th call triggers the reset
      Tokenizer.getTokenCount('the 25th call!', 'cl100k_base');
      expect(resetSpy).toHaveBeenCalledTimes(1);
    });
  });
});

View file

@ -0,0 +1,78 @@
import { logger } from '@librechat/data-schemas';
import { encoding_for_model as encodingForModel, get_encoding as getEncoding } from 'tiktoken';
import type { Tiktoken, TiktokenModel, TiktokenEncoding } from 'tiktoken';
/** Options for the Tokenizer (currently only a debug flag). */
interface TokenizerOptions {
  debug?: boolean;
}
/**
 * Caching wrapper around `tiktoken` encoders, exported as a module-level
 * singleton. Encoders are cached per encoding/model name and freed in bulk
 * once 25 `getTokenCount` calls accumulate (see `resetTokenizersIfNecessary`).
 */
class Tokenizer {
  // Cache of instantiated encoders, keyed by model or encoding name.
  tokenizersCache: Record<string, Tiktoken>;
  // Number of `getTokenCount` calls since the last bulk reset.
  tokenizerCallsCount: number;
  // NOTE(review): `options` is never assigned anywhere in this file, so the
  // debug branch in `resetTokenizersIfNecessary` appears unreachable — confirm.
  private options?: TokenizerOptions;
  constructor() {
    this.tokenizersCache = {};
    this.tokenizerCallsCount = 0;
  }
  /**
   * Returns an encoder for the given encoding or model name, creating and
   * caching it on first use.
   * @param encoding - Encoding name (e.g. 'cl100k_base') or model name.
   * @param isModelName - When true, `encoding` is treated as a model name.
   * @param extendSpecialTokens - Extra special tokens forwarded to tiktoken.
   */
  getTokenizer(
    encoding: TiktokenModel | TiktokenEncoding,
    isModelName = false,
    extendSpecialTokens: Record<string, number> = {},
  ): Tiktoken {
    let tokenizer: Tiktoken;
    if (this.tokenizersCache[encoding]) {
      tokenizer = this.tokenizersCache[encoding];
    } else {
      if (isModelName) {
        tokenizer = encodingForModel(encoding as TiktokenModel, extendSpecialTokens);
      } else {
        tokenizer = getEncoding(encoding as TiktokenEncoding, extendSpecialTokens);
      }
      this.tokenizersCache[encoding] = tokenizer;
    }
    return tokenizer;
  }
  /**
   * Frees every cached encoder and resets the call counter.
   * Errors during freeing are logged, not rethrown.
   */
  freeAndResetAllEncoders(): void {
    try {
      Object.keys(this.tokenizersCache).forEach((key) => {
        if (this.tokenizersCache[key]) {
          this.tokenizersCache[key].free();
          delete this.tokenizersCache[key];
        }
      });
      // Reset to 1 (not 0): this counts as the call that triggered the reset.
      this.tokenizerCallsCount = 1;
    } catch (error) {
      logger.error('[Tokenizer] Free and reset encoders error', error);
    }
  }
  /** Frees all encoders once 25 calls have accumulated, then counts this call. */
  resetTokenizersIfNecessary(): void {
    if (this.tokenizerCallsCount >= 25) {
      if (this.options?.debug) {
        logger.debug('[Tokenizer] freeAndResetAllEncoders: reached 25 encodings, resetting...');
      }
      this.freeAndResetAllEncoders();
    }
    this.tokenizerCallsCount++;
  }
  /**
   * Counts tokens in `text` using the given encoding (default 'cl100k_base').
   * On encoder failure, frees all cached encoders once and retries with a
   * freshly created encoder.
   */
  getTokenCount(text: string, encoding: TiktokenModel | TiktokenEncoding = 'cl100k_base'): number {
    this.resetTokenizersIfNecessary();
    try {
      const tokenizer = this.getTokenizer(encoding);
      return tokenizer.encode(text, 'all').length;
    } catch (error) {
      logger.error('[Tokenizer] Error getting token count:', error);
      this.freeAndResetAllEncoders();
      const tokenizer = this.getTokenizer(encoding);
      return tokenizer.encode(text, 'all').length;
    }
  }
}
// Module-level singleton; all importers share one encoder cache.
const TokenizerSingleton = new Tokenizer();
export default TokenizerSingleton;

View file

@ -18,7 +18,10 @@
"isolatedModules": true,
"noEmit": true,
"sourceMap": true,
"baseUrl": "."
"baseUrl": ".",
"paths": {
"~/*": ["./src/*"]
}
},
"ts-node": {
"experimentalSpecifierResolution": "node",

View file

@ -5,6 +5,7 @@ export default {
testResultsProcessor: 'jest-junit',
moduleNameMapper: {
'^@src/(.*)$': '<rootDir>/src/$1',
'~/(.*)': '<rootDir>/src/$1',
},
// coverageThreshold: {
// global: {
@ -16,4 +17,4 @@ export default {
// },
restoreMocks: true,
testTimeout: 15000,
};
};

View file

@ -48,11 +48,11 @@
"@types/bcrypt": "^5.0.2",
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/jest": "^29.5.14",
"@types/node": "^20.3.0",
"@types/passport-jwt": "^4.0.1",
"@types/traverse": "^0.6.37",
"jest": "^29.5.0",
"jest": "^29.7.0",
"jest-junit": "^16.0.0",
"rimraf": "^5.0.1",
"rollup": "^4.22.4",
@ -63,14 +63,19 @@
"typescript": "^5.0.4"
},
"dependencies": {
"@librechat/data-schemas": "^0.0.7",
"@librechat/api": "^1.2.3",
"@librechat/data-schemas": "^0.0.8",
"@node-saml/passport-saml": "^5.0.1",
"@types/nodemailer": "^6.4.17",
"axios": "^1.10.0",
"bcryptjs": "^3.0.2",
"crypto": "^1.0.1",
"form-data": "^4.0.3",
"handlebars": "^4.7.8",
"jsonwebtoken": "^9.0.2",
"jwks-rsa": "^3.2.0",
"klona": "^2.0.6",
"mongodb-memory-server": "^10.1.4",
"mongoose": "^8.12.1",
"nodemailer": "^7.0.3",
"openid-client": "^6.5.0",
@ -85,9 +90,7 @@
"passport-local": "^1.0.0",
"passport-oauth2": "^1.8.0",
"sharp": "^0.33.5",
"traverse": "^0.6.11",
"winston": "^3.17.0",
"winston-daily-rotate-file": "^5.0.0"
"traverse": "^0.6.11"
},
"peerDependencies": {
"keyv": "^5.3.2"
@ -102,4 +105,4 @@
"typescript",
"librechat"
]
}
}

View file

@ -211,7 +211,10 @@ const setOpenIDAuthTokens = (tokenset: TokenEndpointResponse, res: Response) =>
return;
}
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expiryInMilliseconds = eval(REFRESH_TOKEN_EXPIRY ?? '') ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
const expiryInMilliseconds = REFRESH_TOKEN_EXPIRY
? eval(REFRESH_TOKEN_EXPIRY)
: 1000 * 60 * 60 * 24 * 7; // 7 days default
const expirationDate = new Date(Date.now() + expiryInMilliseconds);
if (tokenset == null) {
logger.error('[setOpenIDAuthTokens] No tokenset found in request');

View file

@ -1,5 +1,5 @@
import { BalanceConfig, createMethods } from '@librechat/data-schemas';
import type { Mongoose } from 'mongoose';
import { BalanceConfig, createMethods } from '@librechat/data-schemas';
// Flag to prevent re-initialization
let initialized = false;

View file

@ -0,0 +1,409 @@
import mongoose from 'mongoose';
import { Strategy as AppleStrategy, Profile as AppleProfile } from 'passport-apple';
import { MongoMemoryServer } from 'mongodb-memory-server';
import jwt from 'jsonwebtoken';
import { logger, userSchema } from '@librechat/data-schemas';
import { isEnabled } from '@librechat/api';
import { createSocialUser, handleExistingUser } from './helpers';
import { socialLogin } from './socialLogin';
import { IUser } from '@librechat/data-schemas';
const mockFindUser = jest.fn();
jest.mock('jsonwebtoken');
jest.mock('@librechat/data-schemas', () => {
const actualModule = jest.requireActual('@librechat/data-schemas');
return {
...actualModule,
logger: {
error: jest.fn(),
info: jest.fn(),
debug: jest.fn(),
warn: jest.fn(),
},
createMethods: jest.fn(() => {
return { findUser: mockFindUser };
}),
};
});
jest.mock('../initAuth', () => {
const actualModule = jest.requireActual('../initAuth');
return {
...actualModule,
getMethods: jest.fn(() => {
return { findUser: mockFindUser };
}),
};
});
jest.mock('./helpers', () => {
const actualModule = jest.requireActual('./helpers');
return {
...actualModule,
createSocialUser: jest.fn(),
handleExistingUser: jest.fn(),
};
});
jest.mock('@librechat/api', () => ({
isEnabled: jest.fn(),
}));
describe('Apple Login Strategy', () => {
let mongoServer: MongoMemoryServer;
let appleStrategyInstance: InstanceType<typeof AppleStrategy>;
let User: any;
const OLD_ENV = process.env;
let getProfileDetails: ({
idToken,
profile,
}: {
idToken: string | null;
profile: AppleProfile;
}) => Partial<IUser> & { avatarUrl: null };
// Start and stop in-memory MongoDB
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
User = mongoose.models.User || mongoose.model('User', userSchema);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
process.env = OLD_ENV;
});
beforeEach(async () => {
// Reset environment variables
process.env = { ...OLD_ENV };
process.env.APPLE_CLIENT_ID = 'fake_client_id';
process.env.APPLE_TEAM_ID = 'fake_team_id';
process.env.APPLE_CALLBACK_URL = '/auth/apple/callback';
process.env.DOMAIN_SERVER = 'https://example.com';
process.env.APPLE_KEY_ID = 'fake_key_id';
process.env.APPLE_PRIVATE_KEY_PATH = '/path/to/fake/private/key';
process.env.ALLOW_SOCIAL_REGISTRATION = 'true';
// Clear mocks and database
jest.clearAllMocks();
await User.deleteMany({});
// Define getProfileDetails within the test scope
getProfileDetails = ({ idToken, profile }) => {
if (!idToken) {
logger.error('idToken is missing');
throw new Error('idToken is missing');
}
const decoded = jwt.decode(idToken) as any;
if (!decoded) {
logger.error('Failed to decode idToken');
throw new Error('idToken is invalid');
}
console.log('Decoded token:', decoded);
logger.debug(`Decoded Apple JWT: ${JSON.stringify(decoded, null, 2)}`);
return {
email: decoded.email,
id: decoded.sub,
avatarUrl: null, // Apple does not provide an avatar URL
username: decoded.email ? decoded.email.split('@')[0].toLowerCase() : `user_${decoded.sub}`,
name: decoded.name
? `${decoded.name.firstName} ${decoded.name.lastName}`
: profile.displayName || null,
emailVerified: true, // Apple verifies the email
};
};
// Mock isEnabled based on environment variable
(isEnabled as jest.Mock).mockImplementation((flag: string) => flag === 'true');
// Initialize the strategy with the mocked getProfileDetails
const appleLogin = socialLogin('apple', getProfileDetails);
appleStrategyInstance = new AppleStrategy(
{
clientID: process.env.APPLE_CLIENT_ID,
teamID: process.env.APPLE_TEAM_ID,
callbackURL: `${process.env.DOMAIN_SERVER}${process.env.APPLE_CALLBACK_URL}`,
keyID: process.env.APPLE_KEY_ID,
privateKeyLocation: process.env.APPLE_PRIVATE_KEY_PATH,
passReqToCallback: false,
},
appleLogin,
);
});
const mockProfile = {
displayName: 'John Doe',
};
describe('getProfileDetails', () => {
it('should throw an error if idToken is missing', () => {
expect(() => {
getProfileDetails({ idToken: null, profile: mockProfile });
}).toThrow('idToken is missing');
expect(logger.error).toHaveBeenCalledWith('idToken is missing');
});
it('should throw an error if idToken cannot be decoded', () => {
(jwt.decode as jest.Mock).mockReturnValue(null);
expect(() => {
getProfileDetails({ idToken: 'invalid_id_token', profile: mockProfile });
}).toThrow('idToken is invalid');
expect(logger.error).toHaveBeenCalledWith('Failed to decode idToken');
});
it('should extract user details correctly from idToken', () => {
const fakeDecodedToken = {
email: 'john.doe@example.com',
sub: 'apple-sub-1234',
name: {
firstName: 'John',
lastName: 'Doe',
},
};
(jwt.decode as jest.Mock).mockReturnValue(fakeDecodedToken);
const profileDetails = getProfileDetails({
idToken: 'fake_id_token',
profile: mockProfile,
});
expect(jwt.decode).toHaveBeenCalledWith('fake_id_token');
expect(logger.debug).toHaveBeenCalledWith(expect.stringContaining('Decoded Apple JWT'));
expect(profileDetails).toEqual({
email: 'john.doe@example.com',
id: 'apple-sub-1234',
avatarUrl: null,
username: 'john.doe',
name: 'John Doe',
emailVerified: true,
});
});
it('should handle missing email and use sub for username', () => {
const fakeDecodedToken = {
sub: 'apple-sub-5678',
};
(jwt.decode as jest.Mock).mockReturnValue(fakeDecodedToken);
const profileDetails = getProfileDetails({
idToken: 'fake_id_token',
profile: mockProfile,
});
expect(profileDetails).toEqual({
email: undefined,
id: 'apple-sub-5678',
avatarUrl: null,
username: 'user_apple-sub-5678',
name: 'John Doe',
emailVerified: true,
});
});
});
describe('Strategy verify callback', () => {
  /** Minimal token set — the verify callback only consumes `id_token`. */
  const tokenset = {
    id_token: 'fake_id_token',
  };
  /** Claims the mocked `jwt.decode` returns for the fake id token. */
  const decodedToken = {
    email: 'jane.doe@example.com',
    sub: 'apple-sub-9012',
    name: {
      firstName: 'Jane',
      lastName: 'Doe',
    },
  };
  const fakeAccessToken = 'fake_access_token';
  const fakeRefreshToken = 'fake_refresh_token';

  /**
   * Invokes the strategy's verify callback with the standard fake tokens and
   * resolves once `done` has fired, forwarding (err, user) to `onDone`.
   * Centralizes the promise-wrapping boilerplate previously repeated per test.
   */
  const invokeVerify = (
    idToken: string | null,
    onDone: (err: Error | null, user: any) => void,
  ): Promise<void> =>
    new Promise<void>((resolve) => {
      appleStrategyInstance._verify(
        fakeAccessToken,
        fakeRefreshToken,
        idToken,
        mockProfile,
        (err: Error | null, user: any) => {
          onDone(err, user);
          resolve();
        },
      );
    });

  beforeEach(async () => {
    // Decode the fake id token into canned claims and start each test from a
    // clean "no existing user" state.
    (jwt.decode as jest.Mock).mockReturnValue(decodedToken);
    mockFindUser.mockResolvedValue(null);
    const { initAuth } = require('../initAuth');
    const saveBufferMock = jest.fn().mockResolvedValue('/fake/path/to/avatar.png');
    await initAuth(mongoose, { enabled: false }, saveBufferMock); // fake balance config, dummy saveBuffer
  });

  it('should create a new user if one does not exist and registration is allowed', async () => {
    // Mock findUser to return null (user does not exist)
    mockFindUser.mockResolvedValue(null);
    // Mock createSocialUser to create a user
    (createSocialUser as jest.Mock).mockImplementation(async (userData: any) => {
      const user = new User(userData);
      await user.save();
      return user;
    });

    const mockVerifyCallback = jest.fn();
    await invokeVerify(tokenset.id_token, mockVerifyCallback);

    expect(mockVerifyCallback).toHaveBeenCalledWith(
      null,
      expect.objectContaining({ email: 'jane.doe@example.com' }),
    );
    const user = mockVerifyCallback.mock.calls[0][1];
    expect(user.email).toBe('jane.doe@example.com');
    expect(user.username).toBe('jane.doe');
    expect(user.name).toBe('Jane Doe');
    expect(user.provider).toBe('apple');
  });

  it('should handle existing user and update avatarUrl', async () => {
    // Create an existing user without saving to database
    const existingUser = new User({
      email: 'jane.doe@example.com',
      username: 'jane.doe',
      name: 'Jane Doe',
      provider: 'apple',
      providerId: 'apple-sub-9012',
      avatarUrl: 'old_avatar.png',
    });
    // Mock findUser to return the existing user
    mockFindUser.mockResolvedValue(existingUser);
    // Mock handleExistingUser to update avatarUrl in memory only (no DB write)
    (handleExistingUser as jest.Mock).mockImplementation(
      async (user: any, avatarUrl: string | null) => {
        user.avatarUrl = avatarUrl;
        // Don't call save() to avoid database operations
        return user;
      },
    );

    const mockVerifyCallback = jest.fn();
    await invokeVerify(tokenset.id_token, mockVerifyCallback);

    expect(mockVerifyCallback).toHaveBeenCalledWith(null, existingUser);
    expect(existingUser.avatarUrl).toBe(''); // As per getProfileDetails
    expect(handleExistingUser).toHaveBeenCalledWith(existingUser, '');
  });

  it('should handle missing idToken gracefully', async () => {
    const mockVerifyCallback = jest.fn();
    // idToken is missing entirely
    await invokeVerify(null, mockVerifyCallback);

    expect(mockVerifyCallback).toHaveBeenCalledWith(expect.any(Error), undefined);
    expect(mockVerifyCallback.mock.calls[0][0].message).toBe('idToken is missing');
    // Ensure createSocialUser and handleExistingUser were not called
    expect(createSocialUser).not.toHaveBeenCalled();
    expect(handleExistingUser).not.toHaveBeenCalled();
  });

  it('should handle decoding errors gracefully', async () => {
    // Simulate decoding failure by returning null
    (jwt.decode as jest.Mock).mockReturnValue(null);

    const mockVerifyCallback = jest.fn();
    await invokeVerify(tokenset.id_token, mockVerifyCallback);

    expect(mockVerifyCallback).toHaveBeenCalledWith(expect.any(Error), undefined);
    expect(mockVerifyCallback.mock.calls[0][0].message).toBe('idToken is invalid');
    // Ensure createSocialUser and handleExistingUser were not called
    expect(createSocialUser).not.toHaveBeenCalled();
    expect(handleExistingUser).not.toHaveBeenCalled();
    // Ensure logger.error was called
    expect(logger.error).toHaveBeenCalledWith('Failed to decode idToken');
  });

  it('should handle errors during user creation', async () => {
    // Mock findUser to return null (user does not exist)
    mockFindUser.mockResolvedValue(null);
    // Mock createSocialUser to throw an error
    (createSocialUser as jest.Mock).mockImplementation(() => {
      throw new Error('Database error');
    });

    const mockVerifyCallback = jest.fn();
    await invokeVerify(tokenset.id_token, mockVerifyCallback);

    expect(mockVerifyCallback).toHaveBeenCalledWith(expect.any(Error), undefined);
    expect(mockVerifyCallback.mock.calls[0][0].message).toBe('Database error');
    // Ensure logger.error was called
    expect(logger.error).toHaveBeenCalledWith('[appleLogin]', expect.any(Error));
  });
});
});

View file

@ -3,7 +3,6 @@ import { logger } from '@librechat/data-schemas';
import jwt from 'jsonwebtoken';
import { GetProfileDetails, GetProfileDetailsParams } from './types';
import socialLogin from './socialLogin';
import { Profile } from 'passport';
/**
* Extract profile details from the decoded idToken
@ -27,7 +26,7 @@ const getProfileDetails: GetProfileDetails = ({ profile, idToken }: GetProfileDe
id: decoded.sub,
avatarUrl: null, // Apple does not provide an avatar URL
username: decoded.email ? decoded.email.split('@')[0].toLowerCase() : `user_${decoded.sub}`,
name: decoded.name
displayName: decoded.name
? `${decoded.name.firstName} ${decoded.name.lastName}`
: profile.displayName || null,
emailVerified: true, // Apple verifies the email

View file

@ -1,4 +1,3 @@
import { Profile } from 'passport';
import { Strategy as DiscordStrategy } from 'passport-discord';
import socialLogin from './socialLogin';
import { GetProfileDetails } from './types';

View file

@ -18,6 +18,7 @@ import { CreateSocialUserParams } from './types';
* @throws {Error} Throws an error if there's an issue saving the updated user object.
*/
const handleExistingUser = async (oldUser: IUser, avatarUrl: string) => {
console.log(1111);
const fileStrategy = process.env.CDN_PROVIDER ?? FileSources.local;
const isLocal = fileStrategy === FileSources.local;

View file

@ -2,8 +2,8 @@ import fs from 'fs';
import LdapStrategy, { type Options } from 'passport-ldapauth';
import { SystemRoles } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import { isEnabled } from '../utils';
import { getBalanceConfig, getMethods } from '../initAuth';
import { isEnabled } from '../utils';
const {
LDAP_URL,
@ -79,7 +79,7 @@ const ldapLogin = () => {
usernameField: 'email',
passwordField: 'password',
};
return new LdapStrategy(ldapOptions, async (userinfo: any, done) => {
return new LdapStrategy(ldapOptions, async (userinfo: any, done: any) => {
if (!userinfo) {
return done(null, false, { message: 'Invalid credentials' });
}

View file

@ -10,7 +10,7 @@ import { Request } from 'express';
// Unix timestamp for 2024-06-07 15:20:18 Eastern Time
const verificationEnabledTimestamp = 1717788018;
async function validateLoginRequest(req) {
async function validateLoginRequest(req: Request) {
const { error } = loginSchema.safeParse(req.body);
return error ? errorsToString(error.errors) : null;
}
@ -59,6 +59,12 @@ async function passportStrategy(
return done(null, false, { message: 'Email does not exist.' });
}
if (!user.password) {
logError('Passport Local Strategy - User has no password', { email });
logger.error(`[Login] [Login failed] [Username: ${email}] [Request-IP: ${req.ip}]`);
return done(null, false, { message: 'Email does not exist.' });
}
const isMatch = await comparePassword(user, password);
if (!isMatch) {
logError('Passport Local Strategy - Password does not match', { isMatch });

View file

@ -0,0 +1,355 @@
import passport from 'passport';
import mongoose from 'mongoose';
// --- Mocks ---
jest.mock('jsonwebtoken');
// Stub undici.fetch so avatar downloads never hit the network; every request
// resolves to a fake 200 PNG response.
jest.mock('undici', () => {
  const ActualUndici = jest.requireActual('undici');
  return {
    ...ActualUndici,
    fetch: jest.fn(() => {
      return new ActualUndici.Response(Buffer.from('fake image'), {
        status: 200,
        headers: { 'content-type': 'image/png' },
      });
    }),
  };
});
// Shared user-method mocks, (re)configured per test in beforeEach.
// The `mock` prefix lets the hoisted jest.mock factory below reference them.
const mockedMethods = {
  findUser: jest.fn(),
  createUser: jest.fn(),
  updateUser: jest.fn(),
};
jest.mock('@librechat/data-schemas', () => {
  const actual = jest.requireActual('@librechat/data-schemas');
  return {
    ...actual,
    createMethods: jest.fn(() => mockedMethods),
  };
});
// Mock the openid-client module and all its dependencies
jest.mock('openid-client', () => {
  return {
    discovery: jest.fn().mockResolvedValue({
      clientId: 'fake_client_id',
      clientSecret: 'fake_client_secret',
      issuer: 'https://fake-issuer.com',
      // Add any other properties needed by the implementation
    }),
    fetchUserInfo: jest.fn().mockImplementation((config, accessToken, sub) => {
      // Only return additional properties, but don't override any claims
      return Promise.resolve({
        preferred_username: 'preferred_username',
      });
    }),
    customFetch: Symbol('customFetch'),
  };
});
// Capture the verify callback handed to the OpenID strategy constructor so
// tests can drive it directly via __getVerifyCallback().
jest.mock('openid-client/passport', () => {
  let verifyCallback: (...args: any[]) => any;
  const mockConstructor = jest.fn((options, verify) => {
    verifyCallback = verify;
    return {
      name: 'openid',
      options,
      verify,
    };
  });
  return {
    Strategy: mockConstructor,
    __getVerifyCallback: () => verifyCallback,
  };
});
// Mock passport so strategies can be registered without real middleware
jest.mock('passport', () => ({
  use: jest.fn(),
}));
import undici from 'undici';
import { setupOpenId } from './openidStrategy';
import { initAuth } from '../initAuth';
import jwt from 'jsonwebtoken';
describe('setupOpenId', () => {
  /** The verify callback captured from the mocked OpenID strategy constructor. */
  let verifyCallback: (...args: any[]) => any;

  /** Runs the verify callback for a token set and resolves with { user, details }. */
  const validate = (tokenset: any) =>
    new Promise((resolve, reject) => {
      verifyCallback(tokenset, (err: Error | null, user: any, details: any) => {
        if (err) {
          reject(err);
        } else {
          resolve({ user, details });
        }
      });
    });

  /** Baseline token set; individual tests override `claims` as needed. */
  const tokenset = {
    id_token: 'fake_id_token',
    access_token: 'fake_access_token',
    claims: () => ({
      sub: '1234',
      email: 'test@example.com',
      email_verified: true,
      given_name: 'First',
      family_name: 'Last',
      name: 'My Full',
      username: 'flast',
      picture: 'https://example.com/avatar.png',
    }),
  };

  beforeEach(async () => {
    // Clear previous mock calls and reset implementations
    jest.clearAllMocks();
    // Reset environment variables needed by the strategy
    process.env.OPENID_ISSUER = 'https://fake-issuer.com';
    process.env.OPENID_CLIENT_ID = 'fake_client_id';
    process.env.OPENID_CLIENT_SECRET = 'fake_client_secret';
    process.env.DOMAIN_SERVER = 'https://example.com';
    process.env.OPENID_CALLBACK_URL = '/callback';
    process.env.OPENID_SCOPE = 'openid profile email';
    process.env.OPENID_REQUIRED_ROLE = 'requiredRole';
    process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'roles';
    process.env.OPENID_REQUIRED_ROLE_TOKEN_KIND = 'id';
    delete process.env.OPENID_USERNAME_CLAIM;
    delete process.env.OPENID_NAME_CLAIM;
    delete process.env.PROXY;
    delete process.env.OPENID_USE_PKCE;
    // Default jwt.decode mock returns a token that includes the required role.
    (jwt.decode as jest.Mock).mockReturnValue({
      roles: ['requiredRole'],
    });
    // By default, assume that no user is found, so createUser will be called
    mockedMethods.findUser.mockResolvedValue(null);
    mockedMethods.createUser.mockImplementation(async (userData) => {
      // simulate created user with an _id property
      return { _id: 'newUserId', ...userData };
    });
    mockedMethods.updateUser.mockImplementation(async (id, userData) => {
      return { _id: id, ...userData };
    });
    // Setup failures must propagate: swallowing them would leave verifyCallback
    // undefined and make every test fail with a confusing error instead.
    const saveBufferMock = jest.fn().mockResolvedValue('/fake/path/to/avatar.png');
    await initAuth(mongoose, { enabled: false }, saveBufferMock); // fake balance config, dummy saveBuffer
    const openidLogin = await setupOpenId({});
    // Simulate the app's `passport.use(...)`
    passport.use('openid', openidLogin);
    verifyCallback = require('openid-client/passport').__getVerifyCallback();
  });

  it('should create a new user with correct username when username claim exists', async () => {
    // Arrange — our userinfo already has username 'flast'
    const userinfo = tokenset.claims();
    // Act
    const { user } = (await validate(tokenset)) as any;
    // Assert
    expect(user.username).toBe(userinfo.username);
    expect(mockedMethods.createUser).toHaveBeenCalledWith(
      expect.objectContaining({
        provider: 'openid',
        openidId: userinfo.sub,
        username: userinfo.username,
        email: userinfo.email,
        name: `${userinfo.given_name} ${userinfo.family_name}`,
      }),
      { enabled: false },
      true,
      true,
    );
  });

  it('should use given_name as username when username claim is missing', async () => {
    // Arrange — remove username from userinfo
    const userinfo: any = { ...tokenset.claims() };
    delete userinfo.username;
    // Expect the username to be the given name (unchanged case)
    const expectUsername = userinfo.given_name;
    // Act
    const { user } = (await validate({ ...tokenset, claims: () => userinfo })) as any;
    // Assert
    expect(user.username).toBe(expectUsername);
    expect(mockedMethods.createUser).toHaveBeenCalledWith(
      expect.objectContaining({ username: expectUsername }),
      { enabled: false },
      true,
      true,
    );
  });

  it('should use email as username when username and given_name are missing', async () => {
    // Arrange — remove username and given_name
    const userinfo: any = { ...tokenset.claims() };
    delete userinfo.username;
    delete userinfo.given_name;
    const expectUsername = userinfo.email;
    // Act
    const { user } = (await validate({ ...tokenset, claims: () => userinfo })) as any;
    // Assert
    expect(user.username).toBe(expectUsername);
    expect(mockedMethods.createUser).toHaveBeenCalledWith(
      expect.objectContaining({ username: expectUsername }),
      { enabled: false },
      true,
      true,
    );
  });

  it('should override username with OPENID_USERNAME_CLAIM when set', async () => {
    // Arrange — set OPENID_USERNAME_CLAIM so that the sub claim is used
    process.env.OPENID_USERNAME_CLAIM = 'sub';
    const userinfo = tokenset.claims();
    // Act
    const { user } = (await validate(tokenset)) as any;
    // Assert — username should equal the sub (converted as-is)
    expect(user.username).toBe(userinfo.sub);
    expect(mockedMethods.createUser).toHaveBeenCalledWith(
      expect.objectContaining({ username: userinfo.sub }),
      { enabled: false },
      true,
      true,
    );
  });

  it('should set the full name correctly when given_name and family_name exist', async () => {
    // Arrange
    const userinfo = tokenset.claims();
    const expectedFullName = `${userinfo.given_name} ${userinfo.family_name}`;
    // Act
    const { user } = (await validate(tokenset)) as any;
    // Assert
    expect(user.name).toBe(expectedFullName);
  });

  it('should override full name with OPENID_NAME_CLAIM when set', async () => {
    // Arrange — use the name claim as the full name
    process.env.OPENID_NAME_CLAIM = 'name';
    const userinfo = { ...tokenset.claims(), name: 'Custom Name' };
    // Act
    const { user } = (await validate({ ...tokenset, claims: () => userinfo })) as any;
    // Assert
    expect(user.name).toBe('Custom Name');
  });

  it('should update an existing user on login', async () => {
    // Arrange — simulate that a user already exists
    const existingUser = {
      _id: 'existingUserId',
      provider: 'local',
      email: tokenset.claims().email,
      openidId: '',
      username: '',
      name: '',
    };
    mockedMethods.findUser.mockImplementation(async (query) => {
      if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
        return existingUser;
      }
      return null;
    });
    const userinfo = tokenset.claims();
    // Act
    await validate(tokenset);
    // Assert — updateUser should be called and the user object updated
    expect(mockedMethods.updateUser).toHaveBeenCalledWith(
      existingUser._id,
      expect.objectContaining({
        provider: 'openid',
        openidId: userinfo.sub,
        username: userinfo.username,
        name: `${userinfo.given_name} ${userinfo.family_name}`,
      }),
    );
  });

  it('should enforce the required role and reject login if missing', async () => {
    // Arrange — simulate a token without the required role.
    (jwt.decode as jest.Mock).mockReturnValue({
      roles: ['SomeOtherRole'],
    });
    // Act
    const { user, details } = (await validate(tokenset)) as any;
    // Assert — verify that the strategy rejects login
    expect(user).toBe(false);
    expect(details.message).toBe('You must have the "requiredRole" role to log in.');
  });

  it.skip('should attempt to download and save the avatar if picture is provided', async () => {
    // Act
    const { user } = (await validate(tokenset)) as any;
    // Assert — verify that download was attempted and the avatar field was set via updateUser
    expect(undici.fetch).toHaveBeenCalled();
    // Our mock getStrategyFunctions.saveBuffer returns '/fake/path/to/avatar.png'
    expect(user.avatar).toBe('/fake/path/to/avatar.png');
  });

  it('should not attempt to download avatar if picture is not provided', async () => {
    // Arrange — remove picture
    const userinfo: any = { ...tokenset.claims() };
    delete userinfo.picture;
    // Act
    await validate({ ...tokenset, claims: () => userinfo });
    // Assert — fetch should not be called and avatar should remain undefined or empty
    expect(undici.fetch).not.toHaveBeenCalled();
    // Depending on your implementation, user.avatar may be undefined or an empty string.
  });

  it('should default to usePKCE false when OPENID_USE_PKCE is not defined', async () => {
    const OpenIDStrategy = require('openid-client/passport').Strategy;
    delete process.env.OPENID_USE_PKCE;
    const { setupOpenId } = require('./openidStrategy');
    await setupOpenId({});
    const callOptions = OpenIDStrategy.mock.calls[OpenIDStrategy.mock.calls.length - 1][0];
    expect(callOptions.usePKCE).toBe(false);
    expect(callOptions.params?.code_challenge_method).toBeUndefined();
  });
});

View file

@ -1,27 +1,106 @@
import passport from 'passport';
import * as client from 'openid-client';
// @ts-ignore
import { Strategy as OpenIDStrategy } from 'openid-client/passport';
import { Strategy as OpenIDStrategy, VerifyCallback } from 'openid-client/passport';
import jwt from 'jsonwebtoken';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { hashToken, logger } from '@librechat/data-schemas';
import { isEnabled } from '../utils';
import { safeStringify, logHeaders } from '@librechat/api';
import * as oauth from 'oauth4webapi';
import { getBalanceConfig, getMethods, getSaveBufferStrategy } from '../initAuth';
import { fetch, Response as UndiciResponse, Headers } from 'undici';
import { Request } from 'express';
let crypto: typeof import('node:crypto') | undefined;
/**
* @typedef {import('openid-client').ClientMetadata} ClientMetadata
* @typedef {import('openid-client').Configuration} Configuration
**/
/**
* @param {string} url
* @param {client.CustomFetchOptions} options
*/
export async function customFetch(url: URL | string, options: any): Promise<UndiciResponse> {
const urlStr = url.toString();
logger.debug(`[openidStrategy] Request to: ${urlStr}`);
const debugOpenId = isEnabled(process.env.DEBUG_OPENID_REQUESTS ?? '');
if (debugOpenId) {
logger.debug(`[openidStrategy] Request method: ${options.method || 'GET'}`);
logger.debug(`[openidStrategy] Request headers: ${logHeaders(options.headers)}`);
if (options.body) {
let bodyForLogging = '';
if (options.body instanceof URLSearchParams) {
bodyForLogging = options.body.toString();
} else if (typeof options.body === 'string') {
bodyForLogging = options.body;
} else {
bodyForLogging = safeStringify(options.body);
}
logger.debug(`[openidStrategy] Request body: ${bodyForLogging}`);
}
}
try {
/** @type {undici.RequestInit} */
let fetchOptions = options;
if (process.env.PROXY) {
logger.info(`[openidStrategy] proxy agent configured: ${process.env.PROXY}`);
fetchOptions = {
...options,
dispatcher: new HttpsProxyAgent(process.env.PROXY ?? ''),
};
}
const response: UndiciResponse = await fetch(url, fetchOptions);
if (debugOpenId) {
logger.debug(`[openidStrategy] Response status: ${response.status} ${response.statusText}`);
// logger.debug(`[openidStrategy] Response headers: ${logHeaders(response.headers)}`);
}
if (response.status === 200 && response.headers.has('www-authenticate')) {
const wwwAuth = response.headers.get('www-authenticate');
logger.warn(`[openidStrategy] Non-standard WWW-Authenticate header found in successful response (200 OK): ${wwwAuth}.
This violates RFC 7235 and may cause issues with strict OAuth clients. Removing header for compatibility.`);
/** Cloned response without the WWW-Authenticate header */
const responseBody = await response.arrayBuffer();
const newHeaders = new Headers();
for (const [key, value] of response.headers.entries()) {
if (key.toLowerCase() !== 'www-authenticate') {
newHeaders.append(key, value);
}
}
return new UndiciResponse(responseBody, {
status: response.status,
statusText: response.statusText,
headers: newHeaders,
});
}
return response;
} catch (error: any) {
logger.error(`[openidStrategy] Fetch error: ${error.message}`);
throw error;
}
}
//overload currenturl function because of express version 4 buggy req.host doesn't include port
//More info https://github.com/panva/openid-client/pull/713
let openidConfig: client.Configuration;
class CustomOpenIDStrategy extends OpenIDStrategy {
constructor(options: any, verify: Function) {
constructor(options: any, verify: VerifyCallback) {
super(options, verify);
}
currentUrl(req: any): URL {
currentUrl(req: Request): URL {
const hostAndProtocol = process.env.DOMAIN_SERVER!;
return new URL(`${hostAndProtocol}${req.originalUrl ?? req.url}`);
}
authorizationRequestParams(req: any, options: any) {
const params = super.authorizationRequestParams(req, options);
authorizationRequestParams(req: Request, options: any): URLSearchParams {
const params = super.authorizationRequestParams(req, options) as URLSearchParams;
if (options?.state && !params?.has('state')) {
params?.set('state', options.state);
}
@ -29,7 +108,6 @@ class CustomOpenIDStrategy extends OpenIDStrategy {
}
}
let openidConfig: client.Configuration;
let tokensCache: any;
/**
@ -128,7 +206,7 @@ const downloadImage = async (
if (process.env.PROXY) {
options.agent = new HttpsProxyAgent(process.env.PROXY);
}
const response: Response = await fetch(url, options);
const response: UndiciResponse = await fetch(url, options);
if (response.ok) {
const arrayBuffer = await response.arrayBuffer();
const buffer = Buffer.from(arrayBuffer);
@ -208,7 +286,7 @@ function convertToUsername(input: string | string[], defaultValue: string = '')
async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
try {
tokensCache = tokensCacheKv;
/** @type {ClientMetadata} */
const clientMetadata = {
client_id: process.env.OPENID_CLIENT_ID,
client_secret: process.env.OPENID_CLIENT_SECRET,
@ -218,19 +296,13 @@ async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
new URL(process.env.OPENID_ISSUER ?? ''),
process.env.OPENID_CLIENT_ID ?? '',
clientMetadata,
undefined,
{
//@ts-ignore
[client.customFetch]: customFetch,
},
);
const { findUser, createUser, updateUser } = getMethods();
if (process.env.PROXY) {
const proxyAgent = new HttpsProxyAgent(process.env.PROXY);
const customFetch: client.CustomFetch = (...args: any[]) => {
return fetch(args[0], { ...args[1], agent: proxyAgent });
};
openidConfig[client.customFetch] = customFetch;
logger.info(`[openidStrategy] proxy agent added: ${process.env.PROXY}`);
}
const requiredRole = process.env.OPENID_REQUIRED_ROLE;
const requiredRoleParameterPath = process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH;
const requiredRoleTokenKind = process.env.OPENID_REQUIRED_ROLE_TOKEN_KIND;
@ -243,17 +315,13 @@ async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
callbackURL: `${process.env.DOMAIN_SERVER}${process.env.OPENID_CALLBACK_URL}`,
usePKCE,
},
async (
tokenset: client.TokenEndpointResponse & client.TokenEndpointResponseHelpers,
done: passport.AuthenticateCallback,
) => {
async (tokenset: any, done) => {
try {
const claims: oauth.IDToken | undefined = tokenset.claims();
let user = await findUser({ openidId: claims?.sub });
logger.info(
`[openidStrategy] user ${user ? 'found' : 'not found'} with openidId: ${claims?.sub}`,
);
if (!user) {
user = await findUser({ email: claims?.email });
logger.info(
@ -267,7 +335,6 @@ async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
...(await getUserInfo(openidConfig, tokenset.access_token, claims?.sub ?? '')),
};
const fullName = getFullName(userinfo);
if (requiredRole) {
let decodedToken = null;
if (requiredRoleTokenKind === 'access') {
@ -333,7 +400,6 @@ async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
if (!!userinfo && userinfo.picture && !user?.avatar?.includes('manual=true')) {
/** @type {string | undefined} */
const imageUrl = userinfo.picture;
let fileName;
try {
crypto = await import('node:crypto');
@ -346,7 +412,6 @@ async function setupOpenId(tokensCacheKv: any): Promise<any | null> {
} else {
fileName = userinfo.sub + '.png';
}
const imageBuffer = await downloadImage(
imageUrl,
openidConfig,

View file

@ -0,0 +1,416 @@
import passport from 'passport';
import mongoose from 'mongoose';
// --- Mocks ---
// Mock fs/path so certificate loading never touches the real filesystem;
// each test configures the return values it needs.
jest.mock('fs', () => ({
  existsSync: jest.fn(),
  statSync: jest.fn(),
  readFileSync: jest.fn(),
}));
jest.mock('path', () => ({
  isAbsolute: jest.fn(),
  basename: jest.fn(),
  dirname: jest.fn(),
  join: jest.fn(),
  normalize: jest.fn(),
}));
// Shared user-method mocks; the `mock` prefix allows the hoisted jest.mock
// factory below to reference them from its closure.
const mockedMethods = {
  findUser: jest.fn(),
  createUser: jest.fn(),
  updateUser: jest.fn(),
};
jest.mock('@librechat/data-schemas', () => {
  const actual = jest.requireActual('@librechat/data-schemas');
  return {
    ...actual,
    createMethods: jest.fn(() => mockedMethods),
    // Silence logger output during tests while keeping calls assertable.
    logger: {
      info: jest.fn(),
      debug: jest.fn(),
      error: jest.fn(),
    },
  };
});
jest.mock('@librechat/api', () => ({
  isEnabled: jest.fn(() => false),
  isUserProvided: jest.fn(() => false),
}));
import path from 'path';
import fs from 'fs';
import { samlLogin, getCertificateContent } from './samlStrategy';
import { initAuth } from '../initAuth';
import { Profile } from '@node-saml/passport-saml/lib';
// To capture the verify callback from the strategy, we grab it from the mock constructor
describe('getCertificateContent', () => {
  /** PEM certificate fixture; exact text (including newlines) is asserted below. */
  const certWithHeader = `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUKhXaFJGJJPx466rlwYORIsqCq7MwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNTAzMDQwODUxNTJaFw0yNjAz
MDQwODUxNTJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQCWP09NZg0xaRiLpNygCVgV3M+4RFW2S0c5X/fg/uFT
O5MfaVYzG5GxzhXzWRB8RtNPsxX/nlbPsoUroeHbz+SABkOsNEv6JuKRH4VXRH34
VzjazVkPAwj+N4WqsC/Wo4EGGpKIGeGi8Zed4yvMqoTyE3mrS19fY0nMHT62wUwS
GMm2pAQdAQePZ9WY7A5XOA1IoxW2Zh2Oxaf1p59epBkZDhoxSMu8GoSkvK27Km4A
4UXftzdg/wHNPrNirmcYouioHdmrOtYxPjrhUBQ74AmE1/QK45B6wEgirKH1A1AW
6C+ApLwpBMvy9+8Gbyvc8G18W3CjdEVKmAeWb9JUedSXAgMBAAGjUzBRMB0GA1Ud
DgQWBBRxpaqBx8VDLLc8IkHATujj8IOs6jAfBgNVHSMEGDAWgBRxpaqBx8VDLLc8
IkHATujj8IOs6jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBc
Puk6i+yowwGccB3LhfxZ+Fz6s6/Lfx6bP/Hy4NYOxmx2/awGBgyfp1tmotjaS9Cf
FWd67LuEru4TYtz12RNMDBF5ypcEfibvb3I8O6igOSQX/Jl5D2pMChesZxhmCift
Qp09T41MA8PmHf1G9oMG0A3ZnjKDG5ebaJNRFImJhMHsgh/TP7V3uZy7YHTgopKX
Hv63V3Uo3Oihav29Q7urwmf7Ly7X7J2WE86/w3vRHi5dhaWWqEqxmnAXl+H+sG4V
meeVRI332bg1Nuy8KnnX8v3ZeJzMBkAhzvSr6Ri96R0/Un/oEFwVC5jDTq8sXVn6
u7wlOSk+oFzDIO/UILIA
-----END CERTIFICATE-----`;
  /** Same cert with the PEM header/footer and all whitespace stripped. */
  const certWithoutHeader = certWithHeader
    .replace(/-----BEGIN CERTIFICATE-----/g, '')
    .replace(/-----END CERTIFICATE-----/g, '')
    .replace(/\s+/g, '');

  it('should throw an error if SAML_CERT is not set', () => {
    // Actually unset the variable; the original bare `process.env.SAML_CERT;`
    // statement was a no-op and did not guarantee an undefined value.
    delete process.env.SAML_CERT;
    expect(() => getCertificateContent(process.env.SAML_CERT)).toThrow(
      'Invalid input: SAML_CERT must be a string.',
    );
  });

  it('should throw an error if SAML_CERT is empty', () => {
    process.env.SAML_CERT = '';
    expect(() => getCertificateContent(process.env.SAML_CERT)).toThrow(
      'Invalid cert: SAML_CERT must be a valid file path or certificate string.',
    );
  });

  it('should load cert from an environment variable if it is a single-line string(with header)', () => {
    process.env.SAML_CERT = certWithHeader;
    const actual = getCertificateContent(process.env.SAML_CERT);
    expect(actual).toBe(certWithHeader);
  });

  it('should load cert from an environment variable if it is a single-line string(with no header)', () => {
    process.env.SAML_CERT = certWithoutHeader;
    const actual = getCertificateContent(process.env.SAML_CERT);
    expect(actual).toBe(certWithoutHeader);
  });

  it('should throw an error if SAML_CERT is a single-line string (with header, no newline characters)', () => {
    process.env.SAML_CERT = certWithHeader.replace(/\n/g, '');
    expect(() => getCertificateContent(process.env.SAML_CERT)).toThrow(
      'Invalid cert: SAML_CERT must be a valid file path or certificate string.',
    );
  });

  it('should load cert from a relative file path if SAML_CERT is valid', () => {
    process.env.SAML_CERT = 'test.pem';
    const resolvedPath = '/absolute/path/to/test.pem';
    (path.isAbsolute as jest.Mock).mockReturnValue(false);
    (path.join as jest.Mock).mockReturnValue(resolvedPath);
    (path.normalize as jest.Mock).mockReturnValue(resolvedPath);
    (fs.existsSync as jest.Mock).mockReturnValue(true);
    (fs.statSync as jest.Mock).mockReturnValue({ isFile: () => true });
    (fs.readFileSync as jest.Mock).mockReturnValue(certWithHeader);
    const actual = getCertificateContent(process.env.SAML_CERT);
    expect(actual).toBe(certWithHeader);
  });

  it('should load cert from an absolute file path if SAML_CERT is valid', () => {
    process.env.SAML_CERT = '/absolute/path/to/test.pem';
    (path.isAbsolute as jest.Mock).mockReturnValue(true);
    (path.normalize as jest.Mock).mockReturnValue(process.env.SAML_CERT);
    (fs.existsSync as jest.Mock).mockReturnValue(true);
    (fs.statSync as jest.Mock).mockReturnValue({ isFile: () => true });
    (fs.readFileSync as jest.Mock).mockReturnValue(certWithHeader);
    const actual = getCertificateContent(process.env.SAML_CERT);
    expect(actual).toBe(certWithHeader);
  });

  it('should throw an error if the file does not exist', () => {
    process.env.SAML_CERT = 'missing.pem';
    const resolvedPath = '/absolute/path/to/missing.pem';
    (path.isAbsolute as jest.Mock).mockReturnValue(false);
    (path.join as jest.Mock).mockReturnValue(resolvedPath);
    (path.normalize as jest.Mock).mockReturnValue(resolvedPath);
    (fs.existsSync as jest.Mock).mockReturnValue(false);
    expect(() => getCertificateContent(process.env.SAML_CERT)).toThrow(
      'Invalid cert: SAML_CERT must be a valid file path or certificate string.',
    );
  });

  it('should throw an error if the file is not readable', () => {
    process.env.SAML_CERT = 'unreadable.pem';
    const resolvedPath = '/absolute/path/to/unreadable.pem';
    (path.isAbsolute as jest.Mock).mockReturnValue(false);
    (path.join as jest.Mock).mockReturnValue(resolvedPath);
    (path.normalize as jest.Mock).mockReturnValue(resolvedPath);
    (fs.existsSync as jest.Mock).mockReturnValue(true);
    (fs.statSync as jest.Mock).mockReturnValue({ isFile: () => true });
    (fs.readFileSync as jest.Mock).mockImplementation(() => {
      throw new Error('Permission denied');
    });
    expect(() => getCertificateContent(process.env.SAML_CERT)).toThrow(
      'Error reading certificate file: Permission denied',
    );
  });
});
describe('setupSaml', () => {
let verifyCallback: (...args: any[]) => any;
// Helper to wrap the verify callback in a promise
const validate = (profile: any) =>
new Promise((resolve, reject) => {
verifyCallback(profile, (err: Error | null, user: any, details: any) => {
if (err) {
reject(err);
} else {
resolve({ user, details });
}
});
});
const baseProfile = {
nameID: 'saml-1234',
email: 'test@example.com',
given_name: 'First',
family_name: 'Last',
name: 'My Full Name',
username: 'flast',
picture: 'https://example.com/avatar.png',
custom_name: 'custom',
};
beforeEach(async () => {
  jest.clearAllMocks();
  // Configure mocks: no pre-existing user; create/update echo their input back.
  mockedMethods.findUser.mockResolvedValue(null);
  mockedMethods.createUser.mockImplementation(async (userData) => ({
    _id: 'mock-user-id',
    ...userData,
  }));
  mockedMethods.updateUser.mockImplementation(async (id, userData) => ({
    _id: id,
    ...userData,
  }));
  // Self-signed certificate used only to satisfy SAML strategy configuration.
  const cert = `
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUKhXaFJGJJPx466rlwYORIsqCq7MwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNTAzMDQwODUxNTJaFw0yNjAz
MDQwODUxNTJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQCWP09NZg0xaRiLpNygCVgV3M+4RFW2S0c5X/fg/uFT
O5MfaVYzG5GxzhXzWRB8RtNPsxX/nlbPsoUroeHbz+SABkOsNEv6JuKRH4VXRH34
VzjazVkPAwj+N4WqsC/Wo4EGGpKIGeGi8Zed4yvMqoTyE3mrS19fY0nMHT62wUwS
GMm2pAQdAQePZ9WY7A5XOA1IoxW2Zh2Oxaf1p59epBkZDhoxSMu8GoSkvK27Km4A
4UXftzdg/wHNPrNirmcYouioHdmrOtYxPjrhUBQ74AmE1/QK45B6wEgirKH1A1AW
6C+ApLwpBMvy9+8Gbyvc8G18W3CjdEVKmAeWb9JUedSXAgMBAAGjUzBRMB0GA1Ud
DgQWBBRxpaqBx8VDLLc8IkHATujj8IOs6jAfBgNVHSMEGDAWgBRxpaqBx8VDLLc8
IkHATujj8IOs6jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBc
Puk6i+yowwGccB3LhfxZ+Fz6s6/Lfx6bP/Hy4NYOxmx2/awGBgyfp1tmotjaS9Cf
FWd67LuEru4TYtz12RNMDBF5ypcEfibvb3I8O6igOSQX/Jl5D2pMChesZxhmCift
Qp09T41MA8PmHf1G9oMG0A3ZnjKDG5ebaJNRFImJhMHsgh/TP7V3uZy7YHTgopKX
Hv63V3Uo3Oihav29Q7urwmf7Ly7X7J2WE86/w3vRHi5dhaWWqEqxmnAXl+H+sG4V
meeVRI332bg1Nuy8KnnX8v3ZeJzMBkAhzvSr6Ri96R0/Un/oEFwVC5jDTq8sXVn6
u7wlOSk+oFzDIO/UILIA
-----END CERTIFICATE-----
`;
  // Reset environment variables to a known-good SAML configuration.
  process.env.SAML_ENTRY_POINT = 'https://example.com/saml';
  process.env.SAML_ISSUER = 'saml-issuer';
  process.env.SAML_CERT = cert;
  process.env.SAML_CALLBACK_URL = '/oauth/saml/callback';
  // Clear optional claim overrides so each test opts in explicitly.
  delete process.env.SAML_EMAIL_CLAIM;
  delete process.env.SAML_USERNAME_CLAIM;
  delete process.env.SAML_GIVEN_NAME_CLAIM;
  delete process.env.SAML_FAMILY_NAME_CLAIM;
  delete process.env.SAML_PICTURE_CLAIM;
  delete process.env.SAML_NAME_CLAIM;
  // For image download, simulate a successful response
  global.fetch = jest.fn().mockResolvedValue({
    ok: true,
    arrayBuffer: jest.fn().mockResolvedValue(Buffer.from('fake image')),
  });
  const saveBufferMock = jest.fn().mockResolvedValue('/fake/path/to/avatar.png');
  await initAuth(mongoose, { enabled: false }, saveBufferMock);
  // Simulate the app's `passport.use(...)` and capture the sign-on verify
  // callback so the `validate` helper can invoke it directly.
  // (Removed leftover debug `console.log('---SamlStrategy', ...)`.)
  const SamlStrategy: any = samlLogin();
  passport.use('saml', SamlStrategy);
  verifyCallback = SamlStrategy._signonVerify;
});
// Username resolution: explicit claim > given_name > email, with env override.
it('should create a new user with correct username when username claim exists', async () => {
  const samlProfile = { ...baseProfile };
  const { user } = (await validate(samlProfile)) as any;
  expect(user.username).toBe(samlProfile.username);
  expect(user.provider).toBe('saml');
  expect(user.samlId).toBe(samlProfile.nameID);
  expect(user.email).toBe(samlProfile.email);
  expect(user.name).toBe(`${samlProfile.given_name} ${samlProfile.family_name}`);
});
it('should use given_name as username when username claim is missing', async () => {
  const { username: _username, ...samlProfile } = baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.username).toBe(samlProfile.given_name);
  expect(user.provider).toBe('saml');
});
it('should use email as username when username and given_name are missing', async () => {
  const { username: _username, given_name: _given, ...samlProfile } = baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.username).toBe(samlProfile.email);
  expect(user.provider).toBe('saml');
});
it('should override username with SAML_USERNAME_CLAIM when set', async () => {
  process.env.SAML_USERNAME_CLAIM = 'nameID';
  const samlProfile = { ...baseProfile };
  const { user } = (await validate(samlProfile)) as any;
  expect(user.username).toBe(samlProfile.nameID);
  expect(user.provider).toBe('saml');
});
// Full-name resolution: "given family" > given > family > username > email,
// with SAML_NAME_CLAIM taking precedence when set.
it('should set the full name correctly when given_name and family_name exist', async () => {
  const samlProfile = { ...baseProfile };
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(`${samlProfile.given_name} ${samlProfile.family_name}`);
});
it('should set the full name correctly when given_name exist', async () => {
  const { family_name: _family, ...samlProfile } = baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(samlProfile.given_name);
});
it('should set the full name correctly when family_name exist', async () => {
  const { given_name: _given, ...samlProfile } = baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(samlProfile.family_name);
});
it('should set the full name correctly when username exist', async () => {
  const { family_name: _family, given_name: _given, ...samlProfile } = baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(samlProfile.username);
});
it('should set the full name correctly when email only exist', async () => {
  const { family_name: _family, given_name: _given, username: _username, ...samlProfile } =
    baseProfile;
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(samlProfile.email);
});
it('should set the full name correctly with SAML_NAME_CLAIM when set', async () => {
  process.env.SAML_NAME_CLAIM = 'custom_name';
  const samlProfile = { ...baseProfile };
  const { user } = (await validate(samlProfile)) as any;
  expect(user.name).toBe(samlProfile.custom_name);
});
// Existing-account takeover and avatar-download behavior.
it('should update an existing user on login', async () => {
  // findUser returns a previously-registered local account with the same email.
  mockedMethods.findUser.mockResolvedValue({
    _id: 'existing-user-id',
    provider: 'local',
    email: baseProfile.email,
    samlId: '',
    username: 'oldusername',
    name: 'Old Name',
  });
  const { user } = (await validate({ ...baseProfile })) as any;
  expect(user.provider).toBe('saml');
  expect(user.samlId).toBe(baseProfile.nameID);
  expect(user.username).toBe(baseProfile.username);
  expect(user.name).toBe(`${baseProfile.given_name} ${baseProfile.family_name}`);
  expect(user.email).toBe(baseProfile.email);
});
it('should attempt to download and save the avatar if picture is provided', async () => {
  const { user } = (await validate({ ...baseProfile })) as any;
  expect(global.fetch).toHaveBeenCalled();
  expect(user.avatar).toBe('/fake/path/to/avatar.png');
});
it('should not attempt to download avatar if picture is not provided', async () => {
  const { picture: _picture, ...samlProfile } = baseProfile;
  await validate(samlProfile);
  expect(global.fetch).not.toHaveBeenCalled();
});
});

View file

@ -276,7 +276,6 @@ const samlLogin = () => {
wantAssertionsSigned: process.env.SAML_USE_AUTHN_RESPONSE_SIGNED === 'true' ? false : true,
wantAuthnResponseSigned: process.env.SAML_USE_AUTHN_RESPONSE_SIGNED === 'true' ? true : false,
};
return new SamlStrategy(samlConfig, signOnVerify, () => {
logger.info('saml logout!');
});

View file

@ -1,6 +1,5 @@
import { logger } from '@librechat/data-schemas';
import { Profile } from 'passport';
import { VerifyCallback } from 'passport-oauth2';
import { getMethods } from '../initAuth';
import { isEnabled } from '../utils';
import { createSocialUser, handleExistingUser } from './helpers';
@ -15,26 +14,29 @@ export function socialLogin(
refreshToken: string,
idToken: string,
profile: Profile,
cb: VerifyCallback,
cb: any,
): Promise<void> => {
try {
const { email, id, avatarUrl, username, name, emailVerified } = getProfileDetails({
idToken,
profile,
});
const { findUser } = getMethods();
const oldUser = await findUser({ email: email?.trim() });
const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION ?? '');
if (oldUser) {
await handleExistingUser(oldUser, avatarUrl);
console.log('1', oldUser);
await handleExistingUser(oldUser, avatarUrl ?? '');
return cb(null, oldUser);
}
if (ALLOW_SOCIAL_REGISTRATION) {
const newUser = await createSocialUser({
email,
avatarUrl,
email: email ?? '',
avatarUrl: avatarUrl ?? '',
provider,
providerKey: `${provider}Id`,
providerId: id,

View file

@ -1,4 +1,3 @@
import { VerifyCallback } from 'passport-oauth2';
import { Profile } from 'passport';
import { IUser } from '@librechat/data-schemas';
@ -8,14 +7,14 @@ export interface GetProfileDetailsParams {
}
export type GetProfileDetails = (
params: GetProfileDetailsParams,
) => Partial<IUser> & { avatarUrl: string };
) => Partial<IUser> & { avatarUrl: string | null };
export type SocialLoginStrategy = (
accessToken: string,
refreshToken: string,
idToken: string,
profile: Profile,
cb: VerifyCallback,
cb: any,
) => Promise<void>;
export interface CreateSocialUserParams {

View file

@ -0,0 +1,466 @@
// Stub the shared logger so schema tests never touch real logging transports.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    info: jest.fn(),
    warn: jest.fn(),
    debug: jest.fn(),
    error: jest.fn(),
  },
}));
// file deepcode ignore NoHardcodedPasswords: No hard-coded passwords in tests
import { errorsToString } from 'librechat-data-provider';
import { loginSchema, registerSchema } from '@librechat/auth';
describe('Zod Schemas', () => {
describe('loginSchema', () => {
  /** Runs loginSchema.safeParse over an email/password pair. */
  const parseLogin = (email: string, password: string) =>
    loginSchema.safeParse({ email, password });

  it('should validate a correct login object', () => {
    expect(parseLogin('test@example.com', 'password123').success).toBe(true);
  });
  it('should invalidate an incorrect email', () => {
    expect(parseLogin('testexample.com', 'password123').success).toBe(false);
  });
  it('should invalidate a short password', () => {
    expect(parseLogin('test@example.com', 'pass').success).toBe(false);
  });
  it('should handle email with unusual characters', () => {
    for (const email of ['test+alias@example.com', 'test@subdomain.example.co.uk']) {
      expect(parseLogin(email, 'password123').success).toBe(true);
    }
  });
  it('should invalidate email without a domain', () => {
    expect(parseLogin('test@.com', 'password123').success).toBe(false);
  });
  it('should invalidate password with only spaces', () => {
    expect(parseLogin('test@example.com', ' ').success).toBe(false);
  });
  it('should invalidate password that is too long', () => {
    expect(parseLogin('test@example.com', 'a'.repeat(129)).success).toBe(false);
  });
  it('should invalidate empty email or password', () => {
    expect(parseLogin('', '').success).toBe(false);
  });
});
describe('registerSchema', () => {
  /** A fully valid registration payload; tests override fields as needed. */
  const validRegistration = {
    name: 'John Doe',
    username: 'john_doe',
    email: 'john@example.com',
    password: 'password123',
    confirm_password: 'password123',
  };
  /** Parses the valid payload merged with per-test overrides. */
  const parseRegistration = (overrides: Record<string, unknown> = {}) =>
    registerSchema.safeParse({ ...validRegistration, ...overrides });

  it('should validate a correct register object', () => {
    expect(parseRegistration().success).toBe(true);
  });
  it('should allow the username to be omitted', () => {
    const { username: _username, ...withoutUsername } = validRegistration;
    expect(registerSchema.safeParse(withoutUsername).success).toBe(true);
  });
  it('should invalidate a short name', () => {
    expect(parseRegistration({ name: 'Jo' }).success).toBe(false);
  });
  it('should handle empty username by transforming to null', () => {
    const result = parseRegistration({ username: '' });
    expect(result.success).toBe(true);
    expect(result.data.username).toBe(null);
  });
  it('should handle name with special characters', () => {
    for (const name of ['Jöhn Dœ', 'John <Doe>']) {
      expect(parseRegistration({ name }).success).toBe(true);
    }
  });
  it('should handle username with special characters', () => {
    for (const username of ['john.doe@', 'john..doe']) {
      expect(parseRegistration({ username }).success).toBe(true);
    }
  });
  it('should invalidate mismatched password and confirm_password', () => {
    expect(parseRegistration({ confirm_password: 'password124' }).success).toBe(false);
  });
  it('should handle email without a TLD', () => {
    expect(parseRegistration({ email: 'john@domain' }).success).toBe(false);
  });
  it('should handle email with multiple @ symbols', () => {
    expect(parseRegistration({ email: 'john@domain@com' }).success).toBe(false);
  });
  it('should handle name that is too long', () => {
    expect(parseRegistration({ name: 'a'.repeat(81) }).success).toBe(false);
  });
  it('should handle username that is too long', () => {
    expect(parseRegistration({ username: 'a'.repeat(81) }).success).toBe(false);
  });
  it('should handle password or confirm_password that is too long', () => {
    expect(
      parseRegistration({ password: 'a'.repeat(129), confirm_password: 'a'.repeat(129) }).success,
    ).toBe(false);
  });
  it('should handle password or confirm_password that is just spaces', () => {
    expect(parseRegistration({ password: ' ', confirm_password: ' ' }).success).toBe(false);
  });
  it('should handle null values for fields', () => {
    const result = registerSchema.safeParse({
      name: null,
      username: null,
      email: null,
      password: null,
      confirm_password: null,
    });
    expect(result.success).toBe(false);
  });
  it('should handle undefined values for fields', () => {
    const result = registerSchema.safeParse({
      name: undefined,
      username: undefined,
      email: undefined,
      password: undefined,
      confirm_password: undefined,
    });
    expect(result.success).toBe(false);
  });
  it('should handle extra fields not defined in the schema', () => {
    expect(parseRegistration({ extraField: "I shouldn't be here" }).success).toBe(true);
  });
  it('should handle username with special characters from various languages', () => {
    const usernames = [
      // General
      'éèäöü',
      // German
      'Jöhn.Döe@',
      'Jöhn_Ü',
      'Jöhnß',
      // French
      'Jéan-Piérre',
      'Élève',
      'Fiançée',
      'Mère',
      // Spanish
      'Niño',
      'Señor',
      'Muñoz',
      // Portuguese
      'João',
      'Coração',
      'Pão',
      // Italian
      'Pietro',
      'Bambino',
      'Forlì',
      // Romanian
      'Mâncare',
      'Școală',
      'Țară',
      // Catalan
      'Niç',
      'Màquina',
      'Çap',
      // Swedish
      'Fjärran',
      'Skål',
      'Öland',
      // Norwegian
      'Blåbær',
      'Fjord',
      'Årstid',
      // Danish
      'Flød',
      'Søster',
      'Århus',
      // Icelandic
      'Þór',
      'Ætt',
      'Öx',
      // Turkish
      'Şehir',
      'Çocuk',
      'Gözlük',
      // Polish
      'Łódź',
      'Część',
      'Świat',
      // Czech
      'Čaj',
      'Řeka',
      'Život',
      // Slovak
      'Kočka',
      'Ľudia',
      'Žaba',
      // Croatian
      'Čovjek',
      'Šuma',
      'Žaba',
      // Hungarian
      'Tűz',
      'Ősz',
      'Ünnep',
      // Finnish
      'Mäki',
      'Yö',
      'Äiti',
      // Estonian
      'Tänav',
      'Öö',
      'Ülikool',
      // Latvian
      'Ēka',
      'Ūdens',
      'Čempions',
      // Lithuanian
      'Ūsas',
      'Ąžuolas',
      'Čia',
      // Dutch
      'Maïs',
      'Geërfd',
      'Coördinatie',
    ];
    // Collect any username the schema rejects, keeping the Zod error for triage.
    const failingUsernames = usernames.flatMap((username) => {
      const result = parseRegistration({ username });
      return result.success ? [] : [{ username, error: result.error }];
    });
    if (failingUsernames.length > 0) {
      console.log('Failing Usernames:', failingUsernames);
    }
    expect(failingUsernames).toEqual([]);
  });
  it('should reject invalid usernames', () => {
    const invalidUsernames = [
      'john{doe}', // Contains `{` and `}`
      'j', // Only one character
      'a'.repeat(81), // More than 80 characters
      "' OR '1'='1'; --", // SQL Injection
      '{$ne: null}', // MongoDB Injection
      '<script>alert("XSS")</script>', // Basic XSS
      '"><script>alert("XSS")</script>', // XSS breaking out of an attribute
      '"><img src=x onerror=alert("XSS")>', // XSS using an image tag
    ];
    // Every entry above must fail validation.
    const failingUsernames = invalidUsernames.flatMap((username) => {
      const result = parseRegistration({ username });
      return result.success ? [] : [{ username, error: result.error }];
    });
    expect(failingUsernames.length).toEqual(invalidUsernames.length); // They should match since all invalidUsernames should fail.
  });
});
describe('errorsToString', () => {
  it('should convert errors to string', () => {
    // A too-short name produces a single Zod issue to stringify.
    const parsed = registerSchema.safeParse({
      name: 'Jo',
      username: 'john_doe',
      email: 'john@example.com',
      password: 'password123',
      confirm_password: 'password123',
    });
    const message = errorsToString(parsed.error.errors);
    expect(message).toBe('name: String must contain at least 3 character(s)');
  });
});
});

View file

@ -1,3 +1,4 @@
import { TransportOptions, SendMailOptions } from 'nodemailer';
export interface SendEmailParams {
email: string;
subject: string;
@ -13,3 +14,20 @@ export interface SendEmailResponse {
envelope: { from: string; to: string[] };
messageId: string;
}
export interface MailgunEmailParams {
to: string;
from: string;
subject: string;
html: string;
}
export interface MailgunResponse {
id: string;
message: string;
}
export interface SMTPParams {
transporterOptions: any;
mailOptions: SendMailOptions;
}

View file

@ -215,9 +215,6 @@ async function resizeAvatar({
} else if (input instanceof Buffer) {
imageBuffer = input;
} else if (typeof input === 'object' && input instanceof File) {
console.log(input);
console.log('----');
// @ts-ignore
const fileContent = await fs.promises.readFile(input?.path);
imageBuffer = Buffer.from(fileContent);
} else {
@ -229,6 +226,21 @@ async function resizeAvatar({
const height = metadata.height ?? 0;
const minSize = Math.min(width, height);
if (metadata.format === 'gif') {
const resizedBuffer = await sharp(imageBuffer, { animated: true })
.extract({
left: Math.floor((width - minSize) / 2),
top: Math.floor((height - minSize) / 2),
width: minSize,
height: minSize,
})
.resize(250, 250)
.gif()
.toBuffer();
return resizedBuffer;
}
const squaredBuffer = await sharp(imageBuffer)
.extract({
left: Math.floor((width - minSize) / 2),

View file

@ -1,29 +1,123 @@
import fs from 'fs';
import path from 'path';
import nodemailer, { TransportOptions } from 'nodemailer';
import nodemailer, { SentMessageInfo } from 'nodemailer';
import handlebars from 'handlebars';
import { createTokenHash, isEnabled } from '.';
import { createTokenHash } from '.';
import { logAxiosError } from '@librechat/api';
import { isEnabled } from '.';
import { IUser, logger } from '@librechat/data-schemas';
import { getMethods } from '../initAuth';
import { ObjectId } from 'mongoose';
import bcrypt from 'bcryptjs';
import { Request } from 'express';
import { SendEmailParams, SendEmailResponse } from '../types/email';
import FormData from 'form-data';
import axios, { AxiosResponse } from 'axios';
import { MailgunEmailParams, SendEmailParams, SMTPParams } from '../types/email';
const genericVerificationMessage = 'Please check your email to verify your email address.';
const domains = {
client: process.env.DOMAIN_CLIENT,
server: process.env.DOMAIN_SERVER,
};
/**
 * Sends an email using the Mailgun HTTP API.
 *
 * @async
 * @function sendEmailViaMailgun
 * @param {Object} params - The parameters for sending the email.
 * @param {string} params.to - The recipient's email address.
 * @param {string} params.from - The sender's email address.
 * @param {string} params.subject - The subject of the email.
 * @param {string} params.html - The HTML content of the email.
 * @returns {Promise<Object>} - A promise that resolves to the response from Mailgun API.
 * @throws {Error} If Mailgun credentials are missing or the request fails.
 */
const sendEmailViaMailgun = async ({
  to,
  from,
  subject,
  html,
}: MailgunEmailParams): Promise<SentMessageInfo> => {
  const mailgunApiKey: string | undefined = process.env.MAILGUN_API_KEY;
  const mailgunDomain: string | undefined = process.env.MAILGUN_DOMAIN;
  // Default to the Mailgun HTTP API origin. This must be an http(s) base URL,
  // not an SMTP hostname: the previous default ('smtp.mailgun.org') produced a
  // protocol-less URL that axios cannot resolve.
  const mailgunHost: string = process.env.MAILGUN_HOST || 'https://api.mailgun.net';

  if (!mailgunApiKey || !mailgunDomain) {
    throw new Error('Mailgun API key and domain are required');
  }

  const formData = new FormData();
  formData.append('from', from);
  formData.append('to', to);
  formData.append('subject', subject);
  formData.append('html', html);
  // Disable click tracking so Mailgun does not rewrite links in the email body.
  formData.append('o:tracking-clicks', 'no');

  try {
    const response = await axios.post(`${mailgunHost}/v3/${mailgunDomain}/messages`, formData, {
      headers: {
        ...formData.getHeaders(),
        // Mailgun uses HTTP Basic auth with the literal username 'api'.
        Authorization: `Basic ${Buffer.from(`api:${mailgunApiKey}`).toString('base64')}`,
      },
    });
    return response.data;
  } catch (error: any) {
    throw new Error(logAxiosError({ error, message: 'Failed to send email via Mailgun' }));
  }
};
/**
 * Sends an email over SMTP via Nodemailer.
 *
 * @async
 * @function sendEmailViaSMTP
 * @param {Object} params - The parameters for sending the email.
 * @param {Object} params.transporterOptions - The Nodemailer transporter configuration options.
 * @param {Object} params.mailOptions - The message options (from, to, subject, html, envelope).
 * @returns {Promise<Object>} - A promise that resolves to the info object of the sent email.
 */
const sendEmailViaSMTP = async ({
  transporterOptions,
  mailOptions,
}: SMTPParams): Promise<SentMessageInfo> => {
  const transport = nodemailer.createTransport(transporterOptions);
  return transport.sendMail(mailOptions);
};
export const sendEmail = async ({
email,
subject,
payload,
template,
throwError = true,
}: SendEmailParams): Promise<SendEmailResponse | Error> => {
}: SendEmailParams): Promise<SentMessageInfo | Error> => {
try {
const transporterOptions: TransportOptions = {
// Read and compile the email template
const source = fs.readFileSync(path.join(__dirname, 'emails', template), 'utf8');
const compiledTemplate = handlebars.compile(source);
const html = compiledTemplate(payload);
// Prepare common email data
const fromName = process.env.EMAIL_FROM_NAME || process.env.APP_TITLE;
const fromEmail = process.env.EMAIL_FROM;
const fromAddress = `"${fromName}" <${fromEmail}>`;
const toAddress = `"${payload.name}" <${email}>`;
// Check if Mailgun is configured
if (process.env.MAILGUN_API_KEY && process.env.MAILGUN_DOMAIN) {
logger.debug('[sendEmail] Using Mailgun provider');
return await sendEmailViaMailgun({
from: fromAddress,
to: toAddress,
subject: subject,
html: html,
});
}
// Default to SMTP
logger.debug('[sendEmail] Using SMTP provider');
const transporterOptions: any = {
secure: process.env.EMAIL_ENCRYPTION === 'tls',
requireTLS: process.env.EMAIL_ENCRYPTION === 'starttls',
tls: {
@ -49,24 +143,21 @@ export const sendEmail = async ({
transporterOptions.port = Number(process.env.EMAIL_PORT ?? 25);
}
const transporter = nodemailer.createTransport(transporterOptions);
const templatePath = path.join(__dirname, 'utils/', template);
const source = fs.readFileSync(templatePath, 'utf8');
const compiledTemplate = handlebars.compile(source);
const mailOptions = {
from: `"${process.env.EMAIL_FROM_NAME || process.env.APP_TITLE}" <${process.env.EMAIL_FROM}>`,
to: `"${payload.name}" <${email}>`,
// Header address should contain name-addr
from: fromAddress,
to: toAddress,
envelope: {
from: process.env.EMAIL_FROM!,
// Envelope from should contain addr-spec
// Mistake in the Nodemailer documentation?
from: fromEmail,
to: email,
},
subject,
html: compiledTemplate(payload),
subject: subject,
html: html,
};
return await transporter.sendMail(mailOptions);
return await sendEmailViaSMTP({ transporterOptions, mailOptions });
} catch (error: any) {
if (throwError) {
throw error;

View file

@ -22,17 +22,71 @@
<!--<![endif]-->
<title></title>
<style type='text/css'>
@media (prefers-color-scheme: dark) { .darkmode { background-color: #212121 !important; }
.darkmode p { color: #ffffff !important; } } @media only screen and (min-width: 520px) {
.u-row { width: 500px !important; } .u-row .u-col { vertical-align: top; } .u-row .u-col-100 {
width: 500px !important; } } @media (max-width: 520px) { .u-row-container { max-width: 100%
!important; padding-left: 0px !important; padding-right: 0px !important; } .u-row .u-col {
min-width: 320px !important; max-width: 100% !important; display: block !important; } .u-row {
width: 100% !important; } .u-col { width: 100% !important; } .u-col>div { margin: 0 auto; } }
body { margin: 0; padding: 0; } table, tr, td { vertical-align: top; border-collapse:
collapse; } .ie-container table, .mso-container table { table-layout: fixed; } * {
line-height: inherit; } a[x-apple-data-detectors='true'] { color: inherit !important;
text-decoration: none !important; } table, td { color: #ffffff; }
@media (prefers-color-scheme: dark) {
.darkmode {
background-color: #212121 !important;
}
.darkmode p {
color: #ffffff !important;
}
}
@media only screen and (min-width: 520px) {
.u-row {
width: 500px !important;
}
.u-row .u-col {
vertical-align: top;
}
.u-row .u-col-100 {
width: 500px !important;
}
}
@media (max-width: 520px) {
.u-row-container {
max-width: 100% !important;
padding-left: 0px !important;
padding-right: 0px !important;
}
.u-row .u-col {
min-width: 320px !important;
max-width: 100% !important;
display: block !important;
}
.u-row {
width: 100% !important;
}
.u-col {
width: 100% !important;
}
.u-col > div {
margin: 0 auto;
}
}
body {
margin: 0;
padding: 0;
}
table,
tr,
td {
vertical-align: top;
border-collapse: collapse;
}
.ie-container table,
.mso-container table {
table-layout: fixed;
}
* {
line-height: inherit;
}
a[x-apple-data-detectors='true'] {
color: inherit !important;
text-decoration: none !important;
}
table,
td {
color: #ffffff;
}
</style>
</head>

View file

@ -22,18 +22,78 @@
<!--<![endif]-->
<title></title>
<style type='text/css'>
@media (prefers-color-scheme: dark) { .darkmode { background-color: #212121 !important; }
.darkmode p { color: #ffffff !important; } } @media only screen and (min-width: 520px) {
.u-row { width: 500px !important; } .u-row .u-col { vertical-align: top; } .u-row .u-col-100 {
width: 500px !important; } } @media (max-width: 520px) { .u-row-container { max-width: 100%
!important; padding-left: 0px !important; padding-right: 0px !important; } .u-row .u-col {
min-width: 320px !important; max-width: 100% !important; display: block !important; } .u-row {
width: 100% !important; } .u-col { width: 100% !important; } .u-col>div { margin: 0 auto; } }
body { margin: 0; padding: 0; } table, tr, td { vertical-align: top; border-collapse:
collapse; } p { margin: 0; } .ie-container table, .mso-container table { table-layout: fixed;
} * { line-height: inherit; } a[x-apple-data-detectors='true'] { color: inherit !important;
text-decoration: none !important; } table, td { color: #ffffff; } #u_body a { color: #0000ee;
text-decoration: underline; }
@media (prefers-color-scheme: dark) {
.darkmode {
background-color: #212121 !important;
}
.darkmode p {
color: #ffffff !important;
}
}
@media only screen and (min-width: 520px) {
.u-row {
width: 500px !important;
}
.u-row .u-col {
vertical-align: top;
}
.u-row .u-col-100 {
width: 500px !important;
}
}
@media (max-width: 520px) {
.u-row-container {
max-width: 100% !important;
padding-left: 0px !important;
padding-right: 0px !important;
}
.u-row .u-col {
min-width: 320px !important;
max-width: 100% !important;
display: block !important;
}
.u-row {
width: 100% !important;
}
.u-col {
width: 100% !important;
}
.u-col > div {
margin: 0 auto;
}
}
body {
margin: 0;
padding: 0;
}
table,
tr,
td {
vertical-align: top;
border-collapse: collapse;
}
p {
margin: 0;
}
.ie-container table,
.mso-container table {
table-layout: fixed;
}
* {
line-height: inherit;
}
a[x-apple-data-detectors='true'] {
color: inherit !important;
text-decoration: none !important;
}
table,
td {
color: #ffffff;
}
#u_body a {
color: #0000ee;
text-decoration: underline;
}
</style>
</head>

View file

@ -22,18 +22,75 @@
<!--<![endif]-->
<title></title>
<style type='text/css'>
@media (prefers-color-scheme: dark) { .darkmode { background-color: #212121 !important; }
.darkmode p { color: #ffffff !important; } } @media only screen and (min-width: 520px) {
.u-row { width: 500px !important; } .u-row .u-col { vertical-align: top; } .u-row .u-col-100 {
width: 500px !important; } } @media (max-width: 520px) { .u-row-container { max-width: 100%
!important; padding-left: 0px !important; padding-right: 0px !important; } .u-row .u-col {
min-width: 320px !important; max-width: 100% !important; display: block !important; } .u-row {
width: 100% !important; } .u-col { width: 100% !important; } .u-col>div { margin: 0 auto; } }
body { margin: 0; padding: 0; } table, tr, td { vertical-align: top; border-collapse:
collapse; } .ie-container table, .mso-container table { table-layout: fixed; } * {
line-height: inherit; } a[x-apple-data-detectors='true'] { color: inherit !important;
text-decoration: none !important; } table, td { color: #ffffff; } #u_body a { color: #0000ee;
text-decoration: underline; }
@media (prefers-color-scheme: dark) {
.darkmode {
background-color: #212121 !important;
}
.darkmode p {
color: #ffffff !important;
}
}
@media only screen and (min-width: 520px) {
.u-row {
width: 500px !important;
}
.u-row .u-col {
vertical-align: top;
}
.u-row .u-col-100 {
width: 500px !important;
}
}
@media (max-width: 520px) {
.u-row-container {
max-width: 100% !important;
padding-left: 0px !important;
padding-right: 0px !important;
}
.u-row .u-col {
min-width: 320px !important;
max-width: 100% !important;
display: block !important;
}
.u-row {
width: 100% !important;
}
.u-col {
width: 100% !important;
}
.u-col > div {
margin: 0 auto;
}
}
body {
margin: 0;
padding: 0;
}
table,
tr,
td {
vertical-align: top;
border-collapse: collapse;
}
.ie-container table,
.mso-container table {
table-layout: fixed;
}
* {
line-height: inherit;
}
a[x-apple-data-detectors='true'] {
color: inherit !important;
text-decoration: none !important;
}
table,
td {
color: #ffffff;
}
#u_body a {
color: #0000ee;
text-decoration: underline;
}
</style>
</head>

View file

@ -39,13 +39,24 @@ function isEnabled(value: boolean | string) {
return false;
}
/**
* Check if email configuration is set
* @returns {Boolean}
*/
function checkEmailConfig() {
return (
// Check if Mailgun is configured
const hasMailgunConfig =
!!process.env.MAILGUN_API_KEY && !!process.env.MAILGUN_DOMAIN && !!process.env.EMAIL_FROM;
// Check if SMTP is configured
const hasSMTPConfig =
(!!process.env.EMAIL_SERVICE || !!process.env.EMAIL_HOST) &&
!!process.env.EMAIL_USERNAME &&
!!process.env.EMAIL_PASSWORD &&
!!process.env.EMAIL_FROM
);
!!process.env.EMAIL_FROM;
// Return true if either Mailgun or SMTP is properly configured
return hasMailgunConfig || hasSMTPConfig;
}
export { checkEmailConfig, isEnabled, createTokenHash };

View file

@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.7.86",
"version": "0.7.87",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View file

@ -1,4 +1,28 @@
import { StdioOptionsSchema, StreamableHTTPOptionsSchema, processMCPEnv, MCPOptions } from '../src/mcp';
import type { TUser } from 'librechat-data-provider';
import {
StreamableHTTPOptionsSchema,
StdioOptionsSchema,
processMCPEnv,
MCPOptions,
} from '../src/mcp';
// Helper function to create test user objects; overrides win over defaults.
function createTestUser(
  overrides: Partial<TUser> & Record<string, unknown> = {},
): TUser & Record<string, unknown> {
  const defaults = {
    id: 'test-user-id',
    username: 'testuser',
    email: 'test@example.com',
    name: 'Test User',
    avatar: 'https://example.com/avatar.png',
    provider: 'email',
    role: 'user',
    createdAt: new Date('2021-01-01').toISOString(),
    updatedAt: new Date('2021-01-01').toISOString(),
  };
  return { ...defaults, ...overrides };
}
describe('Environment Variable Extraction (MCP)', () => {
const originalEnv = process.env;
@ -91,13 +115,13 @@ describe('Environment Variable Extraction (MCP)', () => {
// Type is now required, so parsing should fail
expect(() => StreamableHTTPOptionsSchema.parse(options)).toThrow();
// With type provided, it should pass
const validOptions = {
type: 'streamable-http' as const,
url: 'https://example.com/api',
};
const result = StreamableHTTPOptionsSchema.parse(validOptions);
expect(result.type).toBe('streamable-http');
});
@ -113,7 +137,7 @@ describe('Environment Variable Extraction (MCP)', () => {
};
const result = StreamableHTTPOptionsSchema.parse(options);
expect(result.headers).toEqual(options.headers);
});
});
@ -165,7 +189,7 @@ describe('Environment Variable Extraction (MCP)', () => {
});
it('should process user ID in headers field', () => {
const userId = 'test-user-123';
const user = createTestUser({ id: 'test-user-123' });
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
@ -176,7 +200,7 @@ describe('Environment Variable Extraction (MCP)', () => {
},
};
const result = processMCPEnv(obj, userId);
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
Authorization: 'test-api-key-value',
@ -217,15 +241,15 @@ describe('Environment Variable Extraction (MCP)', () => {
};
// Process for two different users
const user1Id = 'user-123';
const user2Id = 'user-456';
const user1 = createTestUser({ id: 'user-123' });
const user2 = createTestUser({ id: 'user-456' });
const resultUser1 = processMCPEnv(baseConfig, user1Id);
const resultUser2 = processMCPEnv(baseConfig, user2Id);
const resultUser1 = processMCPEnv(baseConfig, user1);
const resultUser2 = processMCPEnv(baseConfig, user2);
// Verify each has the correct user ID
expect('headers' in resultUser1 && resultUser1.headers?.['User-Id']).toBe(user1Id);
expect('headers' in resultUser2 && resultUser2.headers?.['User-Id']).toBe(user2Id);
expect('headers' in resultUser1 && resultUser1.headers?.['User-Id']).toBe('user-123');
expect('headers' in resultUser2 && resultUser2.headers?.['User-Id']).toBe('user-456');
// Verify they're different objects
expect(resultUser1).not.toBe(resultUser2);
@ -239,11 +263,11 @@ describe('Environment Variable Extraction (MCP)', () => {
expect(baseConfig.headers?.['User-Id']).toBe('{{LIBRECHAT_USER_ID}}');
// Second user's config should be unchanged
expect('headers' in resultUser2 && resultUser2.headers?.['User-Id']).toBe(user2Id);
expect('headers' in resultUser2 && resultUser2.headers?.['User-Id']).toBe('user-456');
});
it('should process headers in streamable-http options', () => {
const userId = 'test-user-123';
const user = createTestUser({ id: 'test-user-123' });
const obj: MCPOptions = {
type: 'streamable-http',
url: 'https://example.com',
@ -254,7 +278,7 @@ describe('Environment Variable Extraction (MCP)', () => {
},
};
const result = processMCPEnv(obj, userId);
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
Authorization: 'test-api-key-value',
@ -262,7 +286,7 @@ describe('Environment Variable Extraction (MCP)', () => {
'Content-Type': 'application/json',
});
});
it('should maintain streamable-http type in processed options', () => {
const obj: MCPOptions = {
type: 'streamable-http',
@ -273,5 +297,233 @@ describe('Environment Variable Extraction (MCP)', () => {
expect(result.type).toBe('streamable-http');
});
it('should process dynamic user fields in headers', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
username: 'testuser',
openidId: 'openid-123',
googleId: 'google-456',
emailVerified: true,
role: 'admin',
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'User-Name': '{{LIBRECHAT_USER_USERNAME}}',
OpenID: '{{LIBRECHAT_USER_OPENIDID}}',
'Google-ID': '{{LIBRECHAT_USER_GOOGLEID}}',
'Email-Verified': '{{LIBRECHAT_USER_EMAILVERIFIED}}',
'User-Role': '{{LIBRECHAT_USER_ROLE}}',
'Content-Type': 'application/json',
},
};
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
'User-Email': 'test@example.com',
'User-Name': 'testuser',
OpenID: 'openid-123',
'Google-ID': 'google-456',
'Email-Verified': 'true',
'User-Role': 'admin',
'Content-Type': 'application/json',
});
});
it('should handle missing user fields gracefully', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
username: undefined, // explicitly set to undefined to test missing field
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'User-Name': '{{LIBRECHAT_USER_USERNAME}}',
'Content-Type': 'application/json',
},
};
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
'User-Email': 'test@example.com',
'User-Name': '', // Empty string for missing field
'Content-Type': 'application/json',
});
});
it('should process user fields in env variables', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
ldapId: 'ldap-user-123',
});
const obj: MCPOptions = {
command: 'node',
args: ['server.js'],
env: {
USER_EMAIL: '{{LIBRECHAT_USER_EMAIL}}',
LDAP_ID: '{{LIBRECHAT_USER_LDAPID}}',
API_KEY: '${TEST_API_KEY}',
},
};
const result = processMCPEnv(obj, user);
expect('env' in result && result.env).toEqual({
USER_EMAIL: 'test@example.com',
LDAP_ID: 'ldap-user-123',
API_KEY: 'test-api-key-value',
});
});
it('should process user fields in URL', () => {
const user = createTestUser({
id: 'user-123',
username: 'testuser',
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com/api/{{LIBRECHAT_USER_USERNAME}}/stream',
};
const result = processMCPEnv(obj, user);
expect('url' in result && result.url).toBe('https://example.com/api/testuser/stream');
});
it('should handle boolean user fields', () => {
const user = createTestUser({
id: 'user-123',
emailVerified: true,
twoFactorEnabled: false,
termsAccepted: true,
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'Email-Verified': '{{LIBRECHAT_USER_EMAILVERIFIED}}',
'Two-Factor': '{{LIBRECHAT_USER_TWOFACTORENABLED}}',
'Terms-Accepted': '{{LIBRECHAT_USER_TERMSACCEPTED}}',
},
};
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
'Email-Verified': 'true',
'Two-Factor': 'false',
'Terms-Accepted': 'true',
});
});
it('should not process sensitive fields like password', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
password: 'secret-password',
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
'User-Password': '{{LIBRECHAT_USER_PASSWORD}}', // This should not be processed
},
};
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
'User-Email': 'test@example.com',
'User-Password': '{{LIBRECHAT_USER_PASSWORD}}', // Unchanged
});
});
it('should handle multiple occurrences of the same placeholder', () => {
const user = createTestUser({
id: 'user-123',
email: 'test@example.com',
});
const obj: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'Primary-Email': '{{LIBRECHAT_USER_EMAIL}}',
'Secondary-Email': '{{LIBRECHAT_USER_EMAIL}}',
'Backup-Email': '{{LIBRECHAT_USER_EMAIL}}',
},
};
const result = processMCPEnv(obj, user);
expect('headers' in result && result.headers).toEqual({
'Primary-Email': 'test@example.com',
'Secondary-Email': 'test@example.com',
'Backup-Email': 'test@example.com',
});
});
it('should support both id and _id properties for LIBRECHAT_USER_ID', () => {
// Test with 'id' property
const userWithId = createTestUser({
id: 'user-123',
email: 'test@example.com',
});
const obj1: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Id': '{{LIBRECHAT_USER_ID}}',
},
};
const result1 = processMCPEnv(obj1, userWithId);
expect('headers' in result1 && result1.headers?.['User-Id']).toBe('user-123');
// Test with '_id' property only (should not work since we only check 'id')
const userWithUnderscore = createTestUser({
id: undefined, // Remove default id to test _id
_id: 'user-456',
email: 'test@example.com',
});
const obj2: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Id': '{{LIBRECHAT_USER_ID}}',
},
};
const result2 = processMCPEnv(obj2, userWithUnderscore);
// Since we don't check _id, the placeholder should remain unchanged
expect('headers' in result2 && result2.headers?.['User-Id']).toBe('{{LIBRECHAT_USER_ID}}');
// Test with both properties (id takes precedence)
const userWithBoth = createTestUser({
id: 'user-789',
_id: 'user-000',
email: 'test@example.com',
});
const obj3: MCPOptions = {
type: 'sse',
url: 'https://example.com',
headers: {
'User-Id': '{{LIBRECHAT_USER_ID}}',
},
};
const result3 = processMCPEnv(obj3, userWithBoth);
expect('headers' in result3 && result3.headers?.['User-Id']).toBe('user-789');
});
});
});

View file

@ -254,6 +254,7 @@ export const getAllPromptGroups = () => `${prompts()}/all`;
export const roles = () => '/api/roles';
export const getRole = (roleName: string) => `${roles()}/${roleName.toLowerCase()}`;
export const updatePromptPermissions = (roleName: string) => `${getRole(roleName)}/prompts`;
export const updateMemoryPermissions = (roleName: string) => `${getRole(roleName)}/memories`;
export const updateAgentPermissions = (roleName: string) => `${getRole(roleName)}/agents`;
/* Conversation Tags */
@ -283,3 +284,8 @@ export const confirmTwoFactor = () => '/api/auth/2fa/confirm';
export const disableTwoFactor = () => '/api/auth/2fa/disable';
export const regenerateBackupCodes = () => '/api/auth/2fa/backup/regenerate';
export const verifyTwoFactorTemp = () => '/api/auth/2fa/verify-temp';
/* Memories */
export const memories = () => '/api/memories';
export const memory = (key: string) => `${memories()}/${encodeURIComponent(key)}`;
export const memoryPreferences = () => `${memories()}/preferences`;

View file

@ -244,21 +244,26 @@ export const defaultAgentCapabilities = [
AgentCapabilities.ocr,
];
export const agentsEndpointSChema = baseEndpointSchema.merge(
z.object({
/* agents specific */
recursionLimit: z.number().optional(),
disableBuilder: z.boolean().optional(),
maxRecursionLimit: z.number().optional(),
allowedProviders: z.array(z.union([z.string(), eModelEndpointSchema])).optional(),
capabilities: z
.array(z.nativeEnum(AgentCapabilities))
.optional()
.default(defaultAgentCapabilities),
}),
);
export const agentsEndpointSchema = baseEndpointSchema
.merge(
z.object({
/* agents specific */
recursionLimit: z.number().optional(),
disableBuilder: z.boolean().optional().default(false),
maxRecursionLimit: z.number().optional(),
allowedProviders: z.array(z.union([z.string(), eModelEndpointSchema])).optional(),
capabilities: z
.array(z.nativeEnum(AgentCapabilities))
.optional()
.default(defaultAgentCapabilities),
}),
)
.default({
disableBuilder: false,
capabilities: defaultAgentCapabilities,
});
export type TAgentsEndpoint = z.infer<typeof agentsEndpointSChema>;
export type TAgentsEndpoint = z.infer<typeof agentsEndpointSchema>;
export const endpointSchema = baseEndpointSchema.merge(
z.object({
@ -493,6 +498,7 @@ export const intefaceSchema = z
sidePanel: z.boolean().optional(),
multiConvo: z.boolean().optional(),
bookmarks: z.boolean().optional(),
memories: z.boolean().optional(),
presets: z.boolean().optional(),
prompts: z.boolean().optional(),
agents: z.boolean().optional(),
@ -508,6 +514,7 @@ export const intefaceSchema = z
presets: true,
multiConvo: true,
bookmarks: true,
memories: true,
prompts: true,
agents: true,
temporaryChat: true,
@ -586,6 +593,7 @@ export type TStartupConfig = {
export enum OCRStrategy {
MISTRAL_OCR = 'mistral_ocr',
CUSTOM_OCR = 'custom_ocr',
AZURE_MISTRAL_OCR = 'azure_mistral_ocr',
}
export enum SearchCategories {
@ -649,11 +657,35 @@ export const balanceSchema = z.object({
refillAmount: z.number().optional().default(10000),
});
export const memorySchema = z.object({
disabled: z.boolean().optional(),
validKeys: z.array(z.string()).optional(),
tokenLimit: z.number().optional(),
personalize: z.boolean().default(true),
messageWindowSize: z.number().optional().default(5),
agent: z
.union([
z.object({
id: z.string(),
}),
z.object({
provider: z.string(),
model: z.string(),
instructions: z.string().optional(),
model_parameters: z.record(z.any()).optional(),
}),
])
.optional(),
});
export type TMemoryConfig = z.infer<typeof memorySchema>;
export const configSchema = z.object({
version: z.string(),
cache: z.boolean().default(true),
ocr: ocrSchema.optional(),
webSearch: webSearchSchema.optional(),
memory: memorySchema.optional(),
secureImageLinks: z.boolean().optional(),
imageOutputType: z.nativeEnum(EImageOutputType).default(EImageOutputType.PNG),
includedTools: z.array(z.string()).optional(),
@ -694,7 +726,7 @@ export const configSchema = z.object({
[EModelEndpoint.azureOpenAI]: azureEndpointSchema.optional(),
[EModelEndpoint.azureAssistants]: assistantEndpointSchema.optional(),
[EModelEndpoint.assistants]: assistantEndpointSchema.optional(),
[EModelEndpoint.agents]: agentsEndpointSChema.optional(),
[EModelEndpoint.agents]: agentsEndpointSchema.optional(),
[EModelEndpoint.custom]: z.array(endpointSchema.partial()).optional(),
[EModelEndpoint.bedrock]: baseEndpointSchema.optional(),
})
@ -1291,6 +1323,10 @@ export enum SettingsTabValues {
* Chat input commands
*/
COMMANDS = 'commands',
/**
* Tab for Personalization Settings
*/
PERSONALIZATION = 'personalization',
}
export enum STTProviders {
@ -1328,7 +1364,7 @@ export enum Constants {
/** Key for the app's version. */
VERSION = 'v0.7.8',
/** Key for the Custom Config's version (librechat.yaml). */
CONFIG_VERSION = '1.2.6',
CONFIG_VERSION = '1.2.8',
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */
NO_PARENT = '00000000-0000-0000-0000-000000000000',
/** Standard value for the initial conversationId before a request is sent */
@ -1398,6 +1434,10 @@ export enum LocalStorageKeys {
LAST_CODE_TOGGLE_ = 'LAST_CODE_TOGGLE_',
/** Last checked toggle for Web Search per conversation ID */
LAST_WEB_SEARCH_TOGGLE_ = 'LAST_WEB_SEARCH_TOGGLE_',
/** Key for the last selected agent provider */
LAST_AGENT_PROVIDER = 'lastAgentProvider',
/** Key for the last selected agent model */
LAST_AGENT_MODEL = 'lastAgentModel',
}
export enum ForkOptions {

View file

@ -13,11 +13,11 @@ export default function createPayload(submission: t.TSubmission) {
ephemeralAgent,
} = submission;
const { conversationId } = s.tConvoUpdateSchema.parse(conversation);
const { endpoint, endpointType } = endpointOption as {
const { endpoint: _e, endpointType } = endpointOption as {
endpoint: s.EModelEndpoint;
endpointType?: s.EModelEndpoint;
};
const endpoint = _e as s.EModelEndpoint;
let server = EndpointURLs[endpointType ?? endpoint];
const isEphemeral = s.isEphemeralAgent(endpoint, ephemeralAgent);
@ -32,6 +32,7 @@ export default function createPayload(submission: t.TSubmission) {
const payload: t.TPayload = {
...userMessage,
...endpointOption,
endpoint,
ephemeralAgent: isEphemeral ? ephemeralAgent : undefined,
isContinued: !!(isEdited && isContinued),
conversationId,

View file

@ -718,6 +718,12 @@ export function updateAgentPermissions(
return request.put(endpoints.updateAgentPermissions(variables.roleName), variables.updates);
}
export function updateMemoryPermissions(
variables: m.UpdateMemoryPermVars,
): Promise<m.UpdatePermResponse> {
return request.put(endpoints.updateMemoryPermissions(variables.roleName), variables.updates);
}
/* Tags */
export function getConversationTags(): Promise<t.TConversationTagsResponse> {
return request.get(endpoints.conversationTags());
@ -799,3 +805,33 @@ export function verifyTwoFactorTemp(
): Promise<t.TVerify2FATempResponse> {
return request.post(endpoints.verifyTwoFactorTemp(), payload);
}
/* Memories */
export const getMemories = (): Promise<q.MemoriesResponse> => {
return request.get(endpoints.memories());
};
export const deleteMemory = (key: string): Promise<void> => {
return request.delete(endpoints.memory(key));
};
export const updateMemory = (
key: string,
value: string,
originalKey?: string,
): Promise<q.TUserMemory> => {
return request.patch(endpoints.memory(originalKey || key), { key, value });
};
export const updateMemoryPreferences = (preferences: {
memories: boolean;
}): Promise<{ updated: boolean; preferences: { memories: boolean } }> => {
return request.patch(endpoints.memoryPreferences(), preferences);
};
export const createMemory = (data: {
key: string;
value: string;
}): Promise<{ created: boolean; memory: q.TUserMemory }> => {
return request.post(endpoints.memories(), data);
};

View file

@ -16,6 +16,8 @@ export * from './models';
export * from './mcp';
/* web search */
export * from './web';
/* memory */
export * from './memory';
/* RBAC */
export * from './permissions';
export * from './roles';

View file

@ -46,6 +46,8 @@ export enum QueryKeys {
health = 'health',
userTerms = 'userTerms',
banner = 'banner',
/* Memories */
memories = 'memories',
}
export enum MutationKeys {
@ -70,4 +72,5 @@ export enum MutationKeys {
updateRole = 'updateRole',
enableTwoFactor = 'enableTwoFactor',
verifyTwoFactor = 'verifyTwoFactor',
updateMemoryPreferences = 'updateMemoryPreferences',
}

View file

@ -1,4 +1,5 @@
import { z } from 'zod';
import type { TUser } from './types';
import { extractEnvVariable } from './utils';
const BaseOptionsSchema = z.object({
@ -7,6 +8,13 @@ const BaseOptionsSchema = z.object({
initTimeout: z.number().optional(),
/** Controls visibility in chat dropdown menu (MCPSelect) */
chatMenu: z.boolean().optional(),
/**
* Controls server instruction behavior:
* - undefined/not set: No instructions included (default)
* - true: Use server-provided instructions
* - string: Use custom instructions (overrides server-provided)
*/
serverInstructions: z.union([z.boolean(), z.string()]).optional(),
});
export const StdioOptionsSchema = BaseOptionsSchema.extend({
@ -114,12 +122,58 @@ export const MCPServersSchema = z.record(z.string(), MCPOptionsSchema);
export type MCPOptions = z.infer<typeof MCPOptionsSchema>;
/**
* Recursively processes an object to replace environment variables in string values
* @param {MCPOptions} obj - The object to process
* @param {string} [userId] - The user ID
* @returns {MCPOptions} - The processed object with environment variables replaced
* List of allowed user fields that can be used in MCP environment variables.
* These are non-sensitive string/boolean fields from the IUser interface.
*/
export function processMCPEnv(obj: Readonly<MCPOptions>, userId?: string): MCPOptions {
// Whitelist of non-sensitive IUser fields that may be injected through
// `{{LIBRECHAT_USER_<FIELD>}}` placeholders. Sensitive fields (e.g. password)
// are deliberately omitted so their placeholders are never substituted.
const ALLOWED_USER_FIELDS = [
  'name',
  'username',
  'email',
  'provider',
  'role',
  'googleId',
  'facebookId',
  'openidId',
  'samlId',
  'ldapId',
  'githubId',
  'discordId',
  'appleId',
  'emailVerified',
  'twoFactorEnabled',
  'termsAccepted',
] as const;
/**
 * Replaces `{{LIBRECHAT_USER_<FIELD>}}` placeholders in a string with the
 * corresponding user field values.
 *
 * Only fields listed in `ALLOWED_USER_FIELDS` are substituted, so sensitive
 * fields (e.g. password) are left untouched. A null/undefined field value is
 * replaced with an empty string; every occurrence of a placeholder is replaced.
 *
 * @param value - The string value to process
 * @param user - The user object supplying replacement values
 * @returns The processed string with placeholders replaced
 */
function processUserPlaceholders(value: string, user?: TUser): string {
  if (!user || typeof value !== 'string') {
    return value;
  }
  for (const field of ALLOWED_USER_FIELDS) {
    const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
    if (!value.includes(placeholder)) {
      continue;
    }
    const fieldValue = user[field as keyof TUser];
    const replacementValue = fieldValue != null ? String(fieldValue) : '';
    // Replace every occurrence via split/join rather than `new RegExp(placeholder, 'g')`:
    // the placeholder contains regex metacharacters (`{`, `}`) that would need
    // escaping to be strictly safe as a pattern.
    value = value.split(placeholder).join(replacementValue);
  }
  return value;
}
/**
* Recursively processes an object to replace environment variables in string values
* @param obj - The object to process
* @param user - The user object containing all user fields
* @returns - The processed object with environment variables replaced
*/
export function processMCPEnv(obj: Readonly<MCPOptions>, user?: TUser): MCPOptions {
if (obj === null || obj === undefined) {
return obj;
}
@ -129,23 +183,31 @@ export function processMCPEnv(obj: Readonly<MCPOptions>, userId?: string): MCPOp
if ('env' in newObj && newObj.env) {
const processedEnv: Record<string, string> = {};
for (const [key, value] of Object.entries(newObj.env)) {
processedEnv[key] = extractEnvVariable(value);
let processedValue = extractEnvVariable(value);
processedValue = processUserPlaceholders(processedValue, user);
processedEnv[key] = processedValue;
}
newObj.env = processedEnv;
} else if ('headers' in newObj && newObj.headers) {
const processedHeaders: Record<string, string> = {};
for (const [key, value] of Object.entries(newObj.headers)) {
if (value === '{{LIBRECHAT_USER_ID}}' && userId != null && userId) {
processedHeaders[key] = userId;
const userId = user?.id;
if (value === '{{LIBRECHAT_USER_ID}}' && userId != null) {
processedHeaders[key] = String(userId);
continue;
}
processedHeaders[key] = extractEnvVariable(value);
let processedValue = extractEnvVariable(value);
processedValue = processUserPlaceholders(processedValue, user);
processedHeaders[key] = processedValue;
}
newObj.headers = processedHeaders;
}
if ('url' in newObj && newObj.url) {
newObj.url = extractEnvVariable(newObj.url);
let processedUrl = extractEnvVariable(newObj.url);
processedUrl = processUserPlaceholders(processedUrl, user);
newObj.url = processedUrl;
}
return newObj;

View file

@ -0,0 +1,62 @@
import type { TCustomConfig, TMemoryConfig } from './config';
/**
 * Loads and validates the memory configuration from `librechat.yaml`.
 *
 * A config explicitly marked `disabled: true` is returned unchanged. Otherwise
 * the agent section must either reference an agent by `id`, or specify both a
 * `provider` and a `model`; if neither form is valid, the config is returned
 * with `disabled: true` forced on.
 *
 * @param config - The memory configuration from librechat.yaml
 * @returns The validated memory configuration, or undefined when absent
 */
export function loadMemoryConfig(config: TCustomConfig['memory']): TMemoryConfig | undefined {
  if (!config) {
    return undefined;
  }
  if (config.disabled === true) {
    // Explicitly disabled: no further validation needed.
    return config;
  }
  const agent = config.agent;
  const agentIsValid =
    agent != null &&
    (('id' in agent && Boolean(agent.id)) ||
      ('provider' in agent &&
        'model' in agent &&
        Boolean(agent.provider) &&
        Boolean(agent.model)));
  // An invalid agent definition makes the feature unusable, so treat it as disabled.
  return agentIsValid ? config : { ...config, disabled: true };
}
/**
 * Determines whether the memory feature is usable for a given configuration.
 *
 * Memory is enabled only when a config exists, is not explicitly disabled, and
 * carries a valid agent section (an agent `id`, or a `provider` + `model` pair).
 *
 * @param config - The memory configuration
 * @returns True if memory is enabled, false otherwise
 */
export function isMemoryEnabled(config: TMemoryConfig | undefined): boolean {
  if (!config || config.disabled === true) {
    return false;
  }
  const agent = config.agent;
  if (agent == null) {
    return false;
  }
  if ('id' in agent && Boolean(agent.id)) {
    return true;
  }
  return (
    'provider' in agent && 'model' in agent && Boolean(agent.provider) && Boolean(agent.model)
  );
}

View file

@ -225,13 +225,15 @@ const extractOmniVersion = (modelStr: string): string => {
export const getResponseSender = (endpointOption: t.TEndpointOption): string => {
const {
model: _m,
endpoint,
endpoint: _e,
endpointType,
modelDisplayLabel: _mdl,
chatGptLabel: _cgl,
modelLabel: _ml,
} = endpointOption;
const endpoint = _e as EModelEndpoint;
const model = _m ?? '';
const modelDisplayLabel = _mdl ?? '';
const chatGptLabel = _cgl ?? '';

View file

@ -16,6 +16,10 @@ export enum PermissionTypes {
* Type for Agent Permissions
*/
AGENTS = 'AGENTS',
/**
* Type for Memory Permissions
*/
MEMORIES = 'MEMORIES',
/**
* Type for Multi-Conversation Permissions
*/
@ -45,6 +49,8 @@ export enum Permissions {
READ = 'READ',
READ_AUTHOR = 'READ_AUTHOR',
SHARE = 'SHARE',
/** Can disable if desired */
OPT_OUT = 'OPT_OUT',
}
export const promptPermissionsSchema = z.object({
@ -60,6 +66,15 @@ export const bookmarkPermissionsSchema = z.object({
});
export type TBookmarkPermissions = z.infer<typeof bookmarkPermissionsSchema>;
export const memoryPermissionsSchema = z.object({
[Permissions.USE]: z.boolean().default(true),
[Permissions.CREATE]: z.boolean().default(true),
[Permissions.UPDATE]: z.boolean().default(true),
[Permissions.READ]: z.boolean().default(true),
[Permissions.OPT_OUT]: z.boolean().default(true),
});
export type TMemoryPermissions = z.infer<typeof memoryPermissionsSchema>;
export const agentPermissionsSchema = z.object({
[Permissions.SHARED_GLOBAL]: z.boolean().default(false),
[Permissions.USE]: z.boolean().default(true),
@ -92,6 +107,7 @@ export type TWebSearchPermissions = z.infer<typeof webSearchPermissionsSchema>;
export const permissionsSchema = z.object({
[PermissionTypes.PROMPTS]: promptPermissionsSchema,
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
[PermissionTypes.MEMORIES]: memoryPermissionsSchema,
[PermissionTypes.AGENTS]: agentPermissionsSchema,
[PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
[PermissionTypes.TEMPORARY_CHAT]: temporaryChatPermissionsSchema,

View file

@ -5,6 +5,7 @@ import {
permissionsSchema,
agentPermissionsSchema,
promptPermissionsSchema,
memoryPermissionsSchema,
runCodePermissionsSchema,
webSearchPermissionsSchema,
bookmarkPermissionsSchema,
@ -48,6 +49,13 @@ const defaultRolesSchema = z.object({
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema.extend({
[Permissions.USE]: z.boolean().default(true),
}),
[PermissionTypes.MEMORIES]: memoryPermissionsSchema.extend({
[Permissions.USE]: z.boolean().default(true),
[Permissions.CREATE]: z.boolean().default(true),
[Permissions.UPDATE]: z.boolean().default(true),
[Permissions.READ]: z.boolean().default(true),
[Permissions.OPT_OUT]: z.boolean().default(true),
}),
[PermissionTypes.AGENTS]: agentPermissionsSchema.extend({
[Permissions.SHARED_GLOBAL]: z.boolean().default(true),
[Permissions.USE]: z.boolean().default(true),
@ -86,6 +94,13 @@ export const roleDefaults = defaultRolesSchema.parse({
[PermissionTypes.BOOKMARKS]: {
[Permissions.USE]: true,
},
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: true,
[Permissions.CREATE]: true,
[Permissions.UPDATE]: true,
[Permissions.READ]: true,
[Permissions.OPT_OUT]: true,
},
[PermissionTypes.AGENTS]: {
[Permissions.SHARED_GLOBAL]: true,
[Permissions.USE]: true,
@ -110,6 +125,7 @@ export const roleDefaults = defaultRolesSchema.parse({
permissions: {
[PermissionTypes.PROMPTS]: {},
[PermissionTypes.BOOKMARKS]: {},
[PermissionTypes.MEMORIES]: {},
[PermissionTypes.AGENTS]: {},
[PermissionTypes.MULTI_CONVO]: {},
[PermissionTypes.TEMPORARY_CHAT]: {},

View file

@ -522,11 +522,19 @@ export const tMessageSchema = z.object({
feedback: feedbackSchema.optional(),
});
export type MemoryArtifact = {
key: string;
value?: string;
tokenCount?: number;
type: 'update' | 'delete';
};
export type TAttachmentMetadata = {
type?: Tools;
messageId: string;
toolCallId: string;
[Tools.web_search]?: SearchResultData;
[Tools.memory]?: MemoryArtifact;
};
export type TAttachment =

View file

@ -1,17 +1,18 @@
import type OpenAI from 'openai';
import type { InfiniteData } from '@tanstack/react-query';
import type {
TBanner,
TMessage,
TResPlugin,
ImageDetail,
TSharedLink,
TConversation,
EModelEndpoint,
TConversationTag,
TBanner,
TAttachment,
} from './schemas';
import { TMinimalFeedback } from './feedback';
import { SettingDefinition } from './generate';
import type { SettingDefinition } from './generate';
import type { TMinimalFeedback } from './feedback';
import type { Agent } from './types/assistants';
export type TOpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam;
@ -20,28 +21,78 @@ export * from './schemas';
export type TMessages = TMessage[];
/* TODO: Cleanup EndpointOption types */
export type TEndpointOption = {
spec?: string | null;
iconURL?: string | null;
endpoint: EModelEndpoint;
endpointType?: EModelEndpoint;
export type TEndpointOption = Pick<
TConversation,
// Core conversation fields
| 'endpoint'
| 'endpointType'
| 'model'
| 'modelLabel'
| 'chatGptLabel'
| 'promptPrefix'
| 'temperature'
| 'topP'
| 'topK'
| 'top_p'
| 'frequency_penalty'
| 'presence_penalty'
| 'maxOutputTokens'
| 'maxContextTokens'
| 'max_tokens'
| 'maxTokens'
| 'resendFiles'
| 'imageDetail'
| 'reasoning_effort'
| 'instructions'
| 'additional_instructions'
| 'append_current_datetime'
| 'tools'
| 'stop'
| 'region'
| 'additionalModelRequestFields'
// Anthropic-specific
| 'promptCache'
| 'thinking'
| 'thinkingBudget'
// Assistant/Agent fields
| 'assistant_id'
| 'agent_id'
// UI/Display fields
| 'iconURL'
| 'greeting'
| 'spec'
// Artifacts
| 'artifacts'
// Files
| 'file_ids'
// System field
| 'system'
// Google examples
| 'examples'
// Context
| 'context'
> & {
// Fields specific to endpoint options that don't exist on TConversation
modelDisplayLabel?: string;
resendFiles?: boolean;
promptCache?: boolean;
maxContextTokens?: number;
imageDetail?: ImageDetail;
model?: string | null;
promptPrefix?: string;
temperature?: number;
chatGptLabel?: string | null;
modelLabel?: string | null;
jailbreak?: boolean;
key?: string | null;
/* assistant */
/** @deprecated Assistants API */
thread_id?: string;
/* multi-response stream */
// Conversation identifiers for multi-response streams
overrideConvoId?: string;
overrideUserMessageId?: string;
// Model parameters (used by different endpoints)
modelOptions?: Record<string, unknown>;
model_parameters?: Record<string, unknown>;
// Configuration data (added by middleware)
modelsConfig?: TModelsConfig;
// File attachments (processed by middleware)
attachments?: TAttachment[];
// Generated prompts
artifactsPrompt?: string;
// Agent-specific fields
agent?: Promise<Agent>;
// Client-specific options
clientOptions?: Record<string, unknown>;
};
export type TEphemeralAgent = {
@ -130,6 +181,9 @@ export type TUser = {
plugins?: string[];
twoFactorEnabled?: boolean;
backupCodes?: TBackupCode[];
personalization?: {
memories?: boolean;
};
createdAt: string;
updatedAt: string;
};
@ -557,7 +611,7 @@ export type TUpdateFeedbackResponse = {
messageId: string;
conversationId: string;
feedback?: TMinimalFeedback;
}
};
export type TBalanceResponse = {
tokenCredits: number;

View file

@ -22,6 +22,7 @@ export enum Tools {
web_search = 'web_search',
retrieval = 'retrieval',
function = 'function',
memory = 'memory',
}
export enum EToolResources {
@ -514,6 +515,8 @@ export type ActionAuth = {
token_exchange_method?: TokenExchangeMethodEnum;
};
export type MCPAuth = ActionAuth;
export type ActionMetadata = {
api_key?: string;
auth?: ActionAuth;
@ -524,6 +527,16 @@ export type ActionMetadata = {
oauth_client_secret?: string;
};
export type MCPMetadata = Omit<ActionMetadata, 'auth'> & {
name?: string;
description?: string;
url?: string;
tools?: string[];
auth?: MCPAuth;
icon?: string;
trust?: boolean;
};
export type ActionMetadataRuntime = ActionMetadata & {
oauth_access_token?: string;
oauth_refresh_token?: string;
@ -540,6 +553,11 @@ export type Action = {
version: number | string;
} & ({ assistant_id: string; agent_id?: never } | { assistant_id?: never; agent_id: string });
export type MCP = {
mcp_id: string;
metadata: MCPMetadata;
} & ({ assistant_id: string; agent_id?: never } | { assistant_id?: never; agent_id: string });
export type AssistantAvatar = {
filepath: string;
source: string;

View file

@ -10,6 +10,7 @@ export enum FileSources {
vectordb = 'vectordb',
execute_code = 'execute_code',
mistral_ocr = 'mistral_ocr',
azure_mistral_ocr = 'azure_mistral_ocr',
text = 'text',
}

View file

@ -278,7 +278,7 @@ export type UpdatePermVars<T> = {
};
export type UpdatePromptPermVars = UpdatePermVars<p.TPromptPermissions>;
export type UpdateMemoryPermVars = UpdatePermVars<p.TMemoryPermissions>;
export type UpdateAgentPermVars = UpdatePermVars<p.TAgentPermissions>;
export type UpdatePermResponse = r.TRole;
@ -290,6 +290,13 @@ export type UpdatePromptPermOptions = MutationOptions<
types.TError | null | undefined
>;
export type UpdateMemoryPermOptions = MutationOptions<
UpdatePermResponse,
UpdateMemoryPermVars,
unknown,
types.TError | null | undefined
>;
export type UpdateAgentPermOptions = MutationOptions<
UpdatePermResponse,
UpdateAgentPermVars,

View file

@ -109,3 +109,18 @@ export type VerifyToolAuthResponse = {
export type GetToolCallParams = { conversationId: string };
export type ToolCallResults = a.ToolCallResult[];
/* Memories */
/** A single stored user memory entry. */
export type TUserMemory = {
  /** Key identifying this memory — presumably unique per user; verify against caller */
  key: string;
  /** Stored memory content */
  value: string;
  /** Last-update timestamp — assumes a serialized date string; TODO confirm format */
  updated_at: string;
  /** Token count of the stored value — tokenizer not visible here; confirm */
  tokenCount?: number;
};
/** Response payload listing a user's memories with aggregate token usage. */
export type MemoriesResponse = {
  /** All memory entries for the user */
  memories: TUserMemory[];
  /** Total tokens used — presumably the sum of entry token counts; verify */
  totalTokens: number;
  /** Configured token limit, or null when no limit applies */
  tokenLimit: number | null;
  /** Usage as a percentage of the limit, or null when no limit applies */
  usagePercentage: number | null;
};

View file

@ -5,8 +5,8 @@ import type {
SearchProviders,
TWebSearchConfig,
} from './config';
import { extractVariableName } from './utils';
import { SearchCategories, SafeSearchTypes } from './config';
import { extractVariableName } from './utils';
import { AuthType } from './schemas';
export function loadWebSearchConfig(
@ -64,23 +64,29 @@ export const webSearchAuth = {
/**
* Extracts all API keys from the webSearchAuth configuration object
*/
export const webSearchKeys: TWebSearchKeys[] = [];
export function getWebSearchKeys(): TWebSearchKeys[] {
const keys: TWebSearchKeys[] = [];
// Iterate through each category (providers, scrapers, rerankers)
for (const category of Object.keys(webSearchAuth)) {
const categoryObj = webSearchAuth[category as TWebSearchCategories];
// Iterate through each category (providers, scrapers, rerankers)
for (const category of Object.keys(webSearchAuth)) {
const categoryObj = webSearchAuth[category as TWebSearchCategories];
// Iterate through each service within the category
for (const service of Object.keys(categoryObj)) {
const serviceObj = categoryObj[service as keyof typeof categoryObj];
// Iterate through each service within the category
for (const service of Object.keys(categoryObj)) {
const serviceObj = categoryObj[service as keyof typeof categoryObj];
// Extract the API keys from the service
for (const key of Object.keys(serviceObj)) {
webSearchKeys.push(key as TWebSearchKeys);
// Extract the API keys from the service
for (const key of Object.keys(serviceObj)) {
keys.push(key as TWebSearchKeys);
}
}
}
return keys;
}
export const webSearchKeys: TWebSearchKeys[] = getWebSearchKeys();
export function extractWebSearchEnvVars({
keys,
config,

View file

@ -1,114 +0,0 @@
# `@librechat/data-schemas`
Mongoose schemas and models for LibreChat. This package provides a comprehensive collection of Mongoose schemas used across the LibreChat project, enabling robust data modeling and validation for various entities such as actions, agents, messages, users, and more.
## Features
- **Modular Schemas:** Includes schemas for actions, agents, assistants, balance, banners, categories, conversation tags, conversations, files, keys, messages, plugin authentication, presets, projects, prompts, prompt groups, roles, sessions, shared links, tokens, tool calls, transactions, and users.
- **TypeScript Support:** Provides TypeScript definitions for type-safe development.
- **Ready for Mongoose Integration:** Easily integrate with Mongoose to create models and interact with your MongoDB database.
- **Flexible & Extensible:** Designed to support the evolving needs of LibreChat while being adaptable to other projects.
## Installation
Install the package via npm or yarn:
```bash
npm install @librechat/data-schemas
```
Or with yarn:
```bash
yarn add @librechat/data-schemas
```
## Usage
After installation, you can import and use the schemas in your project. For example, to create a Mongoose model for a user:
```js
import mongoose from 'mongoose';
import { userSchema } from '@librechat/data-schemas';
const UserModel = mongoose.model('User', userSchema);
// Now you can use UserModel to create, read, update, and delete user documents.
```
You can also import other schemas as needed:
```js
import { actionSchema, agentSchema, messageSchema } from '@librechat/data-schemas';
```
Each schema is designed to integrate seamlessly with Mongoose and provides indexes, timestamps, and validations tailored for LibreChat's use cases.
## Development
This package uses Rollup and TypeScript for building and bundling.
### Available Scripts
- **Build:**
Cleans the `dist` directory and builds the package.
```bash
npm run build
```
- **Build Watch:**
Rebuilds automatically on file changes.
```bash
npm run build:watch
```
- **Test:**
Runs tests with coverage in watch mode.
```bash
npm run test
```
- **Test (CI):**
Runs tests with coverage for CI environments.
```bash
npm run test:ci
```
- **Verify:**
Runs tests in CI mode to verify code integrity.
```bash
npm run verify
```
- **Clean:**
Removes the `dist` directory.
```bash
npm run clean
```
For those using Bun, equivalent scripts are available:
- **Bun Clean:** `bun run b:clean`
- **Bun Build:** `bun run b:build`
## Repository & Issues
The source code is maintained on GitHub.
- **Repository:** [LibreChat Repository](https://github.com/danny-avila/LibreChat.git)
- **Issues & Bug Reports:** [LibreChat Issues](https://github.com/danny-avila/LibreChat/issues)
## License
This project is licensed under the [MIT License](LICENSE).
## Contributing
Contributions to improve and expand the data schemas are welcome. If you have suggestions, improvements, or bug fixes, please open an issue or submit a pull request on the [GitHub repository](https://github.com/danny-avila/LibreChat/issues).
For more detailed documentation on each schema and model, please refer to the source code or visit the [LibreChat website](https://librechat.ai).

View file

@ -5,6 +5,7 @@ export default {
testResultsProcessor: 'jest-junit',
moduleNameMapper: {
'^@src/(.*)$': '<rootDir>/src/$1',
'^~/(.*)$': '<rootDir>/src/$1',
},
// coverageThreshold: {
// global: {
@ -16,4 +17,4 @@ export default {
// },
restoreMocks: true,
testTimeout: 15000,
};
};

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/data-schemas",
"version": "0.0.7",
"version": "0.0.8",
"description": "Mongoose schemas and models for LibreChat",
"type": "module",
"main": "dist/index.cjs",

View file

@ -58,7 +58,7 @@ function redactMessage(str: string, trimLength?: number): string {
* @returns The modified log information object.
*/
const redactFormat = winston.format((info: winston.Logform.TransformableInfo) => {
if (info.level === 'error') {
if (info && info.level === 'error') {
// Type guard to ensure message is a string
if (typeof info.message === 'string') {
info.message = redactMessage(info.message);

Some files were not shown because too many files have changed in this diff Show more