Merge branch 'dev' into feat/prompt-enhancement

This commit is contained in:
Marco Beretta 2025-07-10 12:11:22 +02:00 committed by GitHub
commit e1af9d21f0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
309 changed files with 12487 additions and 6311 deletions

View file

@ -1,6 +1,6 @@
{
"name": "@librechat/api",
"version": "1.2.4",
"version": "1.2.6",
"type": "commonjs",
"description": "MCP services for LibreChat",
"main": "dist/index.js",
@ -69,13 +69,14 @@
"registry": "https://registry.npmjs.org/"
},
"peerDependencies": {
"@librechat/agents": "^2.4.41",
"@librechat/agents": "^2.4.56",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"@modelcontextprotocol/sdk": "^1.13.3",
"axios": "^1.8.2",
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"js-yaml": "^4.1.0",
"keyv": "^5.3.2",
"librechat-data-provider": "*",
"node-fetch": "2.7.0",

View file

@ -2,3 +2,4 @@ export * from './config';
export * from './memory';
export * from './resources';
export * from './run';
export * from './validation';

View file

@ -1,6 +1,7 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
OpenAIClientOptions,
StandardGraphConfig,
EventHandler,
GenericTool,
@ -46,7 +47,10 @@ export async function createRun({
customHandlers?: Record<GraphEvents, EventHandler>;
}): Promise<Run<IState>> {
const provider =
providerEndpointMap[agent.provider as keyof typeof providerEndpointMap] ?? agent.provider;
(providerEndpointMap[
agent.provider as keyof typeof providerEndpointMap
] as unknown as Providers) ?? agent.provider;
const llmConfig: t.RunLLMConfig = Object.assign(
{
provider,
@ -65,12 +69,19 @@ export async function createRun({
llmConfig.usage = true;
}
let reasoningKey: 'reasoning_content' | 'reasoning' | undefined;
if (
let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
if (provider === Providers.GOOGLE) {
reasoningKey = 'reasoning';
} else if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
} else if (
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
(provider === Providers.OPENAI || provider === Providers.AZURE)
) {
reasoningKey = 'reasoning';
}
const graphConfig: StandardGraphConfig = {

View file

@ -0,0 +1,61 @@
import { z } from 'zod';
/** Avatar schema shared between create and update */
export const agentAvatarSchema = z.object({
  filepath: z.string(),
  source: z.string(),
});

/**
 * Base resource schema for tool resources.
 * Used directly for resources that only carry file references.
 */
export const agentBaseResourceSchema = z.object({
  file_ids: z.array(z.string()).optional(),
  // Files are populated at runtime, not from user input
  files: z.array(z.any()).optional(),
});

/** File resource schema extends base with vector_store_ids */
export const agentFileResourceSchema = agentBaseResourceSchema.extend({
  vector_store_ids: z.array(z.string()).optional(),
});

/**
 * Tool resources schema matching AgentToolResources interface.
 * Only `file_search` carries vector stores; the other resources are
 * plain file references.
 */
export const agentToolResourcesSchema = z
  .object({
    image_edit: agentBaseResourceSchema.optional(),
    execute_code: agentBaseResourceSchema.optional(),
    file_search: agentFileResourceSchema.optional(),
    ocr: agentBaseResourceSchema.optional(),
  })
  .optional();

/**
 * Base agent schema with all common fields.
 * Everything here is optional; create/update schemas below extend it with
 * their own requirements.
 */
export const agentBaseSchema = z.object({
  name: z.string().nullable().optional(),
  description: z.string().nullable().optional(),
  instructions: z.string().nullable().optional(),
  avatar: agentAvatarSchema.nullable().optional(),
  model_parameters: z.record(z.unknown()).optional(),
  tools: z.array(z.string()).optional(),
  agent_ids: z.array(z.string()).optional(),
  end_after_tools: z.boolean().optional(),
  hide_sequential_outputs: z.boolean().optional(),
  artifacts: z.string().optional(),
  recursion_limit: z.number().optional(),
  conversation_starters: z.array(z.string()).optional(),
  tool_resources: agentToolResourcesSchema,
});

/**
 * Create schema extends base with required fields for creation:
 * `provider` is mandatory, `model` must be present (nullable), and
 * `tools` defaults to an empty array.
 */
export const agentCreateSchema = agentBaseSchema.extend({
  provider: z.string(),
  model: z.string().nullable(),
  tools: z.array(z.string()).optional().default([]),
});

/**
 * Update schema extends base with all fields optional and additional
 * update-only fields (project sharing and collaboration flags).
 */
export const agentUpdateSchema = agentBaseSchema.extend({
  provider: z.string().optional(),
  model: z.string().nullable().optional(),
  projectIds: z.array(z.string()).optional(),
  removeProjectIds: z.array(z.string()).optional(),
  isCollaborative: z.boolean().optional(),
});

View file

@ -0,0 +1 @@
export * from './llm';

View file

@ -0,0 +1,207 @@
import { Providers } from '@librechat/agents';
import { googleSettings, AuthKeys } from 'librechat-data-provider';
import type { GoogleClientOptions, VertexAIClientOptions } from '@librechat/agents';
import type { GoogleAIToolType } from '@langchain/google-common';
import type * as t from '~/types';
import { isEnabled } from '~/utils';
/**
 * Selects a safety-threshold normalizer for the given model family.
 *
 * Gemini 1.x models do not accept 'OFF'; certain restricted models
 * (gemini-1.5-flash-8b, gemini-2.0, exp variants, learnlm) accept neither
 * 'OFF' nor 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'. Unsupported values are
 * downgraded to 'BLOCK_NONE'; all other models pass values through as-is.
 *
 * @param model - The model name to match against the family patterns
 * @returns A function mapping a requested threshold to a supported one
 */
function getThresholdMapping(model: string) {
  const gemini1Pattern = /gemini-(1\.0|1\.5|pro$|1\.0-pro|1\.5-pro|1\.5-flash-001)/;
  const restrictedPattern = /(gemini-(1\.5-flash-8b|2\.0|exp)|learnlm)/;

  if (gemini1Pattern.test(model)) {
    // Gemini 1.x: only 'OFF' is unsupported.
    return (threshold: string) => (threshold === 'OFF' ? 'BLOCK_NONE' : threshold);
  }

  if (restrictedPattern.test(model)) {
    // Restricted families: neither 'OFF' nor the unspecified sentinel works.
    return (threshold: string) =>
      threshold === 'OFF' || threshold === 'HARM_BLOCK_THRESHOLD_UNSPECIFIED'
        ? 'BLOCK_NONE'
        : threshold;
  }

  // Everything else accepts the value unchanged.
  return (threshold: string) => threshold;
}
/**
 * Builds the Google safety-settings payload from environment variables,
 * normalizing each threshold for the given model family.
 *
 * @param model - Model name used to select the threshold normalization rules
 * @returns The five harm-category settings, or `undefined` when the
 *   GOOGLE_EXCLUDE_SAFETY_SETTINGS env flag is enabled.
 */
export function getSafetySettings(
  model?: string,
): Array<{ category: string; threshold: string }> | undefined {
  if (isEnabled(process.env.GOOGLE_EXCLUDE_SAFETY_SETTINGS)) {
    return undefined;
  }
  const mapThreshold = getThresholdMapping(model ?? '');

  /** [category, env override, fallback] — order matches the emitted payload */
  const categories: Array<[string, string | undefined, string]> = [
    [
      'HARM_CATEGORY_SEXUALLY_EXPLICIT',
      process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_HATE_SPEECH',
      process.env.GOOGLE_SAFETY_HATE_SPEECH,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_HARASSMENT',
      process.env.GOOGLE_SAFETY_HARASSMENT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    [
      'HARM_CATEGORY_DANGEROUS_CONTENT',
      process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT,
      'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    ],
    ['HARM_CATEGORY_CIVIC_INTEGRITY', process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY, 'BLOCK_NONE'],
  ];

  // `||` (not `??`) so an empty-string env var also falls back, as before.
  return categories.map(([category, override, fallback]) => ({
    category,
    threshold: mapThreshold(override || fallback),
  }));
}
/**
 * Replicates core logic from GoogleClient's constructor and setOptions, plus client determination.
 * Returns an object with the provider label and the final options that would be passed to createLLM.
 *
 * @param credentials - Either a JSON string or an object containing Google keys
 * @param options - The same shape as the "GoogleClient" constructor options
 * @returns `{ tools, provider, llmConfig }` — `provider` is `Providers.VERTEXAI`
 *   when a service-account project id is available (and no API key), otherwise
 *   `Providers.GOOGLE`.
 * @throws Error when string credentials are not valid JSON, or when neither an
 *   API key nor service-account credentials are provided.
 */
export function getGoogleConfig(
  credentials: string | t.GoogleCredentials | undefined,
  options: t.GoogleConfigOptions = {},
) {
  // Credentials may arrive as a JSON string or an already-parsed object.
  let creds: t.GoogleCredentials = {};
  if (typeof credentials === 'string') {
    try {
      creds = JSON.parse(credentials);
    } catch (err: unknown) {
      throw new Error(
        `Error parsing string credentials: ${err instanceof Error ? err.message : 'Unknown error'}`,
      );
    }
  } else if (credentials && typeof credentials === 'object') {
    creds = credentials;
  }
  // The service key itself may also be double-encoded as a JSON string.
  const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  const serviceKey =
    typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
  const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;
  // An API key takes precedence: project_id (which selects Vertex AI below)
  // is only retained when no API key is present.
  const project_id = !apiKey ? (serviceKey?.project_id ?? null) : null;
  const reverseProxyUrl = options.reverseProxyUrl;
  const authHeader = options.authHeader;
  // Split web-search and thinking controls off from the raw model options.
  const {
    web_search,
    thinking = googleSettings.thinking.default,
    thinkingBudget = googleSettings.thinkingBudget.default,
    ...modelOptions
  } = options.modelOptions || {};
  const llmConfig: GoogleClientOptions | VertexAIClientOptions = {
    ...(modelOptions || {}),
    model: modelOptions?.model ?? '',
    maxRetries: 2,
  };
  /** Used only for Safety Settings */
  llmConfig.safetySettings = getSafetySettings(llmConfig.model);
  let provider;
  if (project_id) {
    provider = Providers.VERTEXAI;
  } else {
    provider = Providers.GOOGLE;
  }
  // If we have a GCP project => Vertex AI
  if (provider === Providers.VERTEXAI) {
    (llmConfig as VertexAIClientOptions).authOptions = {
      credentials: { ...serviceKey },
      projectId: project_id,
    };
    (llmConfig as VertexAIClientOptions).location = process.env.GOOGLE_LOC || 'us-central1';
  } else if (apiKey && provider === Providers.GOOGLE) {
    llmConfig.apiKey = apiKey;
  } else {
    // Neither a usable API key nor service-account credentials.
    throw new Error(
      `Invalid credentials provided. Please provide either a valid API key or service account credentials for Google Cloud.`,
    );
  }
  // A budget of -1 is explicitly accepted alongside positive values;
  // 0 (or a falsy `thinking`) leaves thinking disabled.
  const shouldEnableThinking =
    thinking && thinkingBudget != null && (thinkingBudget > 0 || thinkingBudget === -1);
  if (shouldEnableThinking && provider === Providers.GOOGLE) {
    (llmConfig as GoogleClientOptions).thinkingConfig = {
      thinkingBudget: thinking ? thinkingBudget : googleSettings.thinkingBudget.default,
      includeThoughts: Boolean(thinking),
    };
  } else if (shouldEnableThinking && provider === Providers.VERTEXAI) {
    // Vertex AI takes the same settings as top-level fields rather than a nested config.
    (llmConfig as VertexAIClientOptions).thinkingBudget = thinking
      ? thinkingBudget
      : googleSettings.thinkingBudget.default;
    (llmConfig as VertexAIClientOptions).includeThoughts = Boolean(thinking);
  }
  /*
  let legacyOptions = {};
  // Filter out any "examples" that are empty
  legacyOptions.examples = (legacyOptions.examples ?? [])
    .filter(Boolean)
    .filter((obj) => obj?.input?.content !== '' && obj?.output?.content !== '');
  // If user has "examples" from legacyOptions, push them onto llmConfig
  if (legacyOptions.examples?.length) {
    llmConfig.examples = legacyOptions.examples.map((ex) => {
      const { input, output } = ex;
      if (!input?.content || !output?.content) {return undefined;}
      return {
        input: new HumanMessage(input.content),
        output: new AIMessage(output.content),
      };
    }).filter(Boolean);
  }
  */
  if (reverseProxyUrl) {
    (llmConfig as GoogleClientOptions).baseUrl = reverseProxyUrl;
  }
  if (authHeader) {
    // NOTE(review): gated on `authHeader` but the bearer value is `apiKey` —
    // appears intentional (authHeader acting as an on/off flag); confirm.
    (llmConfig as GoogleClientOptions).customHeaders = {
      Authorization: `Bearer ${apiKey}`,
    };
  }
  const tools: GoogleAIToolType[] = [];
  if (web_search) {
    tools.push({ googleSearch: {} });
  }
  // Return the final shape
  return {
    /** @type {GoogleAIToolType[]} */
    tools,
    /** @type {Providers.GOOGLE | Providers.VERTEXAI} */
    provider,
    /** @type {GoogleClientOptions | VertexAIClientOptions} */
    llmConfig,
  };
}

View file

@ -1 +1,2 @@
export * from './google';
export * from './openai';

View file

@ -1,18 +1,14 @@
import {
ErrorTypes,
EModelEndpoint,
resolveHeaders,
mapModelToAzureConfig,
} from 'librechat-data-provider';
import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
import type {
LLMConfigOptions,
UserKeyValues,
InitializeOpenAIOptionsParams,
OpenAIOptionsResult,
OpenAIConfigOptions,
InitializeOpenAIOptionsParams,
} from '~/types';
import { createHandleLLMNewToken } from '~/utils/generators';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { resolveHeaders } from '~/utils/env';
import { getOpenAIConfig } from './llm';
/**
@ -68,7 +64,7 @@ export const initializeOpenAI = async ({
? userValues?.baseURL
: baseURLOptions[endpoint as keyof typeof baseURLOptions];
const clientOptions: LLMConfigOptions = {
const clientOptions: OpenAIConfigOptions = {
proxy: PROXY ?? undefined,
reverseProxyUrl: baseURL || undefined,
streaming: true,
@ -91,7 +87,10 @@ export const initializeOpenAI = async ({
});
clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
clientOptions.headers = resolveHeaders(
{ ...headers, ...(clientOptions.headers ?? {}) },
req.user,
);
const groupName = modelGroupMap[modelName || '']?.group;
if (groupName && groupMap[groupName]) {
@ -136,7 +135,7 @@ export const initializeOpenAI = async ({
user: req.user.id,
};
const finalClientOptions: LLMConfigOptions = {
const finalClientOptions: OpenAIConfigOptions = {
...clientOptions,
modelOptions,
};

View file

@ -1,9 +1,25 @@
import { ProxyAgent } from 'undici';
import { KnownEndpoints } from 'librechat-data-provider';
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { AzureOpenAIInput } from '@langchain/openai';
import type { OpenAI } from 'openai';
import type * as t from '~/types';
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
import { isEnabled } from '~/utils/common';
/**
 * Reports whether at least one reasoning parameter is meaningfully set
 * (i.e. neither null/undefined nor an empty string).
 *
 * @param reasoning_effort - Optional reasoning-effort setting
 * @param reasoning_summary - Optional reasoning-summary setting
 * @returns true when either parameter carries a non-empty value
 */
function hasReasoningParams({
  reasoning_effort,
  reasoning_summary,
}: {
  reasoning_effort?: string | null;
  reasoning_summary?: string | null;
}): boolean {
  const isSet = (value?: string | null): boolean => value != null && value !== '';
  return isSet(reasoning_effort) || isSet(reasoning_summary);
}
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param apiKey - The API key for authentication.
@ -13,11 +29,11 @@ import { isEnabled } from '~/utils/common';
*/
export function getOpenAIConfig(
apiKey: string,
options: t.LLMConfigOptions = {},
options: t.OpenAIConfigOptions = {},
endpoint?: string | null,
): t.LLMConfigResult {
const {
modelOptions = {},
modelOptions: _modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
@ -27,8 +43,10 @@ export function getOpenAIConfig(
addParams,
dropParams,
} = options;
const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
const llmConfig: Partial<t.ClientOptions> &
Partial<t.OpenAIParameters> &
Partial<AzureOpenAIInput> = Object.assign(
{
streaming,
model: modelOptions.model ?? '',
@ -40,39 +58,6 @@ export function getOpenAIConfig(
Object.assign(llmConfig, addParams);
}
// Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
let useOpenRouter = false;
const configOptions: t.OpenAIConfiguration = {};
@ -119,7 +104,10 @@ export function getOpenAIConfig(
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
if (configOptions.baseURL) {
const constructBaseURL = () => {
if (!configOptions.baseURL) {
return;
}
const azureURL = constructAzureURL({
baseURL: configOptions.baseURL,
azureOptions: updatedAzure,
@ -127,9 +115,40 @@ export function getOpenAIConfig(
updatedAzure.azureOpenAIBasePath = azureURL.split(
`/${updatedAzure.azureOpenAIApiDeploymentName}`,
)[0];
}
};
constructBaseURL();
Object.assign(llmConfig, updatedAzure);
const constructAzureResponsesApi = () => {
if (!llmConfig.useResponsesApi) {
return;
}
configOptions.baseURL = constructAzureURL({
baseURL: configOptions.baseURL || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
azureOptions: llmConfig,
});
delete llmConfig.azureOpenAIApiDeploymentName;
delete llmConfig.azureOpenAIApiInstanceName;
delete llmConfig.azureOpenAIApiVersion;
delete llmConfig.azureOpenAIBasePath;
delete llmConfig.azureOpenAIApiKey;
llmConfig.apiKey = apiKey;
configOptions.defaultHeaders = {
...configOptions.defaultHeaders,
'api-key': apiKey,
};
configOptions.defaultQuery = {
...configOptions.defaultQuery,
'api-version': configOptions.defaultQuery?.['api-version'] ?? 'preview',
};
};
constructAzureResponsesApi();
llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
} else {
llmConfig.apiKey = apiKey;
@ -139,11 +158,19 @@ export function getOpenAIConfig(
configOptions.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
if (
hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
(llmConfig.useResponsesApi === true || useOpenRouter)
) {
llmConfig.reasoning = removeNullishValues(
{
effort: reasoning_effort,
summary: reasoning_summary,
},
true,
) as OpenAI.Reasoning;
} else if (hasReasoningParams({ reasoning_effort })) {
llmConfig.reasoning_effort = reasoning_effort;
}
if (llmConfig.max_tokens != null) {
@ -151,8 +178,53 @@ export function getOpenAIConfig(
delete llmConfig.max_tokens;
}
const tools: BindToolsInput[] = [];
if (modelOptions.web_search) {
llmConfig.useResponsesApi = true;
tools.push({ type: 'web_search_preview' });
}
/**
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
*/
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'reasoning',
'reasoning_effort',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
return {
llmConfig,
configOptions,
tools,
};
}

View file

@ -21,6 +21,7 @@ import type {
OCRImage,
} from '~/types';
import { logAxiosError, createAxiosInstance } from '~/utils/axios';
import { loadServiceKey } from '~/utils/key';
const axios = createAxiosInstance();
const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1';
@ -32,6 +33,13 @@ interface AuthConfig {
baseURL: string;
}
/** Helper type for Google service account (subset of the service-account JSON) */
interface GoogleServiceAccount {
  /** Service-account email; used as the JWT `iss` claim */
  client_email?: string;
  /** PEM-encoded private key used to sign the JWT (RSA-SHA256) */
  private_key?: string;
  /** GCP project id; used to address the Vertex AI endpoint */
  project_id?: string;
}
/** Helper type for OCR request context */
interface OCRContext {
req: Pick<ServerRequest, 'user' | 'app'> & {
@ -353,7 +361,11 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
documentType,
});
// Process result
if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
throw new Error(
'No OCR result returned from service, may be down or the file is not supported.',
);
}
const { text, images } = processOCRResult(ocrResult);
return {
@ -364,7 +376,7 @@ export const uploadMistralOCR = async (context: OCRContext): Promise<MistralOCRU
images,
};
} catch (error) {
throw createOCRError(error, 'Error uploading document to Mistral OCR API');
throw createOCRError(error, 'Error uploading document to Mistral OCR API:');
}
};
@ -401,6 +413,12 @@ export const uploadAzureMistralOCR = async (
documentType,
});
if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
throw new Error(
'No OCR result returned from service, may be down or the file is not supported.',
);
}
const { text, images } = processOCRResult(ocrResult);
return {
@ -411,6 +429,219 @@ export const uploadAzureMistralOCR = async (
images,
};
} catch (error) {
throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API');
throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API:');
}
};
/**
 * Loads Google service account configuration and mints an OAuth2 access token.
 *
 * @returns The loaded service account plus a freshly exchanged access token
 * @throws Error when the service key cannot be loaded, or is missing any of
 *   client_email / private_key / project_id
 */
async function loadGoogleAuthConfig(): Promise<{
  serviceAccount: GoogleServiceAccount;
  accessToken: string;
}> {
  /** Path from environment variable or default location */
  const serviceKeyPath =
    process.env.GOOGLE_SERVICE_KEY_FILE ||
    path.join(__dirname, '..', '..', '..', 'api', 'data', 'auth.json');

  const serviceKey = await loadServiceKey(serviceKeyPath);
  if (!serviceKey) {
    throw new Error(
      `Google service account not found or could not be loaded from ${serviceKeyPath}`,
    );
  }

  // All three fields are required: client_email/private_key to sign the JWT,
  // project_id to address the Vertex AI endpoint.
  if (!serviceKey.client_email || !serviceKey.private_key || !serviceKey.project_id) {
    throw new Error('Invalid Google service account configuration');
  }

  // JWT-bearer grant: sign a JWT locally, then exchange it for an access token.
  const jwt = await createJWT(serviceKey as GoogleServiceAccount);
  const accessToken = await exchangeJWTForAccessToken(jwt);

  return {
    serviceAccount: serviceKey as GoogleServiceAccount,
    accessToken,
  };
}
/**
 * Creates a signed JWT for the Google OAuth2 JWT-bearer grant.
 *
 * Builds an RS256 header and a claims payload (issuer = service-account
 * email, cloud-platform scope, 1-hour expiry), then signs
 * `header.payload` with the service account's private key.
 *
 * @param serviceKey - Service account providing `client_email` and `private_key`
 * @returns The compact-serialized JWT (`header.payload.signature`)
 */
async function createJWT(serviceKey: GoogleServiceAccount): Promise<string> {
  const crypto = await import('crypto');

  /** base64url-encode a JSON object for a JWT segment */
  const encodeSegment = (segment: object): string =>
    Buffer.from(JSON.stringify(segment)).toString('base64url');

  const issuedAt = Math.floor(Date.now() / 1000);
  const headerSegment = encodeSegment({
    alg: 'RS256',
    typ: 'JWT',
  });
  const payloadSegment = encodeSegment({
    iss: serviceKey.client_email,
    scope: 'https://www.googleapis.com/auth/cloud-platform',
    aud: 'https://oauth2.googleapis.com/token',
    exp: issuedAt + 3600,
    iat: issuedAt,
  });

  const unsignedToken = `${headerSegment}.${payloadSegment}`;
  const signer = crypto.createSign('RSA-SHA256');
  signer.update(unsignedToken);
  signer.end();
  const signature = signer.sign(serviceKey.private_key!, 'base64url');

  return `${unsignedToken}.${signature}`;
}
/**
 * Exchanges a signed JWT assertion for a Google OAuth2 access token.
 *
 * @param jwt - The signed JWT assertion
 * @returns The access token string
 * @throws Error when the token endpoint response contains no access token
 */
async function exchangeJWTForAccessToken(jwt: string): Promise<string> {
  const form = new URLSearchParams({
    grant_type: 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    assertion: jwt,
  });
  const response = await axios.post('https://oauth2.googleapis.com/token', form, {
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded',
    },
  });

  const accessToken = response.data?.access_token;
  if (!accessToken) {
    throw new Error('No access token in response');
  }
  return accessToken;
}
/**
 * Performs OCR using Google Vertex AI (Mistral OCR publisher model, rawPredict).
 *
 * @param url - The document/image content as a data URL (base64) or remote URL
 * @param accessToken - OAuth2 bearer token for Vertex AI
 * @param projectId - GCP project id used in the endpoint path
 * @param model - Model id; falls back to 'mistral-ocr-2505' when empty
 * @param documentType - Whether the payload is a document or an image
 * @returns The parsed OCR result from the Vertex AI response body
 * @throws Error (with logged axios context) when the request fails
 */
async function performGoogleVertexOCR({
  url,
  accessToken,
  projectId,
  model,
  documentType = 'document_url',
}: {
  url: string;
  accessToken: string;
  projectId: string;
  model: string;
  documentType?: 'document_url' | 'image_url';
}): Promise<OCRResult> {
  const location = process.env.GOOGLE_LOC || 'us-central1';
  const modelId = model || 'mistral-ocr-2505';

  // The 'global' location uses a host without the region prefix.
  let baseURL: string;
  if (location === 'global') {
    baseURL = `https://aiplatform.googleapis.com/v1/projects/${projectId}/locations/global/publishers/mistralai/models/${modelId}:rawPredict`;
  } else {
    baseURL = `https://${location}-aiplatform.googleapis.com/v1/projects/${projectId}/locations/${location}/publishers/mistralai/models/${modelId}:rawPredict`;
  }

  const documentKey = documentType === 'image_url' ? 'image_url' : 'document_url';
  const requestBody = {
    model: modelId,
    document: {
      type: documentType,
      [documentKey]: url,
    },
    include_image_base64: true,
  };

  // Log the request shape but never the (potentially huge) base64 payload.
  logger.debug('Sending request to Google Vertex AI:', {
    url: baseURL,
    body: {
      ...requestBody,
      document: { ...requestBody.document, [documentKey]: 'base64_data_hidden' },
    },
  });

  return axios
    .post(baseURL, requestBody, {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${accessToken}`,
        Accept: 'application/json',
      },
    })
    .then((res) => {
      logger.debug('Google Vertex AI response received');
      return res.data;
    })
    .catch((error) => {
      // Surface the service's error body before rethrowing a summarized error.
      if (error.response?.data) {
        logger.error('Vertex AI error response: ' + JSON.stringify(error.response.data, null, 2));
      }
      throw new Error(
        logAxiosError({
          error: error as AxiosError,
          message: 'Error calling Google Vertex AI Mistral OCR',
        }),
      );
    });
}
/**
 * Use Google Vertex AI Mistral OCR API to process the OCR result.
 *
 * @param params - The params object.
 * @param params.req - The request object from Express. It should have a `user` property with an `id`
 * representing the user
 * @param params.file - The file object, which is part of the request. The file object should
 * have a `mimetype` property that tells us the file type
 * @param params.loadAuthValues - Function to load authentication values
 * @returns - The result object containing the processed `text` and `images` (not currently used),
 * along with the `filename` and `bytes` properties.
 * @throws OCR error (via createOCRError) when auth, upload, or processing fails
 */
export const uploadGoogleVertexMistralOCR = async (
  context: OCRContext,
): Promise<MistralOCRUploadResult> => {
  try {
    // Resolve service-account credentials and a fresh access token.
    const { serviceAccount, accessToken } = await loadGoogleAuthConfig();
    const model = getModelConfig(context.req.app.locals?.ocr);

    // Inline the file as a base64 data URL; defaults to PDF when mimetype is absent.
    const buffer = fs.readFileSync(context.file.path);
    const base64 = buffer.toString('base64');
    const base64Prefix = `data:${context.file.mimetype || 'application/pdf'};base64,`;
    const documentType = getDocumentType(context.file);

    const ocrResult = await performGoogleVertexOCR({
      url: `${base64Prefix}${base64}`,
      accessToken,
      projectId: serviceAccount.project_id!,
      model,
      documentType,
    });

    // Treat an empty/missing page list as a hard failure.
    if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
      throw new Error(
        'No OCR result returned from service, may be down or the file is not supported.',
      );
    }

    const { text, images } = processOCRResult(ocrResult);

    return {
      filename: context.file.originalname,
      // NOTE(review): `text.length * 4` looks like a rough byte estimate of
      // the extracted text, not the original file size — confirm intent.
      bytes: text.length * 4,
      filepath: FileSources.vertexai_mistral_ocr as string,
      text,
      images,
    };
  } catch (error) {
    throw createOCRError(error, 'Error uploading document to Google Vertex AI Mistral OCR:');
  }
};

View file

@ -11,6 +11,8 @@ export * from './oauth';
export * from './crypto';
/* Flow */
export * from './flow/manager';
/* Middleware */
export * from './middleware';
/* Agents */
export * from './agents';
/* Endpoints */

View file

@ -59,9 +59,6 @@ export class MCPConnection extends EventEmitter {
private transport: Transport | null = null; // Make this nullable
private connectionState: t.ConnectionState = 'disconnected';
private connectPromise: Promise<void> | null = null;
private lastError: Error | null = null;
private lastConfigUpdate = 0;
private readonly CONFIG_TTL = 5 * 60 * 1000; // 5 minutes
private readonly MAX_RECONNECT_ATTEMPTS = 3;
public readonly serverName: string;
private shouldStopReconnecting = false;
@ -135,7 +132,6 @@ export class MCPConnection extends EventEmitter {
private emitError(error: unknown, errorContext: string): void {
const errorMessage = error instanceof Error ? error.message : String(error);
logger.error(`${this.getLogPrefix()} ${errorContext}: ${errorMessage}`);
this.emit('error', new Error(`${errorContext}: ${errorMessage}`));
}
private constructTransport(options: t.MCPOptions): Transport {
@ -359,16 +355,10 @@ export class MCPConnection extends EventEmitter {
private subscribeToResources(): void {
this.client.setNotificationHandler(ResourceListChangedNotificationSchema, async () => {
this.invalidateCache();
this.emit('resourcesChanged');
});
}
private invalidateCache(): void {
// this.cachedConfig = null;
this.lastConfigUpdate = 0;
}
async connectClient(): Promise<void> {
if (this.connectionState === 'connected') {
return;
@ -527,7 +517,7 @@ export class MCPConnection extends EventEmitter {
try {
await this.disconnect();
await this.connectClient();
if (!this.isConnected()) {
if (!(await this.isConnected())) {
throw new Error('Connection not established');
}
} catch (error) {
@ -564,11 +554,7 @@ export class MCPConnection extends EventEmitter {
}
this.connectionState = 'disconnected';
this.emit('connectionChange', 'disconnected');
} catch (error) {
this.emit('error', error);
throw error;
} finally {
this.invalidateCache();
this.connectPromise = null;
}
}
@ -603,79 +589,6 @@ export class MCPConnection extends EventEmitter {
}
}
// public async modifyConfig(config: ContinueConfig): Promise<ContinueConfig> {
// try {
// // Check cache
// if (this.cachedConfig && Date.now() - this.lastConfigUpdate < this.CONFIG_TTL) {
// return this.cachedConfig;
// }
// await this.connectClient();
// // Fetch and process resources
// const resources = await this.fetchResources();
// const submenuItems = resources.map(resource => ({
// title: resource.name,
// description: resource.description,
// id: resource.uri,
// }));
// if (!config.contextProviders) {
// config.contextProviders = [];
// }
// config.contextProviders.push(
// new MCPContextProvider({
// submenuItems,
// client: this.client,
// }),
// );
// // Fetch and process tools
// const tools = await this.fetchTools();
// const continueTools: Tool[] = tools.map(tool => ({
// displayTitle: tool.name,
// function: {
// description: tool.description,
// name: tool.name,
// parameters: tool.inputSchema,
// },
// readonly: false,
// type: 'function',
// wouldLikeTo: `use the ${tool.name} tool`,
// uri: `mcp://${tool.name}`,
// }));
// config.tools = [...(config.tools || []), ...continueTools];
// // Fetch and process prompts
// const prompts = await this.fetchPrompts();
// if (!config.slashCommands) {
// config.slashCommands = [];
// }
// const slashCommands: SlashCommand[] = prompts.map(prompt =>
// constructMcpSlashCommand(
// this.client,
// prompt.name,
// prompt.description,
// prompt.arguments?.map(a => a.name),
// ),
// );
// config.slashCommands.push(...slashCommands);
// // Update cache
// this.cachedConfig = config;
// this.lastConfigUpdate = Date.now();
// return config;
// } catch (error) {
// this.emit('error', error);
// // Return original config if modification fails
// return config;
// }
// }
public async isConnected(): Promise<boolean> {
try {
await this.client.ping();

View file

@ -2,7 +2,7 @@ import { logger } from '@librechat/data-schemas';
import { CallToolResultSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
import type { RequestOptions } from '@modelcontextprotocol/sdk/shared/protocol.js';
import type { OAuthClientInformation } from '@modelcontextprotocol/sdk/shared/auth.js';
import type { JsonSchemaType, MCPOptions, TUser } from 'librechat-data-provider';
import type { JsonSchemaType, TUser } from 'librechat-data-provider';
import type { TokenMethods } from '@librechat/data-schemas';
import type { FlowStateManager } from '~/flow/manager';
import type { MCPOAuthTokens, MCPOAuthFlowMetadata } from './oauth/types';
@ -13,6 +13,7 @@ import { MCPOAuthHandler } from './oauth/handler';
import { MCPTokenStorage } from './oauth/tokens';
import { formatToolContent } from './parsers';
import { MCPConnection } from './connection';
import { processMCPEnv } from '~/utils/env';
export class MCPManager {
private static instance: MCPManager | null = null;
@ -24,11 +25,6 @@ export class MCPManager {
private userLastActivity: Map<string, number> = new Map();
private readonly USER_CONNECTION_IDLE_TIMEOUT = 15 * 60 * 1000; // 15 minutes (TODO: make configurable)
private mcpConfigs: t.MCPServers = {};
private processMCPEnv?: (
obj: MCPOptions,
user?: TUser,
customUserVars?: Record<string, string>,
) => MCPOptions; // Store the processing function
/** Store MCP server instructions */
private serverInstructions: Map<string, string> = new Map();
@ -46,14 +42,11 @@ export class MCPManager {
mcpServers,
flowManager,
tokenMethods,
processMCPEnv,
}: {
mcpServers: t.MCPServers;
flowManager: FlowStateManager<MCPOAuthTokens | null>;
tokenMethods?: TokenMethods;
processMCPEnv?: (obj: MCPOptions) => MCPOptions;
}): Promise<void> {
this.processMCPEnv = processMCPEnv; // Store the function
this.mcpConfigs = mcpServers;
if (!flowManager) {
@ -68,7 +61,7 @@ export class MCPManager {
const connectionResults = await Promise.allSettled(
entries.map(async ([serverName, _config], i) => {
/** Process env for app-level connections */
const config = this.processMCPEnv ? this.processMCPEnv(_config) : _config;
const config = processMCPEnv(_config);
/** Existing tokens for system-level connections */
let tokens: MCPOAuthTokens | null = null;
@ -444,9 +437,7 @@ export class MCPManager {
);
}
if (this.processMCPEnv) {
config = { ...(this.processMCPEnv(config, user, customUserVars) ?? {}) };
}
config = { ...(processMCPEnv(config, user, customUserVars) ?? {}) };
/** If no in-memory tokens, tokens from persistent storage */
let tokens: MCPOAuthTokens | null = null;
if (tokenMethods?.findToken) {

View file

@ -1,10 +1,10 @@
import type { TUser } from 'librechat-data-provider';
import {
StreamableHTTPOptionsSchema,
StdioOptionsSchema,
processMCPEnv,
MCPOptions,
} from '../src/mcp';
StdioOptionsSchema,
StreamableHTTPOptionsSchema,
} from 'librechat-data-provider';
import type { TUser } from 'librechat-data-provider';
import { processMCPEnv } from '~/utils/env';
// Helper function to create test user objects
function createTestUser(

View file

@ -7,6 +7,7 @@ const RECOGNIZED_PROVIDERS = new Set([
'xai',
'deepseek',
'ollama',
'bedrock',
]);
const CONTENT_ARRAY_PROVIDERS = new Set(['google', 'anthropic', 'openai']);

View file

@ -0,0 +1,554 @@
import { Request, Response, NextFunction } from 'express';
import {
Permissions,
PermissionTypes,
EModelEndpoint,
EndpointURLs,
} from 'librechat-data-provider';
import type { IRole, IUser } from '@librechat/data-schemas';
import { checkAccess, generateCheckAccess, skipAgentCheck } from './access';
// Mock logger
jest.mock('@librechat/data-schemas', () => ({
logger: {
warn: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
},
}));
// Test suite for the access-control helpers in `./access`:
// `skipAgentCheck`, the core `checkAccess` predicate, and the
// `generateCheckAccess` Express middleware factory.
describe('access middleware', () => {
  let mockReq: Partial<Request>;
  let mockRes: Partial<Response>;
  let mockNext: jest.MockedFunction<NextFunction>;
  let mockGetRoleByName: jest.Mock;

  beforeEach(() => {
    // Fresh request/response/next doubles before every test
    mockReq = {
      user: {
        id: 'user123',
        role: 'user',
        email: 'test@example.com',
        emailVerified: true,
        provider: 'local',
      } as IUser,
      body: {},
      originalUrl: '/api/test',
      method: 'POST',
    } as Partial<Request>;
    mockRes = {
      status: jest.fn().mockReturnThis(),
      json: jest.fn().mockReturnThis(),
    };
    mockNext = jest.fn() as jest.MockedFunction<NextFunction>;
    mockGetRoleByName = jest.fn();
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  // skipAgentCheck returns true only for POST requests to the agents URL
  // whose body endpoint is NOT an agents endpoint.
  describe('skipAgentCheck', () => {
    it('should return false when req is undefined', () => {
      expect(skipAgentCheck(undefined)).toBe(false);
    });

    it('should return false when req.body.endpoint is not present', () => {
      expect(skipAgentCheck(mockReq as Request)).toBe(false);
    });

    it('should return false when method is not POST', () => {
      mockReq.method = 'GET';
      mockReq.body = { endpoint: 'gpt-4' };
      expect(skipAgentCheck(mockReq as Request)).toBe(false);
    });

    it('should return false when URL does not include agents endpoint', () => {
      mockReq.body = { endpoint: 'gpt-4' };
      mockReq.originalUrl = '/api/messages';
      expect(skipAgentCheck(mockReq as Request)).toBe(false);
    });

    it('should return true when not an agents endpoint but URL includes agents', () => {
      mockReq.body = { endpoint: 'gpt-4' };
      mockReq.originalUrl = EndpointURLs[EModelEndpoint.agents];
      expect(skipAgentCheck(mockReq as Request)).toBe(true);
    });

    it('should return false when is an agents endpoint', () => {
      mockReq.body = { endpoint: EModelEndpoint.agents };
      mockReq.originalUrl = EndpointURLs[EModelEndpoint.agents];
      expect(skipAgentCheck(mockReq as Request)).toBe(false);
    });
  });

  // checkAccess requires ALL listed permissions (AND logic); bodyProps can
  // satisfy an ungranted permission when every mapped property is present.
  describe('checkAccess', () => {
    const defaultParams = {
      user: {
        id: 'user123',
        role: 'user',
        email: 'test@example.com',
        emailVerified: true,
        provider: 'local',
      } as IUser,
      permissionType: PermissionTypes.AGENTS,
      permissions: [Permissions.USE],
      getRoleByName: jest.fn(),
    };

    it('should return true when skipCheck function returns true', async () => {
      const skipCheck = jest.fn().mockReturnValue(true);
      const result = await checkAccess({
        ...defaultParams,
        req: mockReq as Request,
        skipCheck,
      });
      expect(result).toBe(true);
      expect(skipCheck).toHaveBeenCalledWith(mockReq);
    });

    it('should return false when user is not provided', async () => {
      const result = await checkAccess({
        ...defaultParams,
        user: null as unknown as IUser,
      });
      expect(result).toBe(false);
    });

    it('should return false when user has no role', async () => {
      const result = await checkAccess({
        ...defaultParams,
        user: {
          id: 'user123',
          email: 'test@example.com',
          emailVerified: true,
          provider: 'local',
        } as IUser,
      });
      expect(result).toBe(false);
    });

    it('should return true when user has required permissions', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess(defaultParams);
      expect(result).toBe(true);
      expect(defaultParams.getRoleByName).toHaveBeenCalledWith('user');
    });

    it('should return false when user lacks required permissions', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: false,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess(defaultParams);
      expect(result).toBe(false);
    });

    it('should check multiple permissions with AND logic', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
            [Permissions.CREATE]: true,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess({
        ...defaultParams,
        permissions: [Permissions.USE, Permissions.CREATE],
      });
      expect(result).toBe(true);
    });

    it('should return false when user has only some of the required permissions', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
            [Permissions.CREATE]: false,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess({
        ...defaultParams,
        permissions: [Permissions.USE, Permissions.CREATE],
      });
      expect(result).toBe(false);
    });

    it('should check bodyProps when permission is not directly granted', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
            [Permissions.SHARED_GLOBAL]: false,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const checkObject = {
        projectIds: ['project1'],
        removeProjectIds: ['project2'],
      };
      const result = await checkAccess({
        ...defaultParams,
        permissions: [Permissions.USE, Permissions.SHARED_GLOBAL],
        bodyProps: {
          [Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
        } as Record<Permissions, string[]>,
        checkObject,
      });
      expect(result).toBe(true);
    });

    it('should return false when bodyProps requirements are not met', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.SHARED_GLOBAL]: false,
          },
        },
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const checkObject = {
        projectIds: ['project1'],
        // missing removeProjectIds
      };
      const result = await checkAccess({
        ...defaultParams,
        permissions: [Permissions.SHARED_GLOBAL],
        bodyProps: {
          [Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
        } as Record<Permissions, string[]>,
        checkObject,
      });
      expect(result).toBe(false);
    });

    it('should handle role without permissions object', async () => {
      const mockRole = {
        name: 'user',
      } as unknown as IRole;
      defaultParams.getRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess(defaultParams);
      expect(result).toBe(false);
    });

    it('should handle getRoleByName returning null', async () => {
      defaultParams.getRoleByName.mockResolvedValue(null);
      const result = await checkAccess(defaultParams);
      expect(result).toBe(false);
    });
  });

  // Middleware wrapper: 403 on denial, 500 on thrown errors, next() on success.
  describe('generateCheckAccess', () => {
    it('should create middleware that allows access when user has permissions', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.MEMORIES]: {
            [Permissions.USE]: true,
            [Permissions.READ]: true,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.MEMORIES,
        permissions: [Permissions.USE, Permissions.READ],
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
      expect(mockRes.status).not.toHaveBeenCalled();
    });

    it('should create middleware that denies access when user lacks permissions', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.MEMORIES]: {
            [Permissions.USE]: false,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.MEMORIES,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).not.toHaveBeenCalled();
      expect(mockRes.status).toHaveBeenCalledWith(403);
      expect(mockRes.json).toHaveBeenCalledWith({ message: 'Forbidden: Insufficient permissions' });
    });

    it('should handle bodyProps in middleware', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
            [Permissions.CREATE]: true,
            [Permissions.SHARED_GLOBAL]: false,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      mockReq.body = {
        projectIds: ['project1'],
        removeProjectIds: ['project2'],
      };
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.AGENTS,
        permissions: [Permissions.USE, Permissions.CREATE, Permissions.SHARED_GLOBAL],
        bodyProps: {
          [Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
        } as Record<Permissions, string[]>,
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
      expect(mockRes.status).not.toHaveBeenCalled();
    });

    it('should use skipCheck function when provided', async () => {
      const skipCheck = jest.fn().mockReturnValue(true);
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.AGENTS,
        permissions: [Permissions.USE],
        skipCheck,
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(skipCheck).toHaveBeenCalledWith(mockReq);
      expect(mockNext).toHaveBeenCalled();
      expect(mockGetRoleByName).not.toHaveBeenCalled();
    });

    it('should handle errors and return 500 status', async () => {
      const error = new Error('Database error');
      mockGetRoleByName.mockRejectedValue(error);
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.AGENTS,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(mockRes.status).toHaveBeenCalledWith(500);
      expect(mockRes.json).toHaveBeenCalledWith({
        message: 'Server error: Database error',
      });
    });

    it('should handle non-Error objects in catch block', async () => {
      mockGetRoleByName.mockRejectedValue('String error');
      const middleware = generateCheckAccess({
        permissionType: PermissionTypes.AGENTS,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      await middleware(mockReq as Request, mockRes as Response, mockNext);
      expect(mockRes.status).toHaveBeenCalledWith(500);
      expect(mockRes.json).toHaveBeenCalledWith({
        message: 'Server error: Unknown error',
      });
    });
  });

  // End-to-end style scenarios mirroring how the middleware is wired in routes.
  describe('Real-world usage patterns', () => {
    it('should handle memory access patterns', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.MEMORIES]: {
            [Permissions.USE]: true,
            [Permissions.CREATE]: true,
            [Permissions.UPDATE]: true,
            [Permissions.READ]: true,
            [Permissions.OPT_OUT]: true,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      // Test memory read access
      const checkMemoryRead = generateCheckAccess({
        permissionType: PermissionTypes.MEMORIES,
        permissions: [Permissions.USE, Permissions.READ],
        getRoleByName: mockGetRoleByName,
      });
      await checkMemoryRead(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
      // Test memory create access
      mockNext.mockClear();
      const checkMemoryCreate = generateCheckAccess({
        permissionType: PermissionTypes.MEMORIES,
        permissions: [Permissions.USE, Permissions.CREATE],
        getRoleByName: mockGetRoleByName,
      });
      await checkMemoryCreate(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
    });

    it('should handle agent access patterns with skipCheck', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.AGENTS]: {
            [Permissions.USE]: true,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      mockReq.body = { endpoint: 'gpt-4' };
      mockReq.originalUrl = EndpointURLs[EModelEndpoint.agents];
      const checkAgentAccess = generateCheckAccess({
        permissionType: PermissionTypes.AGENTS,
        permissions: [Permissions.USE],
        skipCheck: skipAgentCheck,
        getRoleByName: mockGetRoleByName,
      });
      await checkAgentAccess(mockReq as Request, mockRes as Response, mockNext);
      // Should skip check because endpoint is not agents
      expect(mockNext).toHaveBeenCalled();
      expect(mockGetRoleByName).not.toHaveBeenCalled();
    });

    it('should handle prompt access patterns', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.PROMPTS]: {
            [Permissions.USE]: true,
            [Permissions.CREATE]: true,
            [Permissions.SHARED_GLOBAL]: false,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      const checkPromptAccess = generateCheckAccess({
        permissionType: PermissionTypes.PROMPTS,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      await checkPromptAccess(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
    });

    it('should handle bookmark access patterns', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.BOOKMARKS]: {
            [Permissions.USE]: true,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      const checkBookmarkAccess = generateCheckAccess({
        permissionType: PermissionTypes.BOOKMARKS,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      await checkBookmarkAccess(mockReq as Request, mockRes as Response, mockNext);
      expect(mockNext).toHaveBeenCalled();
    });

    it('should handle tool access patterns', async () => {
      const mockRole = {
        name: 'user',
        permissions: {
          [PermissionTypes.RUN_CODE]: {
            [Permissions.USE]: true,
          },
        },
      } as unknown as IRole;
      mockGetRoleByName.mockResolvedValue(mockRole);
      const result = await checkAccess({
        user: mockReq.user as IUser,
        permissionType: PermissionTypes.RUN_CODE,
        permissions: [Permissions.USE],
        getRoleByName: mockGetRoleByName,
      });
      expect(result).toBe(true);
    });
  });
});

View file

@ -0,0 +1,141 @@
import { logger } from '@librechat/data-schemas';
import {
Permissions,
EndpointURLs,
EModelEndpoint,
PermissionTypes,
isAgentsEndpoint,
} from 'librechat-data-provider';
import type { NextFunction, Request as ServerRequest, Response as ServerResponse } from 'express';
import type { IRole, IUser } from '@librechat/data-schemas';
export function skipAgentCheck(req?: ServerRequest): boolean {
if (!req || !req?.body?.endpoint) {
return false;
}
if (req.method !== 'POST') {
return false;
}
if (!req.originalUrl?.includes(EndpointURLs[EModelEndpoint.agents])) {
return false;
}
return !isAgentsEndpoint(req.body.endpoint);
}
/**
 * Core function to check whether a user has ALL of the required permissions
 * (AND logic across `permissions`).
 *
 * A single permission counts as satisfied when either:
 * - the user's role grants it directly, or
 * - `bodyProps` maps that permission to a list of properties and every one of
 *   those properties exists on `checkObject` (own properties only).
 *
 * @param req - Optional request object, forwarded to `skipCheck`
 * @param user - The user object; must have a `role` to pass the check
 * @param permissionType - The type of permission to check
 * @param permissions - The list of specific permissions required (all must hold)
 * @param bodyProps - Optional map of permission → properties to look for on `checkObject`
 * @param checkObject - The object to check properties against (typically `req.body`)
 * @param skipCheck - Optional predicate; returning `true` bypasses all checking
 * @param getRoleByName - Loads the role document for the user's role name
 * @returns Whether the user has all of the required permissions
 */
export const checkAccess = async ({
  req,
  user,
  permissionType,
  permissions,
  getRoleByName,
  bodyProps = {} as Record<Permissions, string[]>,
  checkObject = {},
  skipCheck,
}: {
  user: IUser;
  req?: ServerRequest;
  permissionType: PermissionTypes;
  permissions: Permissions[];
  bodyProps?: Record<Permissions, string[]>;
  checkObject?: object;
  /** If skipCheck function is provided and returns true, skip permission checking */
  skipCheck?: (req?: ServerRequest) => boolean;
  getRoleByName: (roleName: string, fieldsToSelect?: string | string[]) => Promise<IRole | null>;
}): Promise<boolean> => {
  if (skipCheck && skipCheck(req)) {
    return true;
  }
  // A user without a role can never hold permissions
  if (!user || !user.role) {
    return false;
  }
  const role = await getRoleByName(user.role);
  // Deny when the role is missing or has no entry for this permission type
  if (!role?.permissions?.[permissionType]) {
    return false;
  }
  // AND logic: every required permission must be satisfied
  const hasAllPermissions = permissions.every((permission) => {
    if (
      role.permissions?.[permissionType as keyof typeof role.permissions]?.[
        permission as keyof (typeof role.permissions)[typeof permissionType]
      ]
    ) {
      return true;
    }
    // Fallback: a permission is implicitly satisfied when the request does not
    // touch any of the properties that the permission gates
    if (bodyProps[permission] && checkObject) {
      return bodyProps[permission].every((prop) =>
        Object.prototype.hasOwnProperty.call(checkObject, prop),
      );
    }
    return false;
  });
  return hasAllPermissions;
};
/**
 * Builds an Express middleware that enforces one or more permissions on a
 * request, optionally validating `req.body` properties via `bodyProps`.
 *
 * Responds with 403 when the check fails, 500 when an error is thrown while
 * checking, and calls `next()` on success.
 *
 * @param permissionType - The type of permission to check
 * @param permissions - The specific permissions required
 * @param bodyProps - Optional map of permission → `req.body` properties to check
 * @param skipCheck - Optional predicate; returning `true` bypasses checking
 * @param getRoleByName - Loads the role document for a role name
 * @returns Express middleware function
 */
export const generateCheckAccess = ({
  permissionType,
  permissions,
  bodyProps = {} as Record<Permissions, string[]>,
  skipCheck,
  getRoleByName,
}: {
  permissionType: PermissionTypes;
  permissions: Permissions[];
  bodyProps?: Record<Permissions, string[]>;
  skipCheck?: (req?: ServerRequest) => boolean;
  getRoleByName: (roleName: string, fieldsToSelect?: string | string[]) => Promise<IRole | null>;
}): ((req: ServerRequest, res: ServerResponse, next: NextFunction) => Promise<unknown>) => {
  const middleware = async (req: ServerRequest, res: ServerResponse, next: NextFunction) => {
    try {
      const hasAccess = await checkAccess({
        req,
        user: req.user as IUser,
        permissionType,
        permissions,
        bodyProps,
        checkObject: req.body,
        skipCheck,
        getRoleByName,
      });
      if (!hasAccess) {
        logger.warn(
          `[${permissionType}] Forbidden: "${req.originalUrl}" - Insufficient permissions for User ${req.user?.id}: ${permissions.join(', ')}`,
        );
        return res.status(403).json({ message: 'Forbidden: Insufficient permissions' });
      }
      return next();
    } catch (error) {
      logger.error(error);
      return res.status(500).json({
        message: `Server error: ${error instanceof Error ? error.message : 'Unknown error'}`,
      });
    }
  };
  return middleware;
};

View file

@ -0,0 +1 @@
export * from './access';

View file

@ -0,0 +1,24 @@
import { z } from 'zod';
import { AuthKeys, googleBaseSchema } from 'librechat-data-provider';
/** Model parameters for Google endpoints, inferred from the shared base schema */
export type GoogleParameters = z.infer<typeof googleBaseSchema>;

/** Credential payload: a service-account key and/or an API key, keyed by AuthKeys */
export type GoogleCredentials = {
  [AuthKeys.GOOGLE_SERVICE_KEY]?: string;
  [AuthKeys.GOOGLE_API_KEY]?: string;
};

/**
 * Configuration options for the getLLMConfig function
 */
export interface GoogleConfigOptions {
  /** Model parameter overrides to merge into the LLM config */
  modelOptions?: Partial<GoogleParameters>;
  /** Base URL override — presumably a reverse-proxy endpoint; TODO confirm against getLLMConfig */
  reverseProxyUrl?: string;
  /** Extra query-string parameters appended to requests */
  defaultQuery?: Record<string, string | undefined>;
  /** Additional HTTP headers to send with requests */
  headers?: Record<string, string>;
  /** HTTP(S) proxy URL to route requests through */
  proxy?: string;
  /** Whether responses should be streamed */
  streaming?: boolean;
  /** NOTE(review): likely toggles auth-via-header vs. query param — verify in getLLMConfig */
  authHeader?: boolean;
  /** Arbitrary params to add on top of the generated config */
  addParams?: Record<string, unknown>;
  /** Param names to remove from the generated config */
  dropParams?: string[];
}

View file

@ -1,5 +1,6 @@
export * from './azure';
export * from './events';
export * from './google';
export * from './mistral';
export * from './openai';
export * from './run';

View file

@ -1,6 +1,7 @@
import { z } from 'zod';
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
import type { TEndpointOption, TAzureConfig, TEndpoint } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { OpenAIClientOptions } from '@librechat/agents';
import type { AzureOptions } from './azure';
@ -9,7 +10,7 @@ export type OpenAIParameters = z.infer<typeof openAISchema>;
/**
* Configuration options for the getLLMConfig function
*/
export interface LLMConfigOptions {
export interface OpenAIConfigOptions {
modelOptions?: Partial<OpenAIParameters>;
reverseProxyUrl?: string;
defaultQuery?: Record<string, string | undefined>;
@ -33,6 +34,7 @@ export type ClientOptions = OpenAIClientOptions & {
export interface LLMConfigResult {
llmConfig: ClientOptions;
configOptions: OpenAIConfiguration;
tools?: BindToolsInput[];
}
/**

View file

@ -1,10 +1,12 @@
import type { AgentModelParameters, EModelEndpoint } from 'librechat-data-provider';
import type { Providers, ClientOptions } from '@librechat/agents';
import type { AgentModelParameters } from 'librechat-data-provider';
import type { OpenAIConfiguration } from './openai';
export type RunLLMConfig = {
provider: EModelEndpoint;
provider: Providers;
streaming: boolean;
streamUsage: boolean;
usage?: boolean;
configuration?: OpenAIConfiguration;
} & AgentModelParameters;
} & AgentModelParameters &
ClientOptions;

View file

@ -0,0 +1,429 @@
import { resolveHeaders } from './env';
import type { TUser } from 'librechat-data-provider';
/**
 * Builds a fully-populated TUser fixture for tests.
 * Any subset of fields may be replaced via `overrides`.
 */
function createTestUser(overrides: Partial<TUser> = {}): TUser {
  const timestamp = new Date('2021-01-01').toISOString();
  const baseUser: TUser = {
    id: 'test-user-id',
    username: 'testuser',
    email: 'test@example.com',
    name: 'Test User',
    avatar: 'https://example.com/avatar.png',
    provider: 'email',
    role: 'user',
    createdAt: timestamp,
    updatedAt: timestamp,
  };
  return { ...baseUser, ...overrides };
}
// Test suite for `resolveHeaders`: verifies substitution of `${ENV_VAR}`
// environment placeholders, `{{LIBRECHAT_USER_*}}` user-field placeholders,
// and `{{CUSTOM_VAR}}` caller-supplied variables inside header maps.
describe('resolveHeaders', () => {
  beforeEach(() => {
    // Set up test environment variables
    process.env.TEST_API_KEY = 'test-api-key-value';
    process.env.ANOTHER_SECRET = 'another-secret-value';
  });

  afterEach(() => {
    // Clean up environment variables
    delete process.env.TEST_API_KEY;
    delete process.env.ANOTHER_SECRET;
  });

  it('should return empty object when headers is undefined', () => {
    const result = resolveHeaders(undefined);
    expect(result).toEqual({});
  });

  it('should return empty object when headers is null', () => {
    const result = resolveHeaders(null as unknown as Record<string, string> | undefined);
    expect(result).toEqual({});
  });

  it('should return empty object when headers is empty', () => {
    const result = resolveHeaders({});
    expect(result).toEqual({});
  });

  it('should process environment variables in headers', () => {
    const headers = {
      Authorization: '${TEST_API_KEY}',
      'X-Secret': '${ANOTHER_SECRET}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers);
    expect(result).toEqual({
      Authorization: 'test-api-key-value',
      'X-Secret': 'another-secret-value',
      'Content-Type': 'application/json',
    });
  });

  it('should process user ID placeholder when user has id', () => {
    const user = { id: 'test-user-123' };
    const headers = {
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'User-Id': 'test-user-123',
      'Content-Type': 'application/json',
    });
  });

  it('should not process user ID placeholder when user is undefined', () => {
    const headers = {
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers);
    expect(result).toEqual({
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    });
  });

  it('should not process user ID placeholder when user has no id', () => {
    const user = { id: '' };
    const headers = {
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    });
  });

  it('should process full user object placeholders', () => {
    const user = createTestUser({
      id: 'user-123',
      email: 'test@example.com',
      username: 'testuser',
      name: 'Test User',
      role: 'admin',
    });
    const headers = {
      'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'User-Name': '{{LIBRECHAT_USER_NAME}}',
      'User-Username': '{{LIBRECHAT_USER_USERNAME}}',
      'User-Role': '{{LIBRECHAT_USER_ROLE}}',
      'User-Id': '{{LIBRECHAT_USER_ID}}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'User-Email': 'test@example.com',
      'User-Name': 'Test User',
      'User-Username': 'testuser',
      'User-Role': 'admin',
      'User-Id': 'user-123',
      'Content-Type': 'application/json',
    });
  });

  it('should handle missing user fields gracefully', () => {
    const user = createTestUser({
      id: 'user-123',
      email: 'test@example.com',
      username: undefined, // explicitly set to undefined
    });
    const headers = {
      'User-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'User-Username': '{{LIBRECHAT_USER_USERNAME}}',
      'Non-Existent': '{{LIBRECHAT_USER_NONEXISTENT}}',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'User-Email': 'test@example.com',
      'User-Username': '', // Empty string for missing field
      'Non-Existent': '{{LIBRECHAT_USER_NONEXISTENT}}', // Unchanged for non-existent field
    });
  });

  it('should process custom user variables', () => {
    const user = { id: 'user-123' };
    const customUserVars = {
      CUSTOM_TOKEN: 'user-specific-token',
      REGION: 'us-west-1',
    };
    const headers = {
      Authorization: 'Bearer {{CUSTOM_TOKEN}}',
      'X-Region': '{{REGION}}',
      'X-System-Key': '${TEST_API_KEY}',
      'X-User-Id': '{{LIBRECHAT_USER_ID}}',
    };
    const result = resolveHeaders(headers, user, customUserVars);
    expect(result).toEqual({
      Authorization: 'Bearer user-specific-token',
      'X-Region': 'us-west-1',
      'X-System-Key': 'test-api-key-value',
      'X-User-Id': 'user-123',
    });
  });

  it('should prioritize custom user variables over user fields', () => {
    const user = createTestUser({
      id: 'user-123',
      email: 'user-email@example.com',
    });
    const customUserVars = {
      LIBRECHAT_USER_EMAIL: 'custom-email@example.com',
    };
    const headers = {
      'Test-Email': '{{LIBRECHAT_USER_EMAIL}}',
    };
    const result = resolveHeaders(headers, user, customUserVars);
    expect(result).toEqual({
      'Test-Email': 'custom-email@example.com',
    });
  });

  it('should handle boolean user fields', () => {
    const user = createTestUser({
      id: 'user-123',
      // Note: TUser doesn't have these boolean fields, so we'll test with string fields
      role: 'admin',
    });
    const headers = {
      'User-Role': '{{LIBRECHAT_USER_ROLE}}',
      'User-Id': '{{LIBRECHAT_USER_ID}}',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'User-Role': 'admin',
      'User-Id': 'user-123',
    });
  });

  it('should handle multiple occurrences of the same placeholder', () => {
    const user = createTestUser({
      id: 'user-123',
      email: 'test@example.com',
    });
    const headers = {
      'Primary-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'Secondary-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'Backup-Email': '{{LIBRECHAT_USER_EMAIL}}',
    };
    const result = resolveHeaders(headers, user);
    expect(result).toEqual({
      'Primary-Email': 'test@example.com',
      'Secondary-Email': 'test@example.com',
      'Backup-Email': 'test@example.com',
    });
  });

  it('should handle mixed variable types in the same headers object', () => {
    const user = createTestUser({
      id: 'user-123',
      email: 'test@example.com',
    });
    const customUserVars = {
      CUSTOM_TOKEN: 'secret-token',
    };
    const headers = {
      Authorization: 'Bearer {{CUSTOM_TOKEN}}',
      'X-User-Id': '{{LIBRECHAT_USER_ID}}',
      'X-System-Key': '${TEST_API_KEY}',
      'X-User-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'Content-Type': 'application/json',
    };
    const result = resolveHeaders(headers, user, customUserVars);
    expect(result).toEqual({
      Authorization: 'Bearer secret-token',
      'X-User-Id': 'user-123',
      'X-System-Key': 'test-api-key-value',
      'X-User-Email': 'test@example.com',
      'Content-Type': 'application/json',
    });
  });

  it('should not modify the original headers object', () => {
    const originalHeaders = {
      Authorization: '${TEST_API_KEY}',
      'User-Id': '{{LIBRECHAT_USER_ID}}',
    };
    const user = { id: 'user-123' };
    const result = resolveHeaders(originalHeaders, user);
    // Verify the result is processed
    expect(result).toEqual({
      Authorization: 'test-api-key-value',
      'User-Id': 'user-123',
    });
    // Verify the original object is unchanged
    expect(originalHeaders).toEqual({
      Authorization: '${TEST_API_KEY}',
      'User-Id': '{{LIBRECHAT_USER_ID}}',
    });
  });

  it('should handle special characters in custom variable names', () => {
    const user = { id: 'user-123' };
    const customUserVars = {
      'CUSTOM-VAR': 'dash-value',
      CUSTOM_VAR: 'underscore-value',
      'CUSTOM.VAR': 'dot-value',
    };
    const headers = {
      'Dash-Header': '{{CUSTOM-VAR}}',
      'Underscore-Header': '{{CUSTOM_VAR}}',
      'Dot-Header': '{{CUSTOM.VAR}}',
    };
    const result = resolveHeaders(headers, user, customUserVars);
    expect(result).toEqual({
      'Dash-Header': 'dash-value',
      'Underscore-Header': 'underscore-value',
      'Dot-Header': 'dot-value',
    });
  });

  // Additional comprehensive tests for all user field placeholders
  it('should replace all allowed user field placeholders', () => {
    const user = {
      id: 'abc',
      name: 'Test User',
      username: 'testuser',
      email: 'me@example.com',
      provider: 'google',
      role: 'admin',
      googleId: 'gid',
      facebookId: 'fbid',
      openidId: 'oid',
      samlId: 'sid',
      ldapId: 'lid',
      githubId: 'ghid',
      discordId: 'dcid',
      appleId: 'aid',
      emailVerified: true,
      twoFactorEnabled: false,
      termsAccepted: true,
    };
    const headers = {
      'X-User-ID': '{{LIBRECHAT_USER_ID}}',
      'X-User-Name': '{{LIBRECHAT_USER_NAME}}',
      'X-User-Username': '{{LIBRECHAT_USER_USERNAME}}',
      'X-User-Email': '{{LIBRECHAT_USER_EMAIL}}',
      'X-User-Provider': '{{LIBRECHAT_USER_PROVIDER}}',
      'X-User-Role': '{{LIBRECHAT_USER_ROLE}}',
      'X-User-GoogleId': '{{LIBRECHAT_USER_GOOGLEID}}',
      'X-User-FacebookId': '{{LIBRECHAT_USER_FACEBOOKID}}',
      'X-User-OpenIdId': '{{LIBRECHAT_USER_OPENIDID}}',
      'X-User-SamlId': '{{LIBRECHAT_USER_SAMLID}}',
      'X-User-LdapId': '{{LIBRECHAT_USER_LDAPID}}',
      'X-User-GithubId': '{{LIBRECHAT_USER_GITHUBID}}',
      'X-User-DiscordId': '{{LIBRECHAT_USER_DISCORDID}}',
      'X-User-AppleId': '{{LIBRECHAT_USER_APPLEID}}',
      'X-User-EmailVerified': '{{LIBRECHAT_USER_EMAILVERIFIED}}',
      'X-User-TwoFactorEnabled': '{{LIBRECHAT_USER_TWOFACTORENABLED}}',
      'X-User-TermsAccepted': '{{LIBRECHAT_USER_TERMSACCEPTED}}',
    };
    const result = resolveHeaders(headers, user);
    expect(result['X-User-ID']).toBe('abc');
    expect(result['X-User-Name']).toBe('Test User');
    expect(result['X-User-Username']).toBe('testuser');
    expect(result['X-User-Email']).toBe('me@example.com');
    expect(result['X-User-Provider']).toBe('google');
    expect(result['X-User-Role']).toBe('admin');
    expect(result['X-User-GoogleId']).toBe('gid');
    expect(result['X-User-FacebookId']).toBe('fbid');
    expect(result['X-User-OpenIdId']).toBe('oid');
    expect(result['X-User-SamlId']).toBe('sid');
    expect(result['X-User-LdapId']).toBe('lid');
    expect(result['X-User-GithubId']).toBe('ghid');
    expect(result['X-User-DiscordId']).toBe('dcid');
    expect(result['X-User-AppleId']).toBe('aid');
    expect(result['X-User-EmailVerified']).toBe('true');
    expect(result['X-User-TwoFactorEnabled']).toBe('false');
    expect(result['X-User-TermsAccepted']).toBe('true');
  });

  it('should handle multiple placeholders in one value', () => {
    const user = { id: 'abc', email: 'me@example.com' };
    const headers = {
      'X-Multi': 'User: {{LIBRECHAT_USER_ID}}, Env: ${TEST_API_KEY}, Custom: {{MY_CUSTOM}}',
    };
    const customVars = { MY_CUSTOM: 'custom-value' };
    const result = resolveHeaders(headers, user, customVars);
    expect(result['X-Multi']).toBe('User: abc, Env: test-api-key-value, Custom: custom-value');
  });

  it('should leave unknown placeholders unchanged', () => {
    const user = { id: 'abc' };
    const headers = {
      'X-Unknown': '{{SOMETHING_NOT_RECOGNIZED}}',
      'X-Known': '{{LIBRECHAT_USER_ID}}',
    };
    const result = resolveHeaders(headers, user);
    expect(result['X-Unknown']).toBe('{{SOMETHING_NOT_RECOGNIZED}}');
    expect(result['X-Known']).toBe('abc');
  });

  it('should handle a mix of all types', () => {
    const user = {
      id: 'abc',
      email: 'me@example.com',
      emailVerified: true,
      twoFactorEnabled: false,
    };
    const headers = {
      'X-User': '{{LIBRECHAT_USER_ID}}',
      'X-Env': '${TEST_API_KEY}',
      'X-Custom': '{{MY_CUSTOM}}',
      'X-Multi': 'ID: {{LIBRECHAT_USER_ID}}, ENV: ${TEST_API_KEY}, CUSTOM: {{MY_CUSTOM}}',
      'X-Unknown': '{{NOT_A_REAL_PLACEHOLDER}}',
      'X-Empty': '',
      'X-Boolean': '{{LIBRECHAT_USER_EMAILVERIFIED}}',
    };
    const customVars = { MY_CUSTOM: 'custom-value' };
    const result = resolveHeaders(headers, user, customVars);
    expect(result['X-User']).toBe('abc');
    expect(result['X-Env']).toBe('test-api-key-value');
    expect(result['X-Custom']).toBe('custom-value');
    expect(result['X-Multi']).toBe('ID: abc, ENV: test-api-key-value, CUSTOM: custom-value');
    expect(result['X-Unknown']).toBe('{{NOT_A_REAL_PLACEHOLDER}}');
    expect(result['X-Empty']).toBe('');
    expect(result['X-Boolean']).toBe('true');
  });
});

View file

@ -0,0 +1,170 @@
import { extractEnvVariable } from 'librechat-data-provider';
import type { TUser, MCPOptions } from 'librechat-data-provider';
/**
 * List of allowed user fields that can be used in MCP environment variables.
 * These are non-sensitive string/boolean fields from the IUser interface.
 * Each entry corresponds to a `{{LIBRECHAT_USER_<FIELD>}}` placeholder,
 * with the field name upper-cased (e.g. `emailVerified` ->
 * `{{LIBRECHAT_USER_EMAILVERIFIED}}`).
 */
const ALLOWED_USER_FIELDS = [
  'id',
  'name',
  'username',
  'email',
  'provider',
  'role',
  'googleId',
  'facebookId',
  'openidId',
  'samlId',
  'ldapId',
  'githubId',
  'discordId',
  'appleId',
  'emailVerified',
  'twoFactorEnabled',
  'termsAccepted',
] as const;
/**
 * Replaces `{{LIBRECHAT_USER_<FIELD>}}` placeholders in a string with values
 * from the given user object. Only the non-sensitive fields listed in
 * ALLOWED_USER_FIELDS are considered.
 *
 * @param value - The string value to process
 * @param user - The user object supplying replacement values
 * @returns The processed string with placeholders replaced
 */
function processUserPlaceholders(value: string, user?: TUser): string {
  if (!user || typeof value !== 'string') {
    return value;
  }
  for (const field of ALLOWED_USER_FIELDS) {
    const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
    if (!value.includes(placeholder)) {
      continue;
    }
    // Skip replacement if field doesn't exist in user object
    if (!(field in user)) {
      continue;
    }
    const fieldValue = user[field as keyof TUser];
    // Special case for 'id' field: leave the placeholder intact when the id
    // is undefined or empty, so downstream consumers never get a blank id.
    if (field === 'id' && (fieldValue === undefined || fieldValue === '')) {
      continue;
    }
    const replacementValue = fieldValue == null ? '' : String(fieldValue);
    // Literal global replace via split/join instead of `new RegExp(placeholder, 'g')`:
    // the placeholder contains `{`/`}` regex metacharacters (it only matched
    // literally because `{{...}}` is an invalid quantifier), and a regex-based
    // String.replace would also interpret `$&`-style sequences inside the
    // replacement value, corrupting user data that contains `$` patterns.
    value = value.split(placeholder).join(replacementValue);
  }
  return value;
}
/**
 * Resolves every supported placeholder type within a single string value.
 *
 * Substitution order:
 *   1. Custom user variables (`{{MY_VAR}}` from user settings)
 *   2. User field placeholders (`{{LIBRECHAT_USER_EMAIL}}`, `{{LIBRECHAT_USER_ID}}`, ...)
 *   3. System environment variables (`${ENV_VAR}`)
 *
 * @param originalValue - The raw string potentially containing placeholders
 * @param customUserVars - Optional user-configured variable name/value pairs
 * @param user - Optional user object for `LIBRECHAT_USER_*` replacements
 * @returns The fully resolved string
 */
function processSingleValue({
  originalValue,
  customUserVars,
  user,
}: {
  originalValue: string;
  customUserVars?: Record<string, string>;
  user?: TUser;
}): string {
  let resolved = originalValue;

  // 1) Custom user variables
  for (const [varName, varVal] of Object.entries(customUserVars ?? {})) {
    /** Escape regex metacharacters so arbitrary variable names match literally */
    const escapedVarName = varName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const placeholderRegex = new RegExp(`\\{\\{${escapedVarName}\\}\\}`, 'g');
    resolved = resolved.replace(placeholderRegex, varVal);
  }

  // 2) User field placeholders
  resolved = processUserPlaceholders(resolved, user);

  // 3) System environment variables
  return extractEnvVariable(resolved);
}
/**
 * Returns a deep copy of the given MCP options with all placeholders
 * (custom user variables, user fields, environment variables) resolved
 * in `env`, `headers`, and `url` wherever those properties are present.
 *
 * Note: `env` and `headers` live on different branches of the MCPOptions
 * union type, hence the `in` checks.
 *
 * @param obj - The MCP options to process (never mutated)
 * @param user - The user object containing all user fields
 * @param customUserVars - Variables the user set in settings
 * @returns The processed options with placeholders resolved
 */
export function processMCPEnv(
  obj: Readonly<MCPOptions>,
  user?: TUser,
  customUserVars?: Record<string, string>,
): MCPOptions {
  if (obj == null) {
    return obj;
  }

  /** Resolves placeholders in every value of a string record */
  const resolveRecord = (record: Record<string, string>): Record<string, string> =>
    Object.fromEntries(
      Object.entries(record).map(([key, originalValue]) => [
        key,
        processSingleValue({ originalValue, customUserVars, user }),
      ]),
    );

  const processed: MCPOptions = structuredClone(obj);
  if ('env' in processed && processed.env) {
    processed.env = resolveRecord(processed.env);
  }
  if ('headers' in processed && processed.headers) {
    processed.headers = resolveRecord(processed.headers);
  }
  if ('url' in processed && processed.url) {
    processed.url = processSingleValue({ originalValue: processed.url, customUserVars, user });
  }
  return processed;
}
/**
 * Resolves header values by replacing user placeholders, custom variables,
 * and environment variables.
 *
 * @param headers - The headers object to process
 * @param user - Optional user object for user-field placeholders (may be a
 *   partial object carrying only `id`)
 * @param customUserVars - Optional custom user variables
 * @returns A new headers object with all placeholders replaced
 */
export function resolveHeaders(
  headers: Record<string, string> | undefined,
  user?: Partial<TUser> | { id: string },
  customUserVars?: Record<string, string>,
) {
  // Anything that is not a plain object is returned as a shallow copy, unprocessed
  if (!headers || typeof headers !== 'object' || Array.isArray(headers)) {
    return { ...(headers ?? {}) };
  }
  const resolvedHeaders: Record<string, string> = {};
  for (const [key, originalValue] of Object.entries(headers)) {
    resolvedHeaders[key] = processSingleValue({
      originalValue,
      customUserVars,
      user: user as TUser,
    });
  }
  return resolvedHeaders;
}

View file

@ -14,3 +14,13 @@ export function sendEvent(res: ServerResponse, event: ServerSentEvent): void {
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
}
/**
 * Sends error data in Server Sent Events format and ends the response.
 * @param res - The server response.
 * @param message - The error message.
 */
export function handleError(res: ServerResponse, message: string): void {
  const payload = `event: error\ndata: ${JSON.stringify(message)}\n\n`;
  res.write(payload);
  res.end();
}

View file

@ -1,8 +1,14 @@
export * from './axios';
export * from './azure';
export * from './common';
export * from './env';
export * from './events';
export * from './files';
export * from './generators';
export * from './key';
export * from './llm';
export * from './math';
export * from './openid';
export * from './tempChatRetention';
export { default as Tokenizer } from './tokenizer';
export * from './yaml';

View file

@ -0,0 +1,97 @@
import fs from 'fs';
import path from 'path';
import axios from 'axios';
import { loadServiceKey } from './key';
// Mock the filesystem, HTTP client, and logger so loadServiceKey can be
// exercised without touching disk, the network, or real logging.
jest.mock('fs');
jest.mock('axios');
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    error: jest.fn(),
  },
}));
describe('loadServiceKey', () => {
  // A structurally complete Google service-account key reused across tests.
  const mockServiceKey = {
    type: 'service_account',
    project_id: 'test-project',
    private_key_id: 'test-key-id',
    private_key: '-----BEGIN PRIVATE KEY-----\ntest-key\n-----END PRIVATE KEY-----',
    client_email: 'test@test-project.iam.gserviceaccount.com',
    client_id: '123456789',
    auth_uri: 'https://accounts.google.com/o/oauth2/auth',
    token_uri: 'https://oauth2.googleapis.com/token',
    auth_provider_x509_cert_url: 'https://www.googleapis.com/oauth2/v1/certs',
    client_x509_cert_url:
      'https://www.googleapis.com/robot/v1/metadata/x509/test%40test-project.iam.gserviceaccount.com',
  };
  beforeEach(() => {
    jest.clearAllMocks();
  });
  it('should return null if keyPath is empty', async () => {
    const result = await loadServiceKey('');
    expect(result).toBeNull();
  });
  it('should parse stringified JSON directly', async () => {
    const jsonString = JSON.stringify(mockServiceKey);
    const result = await loadServiceKey(jsonString);
    expect(result).toEqual(mockServiceKey);
  });
  it('should parse stringified JSON with leading/trailing whitespace', async () => {
    const jsonString = ` ${JSON.stringify(mockServiceKey)} `;
    const result = await loadServiceKey(jsonString);
    expect(result).toEqual(mockServiceKey);
  });
  it('should load from file path', async () => {
    const filePath = '/path/to/service-key.json';
    (fs.readFileSync as jest.Mock).mockReturnValue(JSON.stringify(mockServiceKey));
    const result = await loadServiceKey(filePath);
    expect(fs.readFileSync).toHaveBeenCalledWith(path.resolve(filePath), 'utf8');
    expect(result).toEqual(mockServiceKey);
  });
  it('should load from URL', async () => {
    const url = 'https://example.com/service-key.json';
    (axios.get as jest.Mock).mockResolvedValue({ data: mockServiceKey });
    const result = await loadServiceKey(url);
    expect(axios.get).toHaveBeenCalledWith(url);
    expect(result).toEqual(mockServiceKey);
  });
  it('should handle invalid JSON string', async () => {
    const invalidJson = '{ invalid json }';
    const result = await loadServiceKey(invalidJson);
    expect(result).toBeNull();
  });
  it('should handle file read errors', async () => {
    const filePath = '/path/to/nonexistent.json';
    (fs.readFileSync as jest.Mock).mockImplementation(() => {
      throw new Error('File not found');
    });
    const result = await loadServiceKey(filePath);
    expect(result).toBeNull();
  });
  it('should handle URL fetch errors', async () => {
    const url = 'https://example.com/service-key.json';
    (axios.get as jest.Mock).mockRejectedValue(new Error('Network error'));
    const result = await loadServiceKey(url);
    expect(result).toBeNull();
  });
  it('should validate service key format', async () => {
    const invalidServiceKey = { invalid: 'key' };
    const result = await loadServiceKey(JSON.stringify(invalidServiceKey));
    expect(result).toEqual(invalidServiceKey); // It returns the object as-is, validation is minimal
  });
});

View file

@ -0,0 +1,79 @@
import fs from 'fs';
import path from 'path';
import axios from 'axios';
import { logger } from '@librechat/data-schemas';
/**
 * Shape of a Google Cloud service-account key file.
 *
 * Every field is optional because loadServiceKey performs only minimal
 * validation (the parsed value must be a non-null object); unknown extra
 * fields are preserved via the index signature.
 */
export interface GoogleServiceKey {
  type?: string;
  project_id?: string;
  private_key_id?: string;
  private_key?: string;
  client_email?: string;
  client_id?: string;
  auth_uri?: string;
  token_uri?: string;
  auth_provider_x509_cert_url?: string;
  client_x509_cert_url?: string;
  [key: string]: unknown;
}
/**
 * Load Google service key from file path, URL, or stringified JSON.
 *
 * @param keyPath - The path to the service key file, a URL to fetch it from,
 *   or the key itself as stringified JSON
 * @returns The parsed service key object, or null on any failure
 */
export async function loadServiceKey(keyPath: string): Promise<GoogleServiceKey | null> {
  if (!keyPath) {
    return null;
  }

  let raw: unknown;
  if (keyPath.trim().startsWith('{')) {
    // Inline stringified JSON
    try {
      raw = JSON.parse(keyPath);
    } catch (error) {
      logger.error('Failed to parse service key from stringified JSON', error);
      return null;
    }
  } else if (/^https?:\/\//.test(keyPath)) {
    // Remote URL
    try {
      const response = await axios.get(keyPath);
      raw = response.data;
    } catch (error) {
      logger.error(`Failed to fetch the service key from URL: ${keyPath}`, error);
      return null;
    }
  } else {
    // Local file path
    try {
      const absolutePath = path.isAbsolute(keyPath) ? keyPath : path.resolve(keyPath);
      raw = JSON.parse(fs.readFileSync(absolutePath, 'utf8'));
    } catch (error) {
      logger.error(`Failed to load service key from file: ${keyPath}`, error);
      return null;
    }
  }

  // A URL may return JSON as text; parse it once more in that case
  if (typeof raw === 'string') {
    try {
      raw = JSON.parse(raw);
    } catch (parseError) {
      logger.error(`Failed to parse service key JSON from ${keyPath}`, parseError);
      return null;
    }
  }

  // Minimal validation: the result must be a non-null object
  if (!raw || typeof raw !== 'object') {
    logger.error(`Invalid service key format from ${keyPath}`);
    return null;
  }
  return raw as GoogleServiceKey;
}

View file

@ -0,0 +1,189 @@
import { extractLibreChatParams } from './llm';
describe('extractLibreChatParams', () => {
  it('should return defaults when options is undefined', () => {
    const result = extractLibreChatParams(undefined);
    expect(result.resendFiles).toBe(true);
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({});
  });
  // NOTE(review): despite the title, this calls the function with no argument
  // rather than an explicit null — confirm the intent or pass `null`.
  it('should return defaults when options is null', () => {
    const result = extractLibreChatParams();
    expect(result.resendFiles).toBe(true);
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({});
  });
  it('should extract all LibreChat params and leave model options', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'You are a helpful assistant',
      maxContextTokens: 4096,
      modelLabel: 'GPT-4',
      model: 'gpt-4',
      temperature: 0.7,
      max_tokens: 1000,
    };
    const result = extractLibreChatParams(options);
    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBe('You are a helpful assistant');
    expect(result.maxContextTokens).toBe(4096);
    expect(result.modelLabel).toBe('GPT-4');
    expect(result.modelOptions).toEqual({
      model: 'gpt-4',
      temperature: 0.7,
      max_tokens: 1000,
    });
  });
  // Explicit nulls for promptPrefix/modelLabel must be preserved, not defaulted
  it('should handle null values for LibreChat params', () => {
    const options = {
      resendFiles: true,
      promptPrefix: null,
      maxContextTokens: 2048,
      modelLabel: null,
      model: 'claude-3',
    };
    const result = extractLibreChatParams(options);
    expect(result.resendFiles).toBe(true);
    expect(result.promptPrefix).toBeNull();
    expect(result.maxContextTokens).toBe(2048);
    expect(result.modelLabel).toBeNull();
    expect(result.modelOptions).toEqual({
      model: 'claude-3',
    });
  });
  it('should use default for resendFiles when not provided', () => {
    const options = {
      promptPrefix: 'Test prefix',
      model: 'gpt-3.5-turbo',
      temperature: 0.5,
    };
    const result = extractLibreChatParams(options);
    expect(result.resendFiles).toBe(true); // Should use default
    expect(result.promptPrefix).toBe('Test prefix');
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({
      model: 'gpt-3.5-turbo',
      temperature: 0.5,
    });
  });
  it('should handle empty options object', () => {
    const result = extractLibreChatParams({});
    expect(result.resendFiles).toBe(true); // Should use default
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({});
  });
  it('should only extract known LibreChat params', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'Custom prompt',
      maxContextTokens: 8192,
      modelLabel: 'Custom Model',
      // Model options
      model: 'gpt-4',
      temperature: 0.9,
      top_p: 0.95,
      frequency_penalty: 0.5,
      presence_penalty: 0.5,
      // Unknown params should stay in modelOptions
      unknownParam: 'should remain',
      customSetting: 123,
    };
    const result = extractLibreChatParams(options);
    // LibreChat params extracted
    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBe('Custom prompt');
    expect(result.maxContextTokens).toBe(8192);
    expect(result.modelLabel).toBe('Custom Model');
    // Model options should include everything else
    expect(result.modelOptions).toEqual({
      model: 'gpt-4',
      temperature: 0.9,
      top_p: 0.95,
      frequency_penalty: 0.5,
      presence_penalty: 0.5,
      unknownParam: 'should remain',
      customSetting: 123,
    });
  });
  it('should not mutate the original options object', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'Test',
      model: 'gpt-4',
      temperature: 0.7,
    };
    const originalOptions = { ...options };
    extractLibreChatParams(options);
    // Original object should remain unchanged
    expect(options).toEqual(originalOptions);
  });
  it('should handle undefined values for optional LibreChat params', () => {
    const options = {
      resendFiles: false,
      promptPrefix: undefined,
      maxContextTokens: undefined,
      modelLabel: undefined,
      model: 'claude-2',
    };
    const result = extractLibreChatParams(options);
    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({
      model: 'claude-2',
    });
  });
  it('should handle mixed null and undefined values', () => {
    const options = {
      promptPrefix: null,
      maxContextTokens: undefined,
      modelLabel: null,
      model: 'gpt-3.5-turbo',
      stop: ['\\n', '\\n\\n'],
    };
    const result = extractLibreChatParams(options);
    expect(result.resendFiles).toBe(true); // default
    expect(result.promptPrefix).toBeNull();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeNull();
    expect(result.modelOptions).toEqual({
      model: 'gpt-3.5-turbo',
      stop: ['\\n', '\\n\\n'],
    });
  });
});

View file

@ -0,0 +1,47 @@
import { librechat } from 'librechat-data-provider';
import type { DynamicSettingProps } from 'librechat-data-provider';
type LibreChatKeys = keyof typeof librechat;
type LibreChatParams = {
modelOptions: Omit<NonNullable<DynamicSettingProps['conversation']>, LibreChatKeys>;
resendFiles: boolean;
promptPrefix?: string | null;
maxContextTokens?: number;
modelLabel?: string | null;
};
/**
 * Separates LibreChat-specific parameters from model options.
 *
 * @param options - The combined conversation options object (not mutated)
 * @returns The LibreChat params (`resendFiles`, `promptPrefix`,
 *   `maxContextTokens`, `modelLabel`) plus `modelOptions` containing every
 *   remaining key
 */
export function extractLibreChatParams(
  options?: DynamicSettingProps['conversation'],
): LibreChatParams {
  if (!options) {
    return {
      modelOptions: {} as Omit<NonNullable<DynamicSettingProps['conversation']>, LibreChatKeys>,
      resendFiles: librechat.resendFiles.default as boolean,
    };
  }

  // Destructure the LibreChat-specific keys and collect everything else via
  // rest-spread; this replaces the previous `(delete copy.key, options.key)`
  // comma-operator sequences, which were hard to read and easy to get wrong.
  const {
    resendFiles: rawResendFiles,
    promptPrefix,
    maxContextTokens,
    modelLabel,
    ...modelOptions
  } = options;

  return {
    modelOptions: modelOptions as Omit<
      NonNullable<DynamicSettingProps['conversation']>,
      LibreChatKeys
    >,
    maxContextTokens,
    promptPrefix,
    // `??` (not a destructuring default) so an explicit `null` also falls
    // back to the configured default, matching the original semantics.
    resendFiles: rawResendFiles ?? (librechat.resendFiles.default as boolean),
    modelLabel,
  };
}

View file

@ -0,0 +1,45 @@
/**
 * Evaluates a mathematical expression provided as a string and returns the result.
 *
 * If the input is already a number, it returns the number as is.
 * If the input is not a string or contains invalid characters, an error is thrown
 * (or the fallback is returned when one is provided).
 * If the evaluated result is not a number, an error is thrown (or the fallback returned).
 *
 * @param str - The mathematical expression to evaluate, or a number.
 * @param fallbackValue - The default value to return if the input is not a string
 *   or number, or if the evaluated result is not a number.
 *
 * @returns The result of the evaluated expression or the input number.
 *
 * @throws Throws an error if the input is not a string or number, contains
 *   invalid characters, or does not evaluate to a number.
 */
export function math(str: string | number, fallbackValue?: number): number {
  // A single typeof check suffices (the original also redundantly tested
  // `typeof fallbackValue !== 'undefined'`).
  const hasFallback = typeof fallbackValue === 'number';
  if (typeof str === 'number') {
    return str;
  }
  if (typeof str !== 'string') {
    if (hasFallback) {
      return fallbackValue;
    }
    throw new Error(`str is ${typeof str}, but should be a string`);
  }
  // Whitelist: digits, whitespace, '.', and the operators + - * / % ( )
  const validStr = /^[+\-\d.\s*/%()]+$/.test(str);
  if (!validStr) {
    if (hasFallback) {
      return fallbackValue;
    }
    throw new Error('Invalid characters in string');
  }
  // SECURITY NOTE: eval() executes arbitrary code. It is tolerable here only
  // because the whitelist above restricts input to arithmetic characters;
  // keep the whitelist in sync if the accepted grammar ever grows.
  const value = eval(str);
  if (typeof value !== 'number') {
    if (hasFallback) {
      return fallbackValue;
    }
    throw new Error(`[math] str did not evaluate to a number but to a ${typeof value}`);
  }
  return value;
}

View file

@ -0,0 +1,133 @@
import {
MIN_RETENTION_HOURS,
MAX_RETENTION_HOURS,
DEFAULT_RETENTION_HOURS,
getTempChatRetentionHours,
createTempChatExpirationDate,
} from './tempChatRetention';
import type { TCustomConfig } from 'librechat-data-provider';
describe('tempChatRetention', () => {
  // Snapshot process.env so each test can mutate it freely and restore after.
  const originalEnv = process.env;
  beforeEach(() => {
    jest.resetModules();
    process.env = { ...originalEnv };
    delete process.env.TEMP_CHAT_RETENTION_HOURS;
  });
  afterAll(() => {
    process.env = originalEnv;
  });
  describe('getTempChatRetentionHours', () => {
    it('should return default retention hours when no config or env var is set', () => {
      const result = getTempChatRetentionHours();
      expect(result).toBe(DEFAULT_RETENTION_HOURS);
    });
    it('should use environment variable when set', () => {
      process.env.TEMP_CHAT_RETENTION_HOURS = '48';
      const result = getTempChatRetentionHours();
      expect(result).toBe(48);
    });
    it('should use config value when set', () => {
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 12,
        },
      };
      const result = getTempChatRetentionHours(config);
      expect(result).toBe(12);
    });
    // librechat.yaml config must override the environment variable
    it('should prioritize config over environment variable', () => {
      process.env.TEMP_CHAT_RETENTION_HOURS = '48';
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 12,
        },
      };
      const result = getTempChatRetentionHours(config);
      expect(result).toBe(12);
    });
    it('should enforce minimum retention period', () => {
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 0,
        },
      };
      const result = getTempChatRetentionHours(config);
      expect(result).toBe(MIN_RETENTION_HOURS);
    });
    it('should enforce maximum retention period', () => {
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 10000,
        },
      };
      const result = getTempChatRetentionHours(config);
      expect(result).toBe(MAX_RETENTION_HOURS);
    });
    it('should handle invalid environment variable', () => {
      process.env.TEMP_CHAT_RETENTION_HOURS = 'invalid';
      const result = getTempChatRetentionHours();
      expect(result).toBe(DEFAULT_RETENTION_HOURS);
    });
    it('should handle invalid config value', () => {
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 'invalid' as unknown as number,
        },
      };
      const result = getTempChatRetentionHours(config);
      expect(result).toBe(DEFAULT_RETENTION_HOURS);
    });
  });
  describe('createTempChatExpirationDate', () => {
    it('should create expiration date with default retention period', () => {
      const result = createTempChatExpirationDate();
      const expectedDate = new Date();
      expectedDate.setHours(expectedDate.getHours() + DEFAULT_RETENTION_HOURS);
      // Allow for small time differences in test execution
      const timeDiff = Math.abs(result.getTime() - expectedDate.getTime());
      expect(timeDiff).toBeLessThan(1000); // Less than 1 second difference
    });
    it('should create expiration date with custom retention period', () => {
      const config: Partial<TCustomConfig> = {
        interface: {
          temporaryChatRetention: 12,
        },
      };
      const result = createTempChatExpirationDate(config);
      const expectedDate = new Date();
      expectedDate.setHours(expectedDate.getHours() + 12);
      // Allow for small time differences in test execution
      const timeDiff = Math.abs(result.getTime() - expectedDate.getTime());
      expect(timeDiff).toBeLessThan(1000); // Less than 1 second difference
    });
    it('should return a Date object', () => {
      const result = createTempChatExpirationDate();
      expect(result).toBeInstanceOf(Date);
    });
    it('should return a future date', () => {
      const now = new Date();
      const result = createTempChatExpirationDate();
      expect(result.getTime()).toBeGreaterThan(now.getTime());
    });
  });
});

View file

@ -0,0 +1,77 @@
import { logger } from '@librechat/data-schemas';
import type { TCustomConfig } from 'librechat-data-provider';
/**
* Default retention period for temporary chats in hours
*/
export const DEFAULT_RETENTION_HOURS = 24 * 30; // 30 days
/**
* Minimum allowed retention period in hours
*/
export const MIN_RETENTION_HOURS = 1;
/**
* Maximum allowed retention period in hours (1 year = 8760 hours)
*/
export const MAX_RETENTION_HOURS = 8760;
/**
 * Gets the temporary chat retention period from environment variables or config.
 *
 * Precedence (highest wins): config file > TEMP_CHAT_RETENTION_HOURS env var >
 * DEFAULT_RETENTION_HOURS. The final value is clamped to
 * [MIN_RETENTION_HOURS, MAX_RETENTION_HOURS], warning when out of range.
 *
 * @param config - The custom configuration object
 * @returns The retention period in hours
 */
export function getTempChatRetentionHours(config?: Partial<TCustomConfig> | null): number {
  let retentionHours = DEFAULT_RETENTION_HOURS;

  // Environment variable (lower precedence)
  const envRaw = process.env.TEMP_CHAT_RETENTION_HOURS;
  if (envRaw) {
    const envValue = parseInt(envRaw, 10);
    if (isNaN(envValue)) {
      logger.warn(
        `Invalid TEMP_CHAT_RETENTION_HOURS environment variable: ${envRaw}. Using default: ${DEFAULT_RETENTION_HOURS} hours.`,
      );
    } else {
      retentionHours = envValue;
    }
  }

  // Config file (takes precedence over the environment variable)
  const configValue = config?.interface?.temporaryChatRetention;
  if (configValue !== undefined) {
    if (typeof configValue === 'number' && !isNaN(configValue)) {
      retentionHours = configValue;
    } else {
      logger.warn(
        `Invalid temporaryChatRetention in config: ${configValue}. Using ${retentionHours} hours.`,
      );
    }
  }

  // Clamp into the allowed range, warning when out of bounds
  if (retentionHours < MIN_RETENTION_HOURS) {
    logger.warn(
      `Temporary chat retention period ${retentionHours} is below minimum ${MIN_RETENTION_HOURS} hours. Using minimum value.`,
    );
    return MIN_RETENTION_HOURS;
  }
  if (retentionHours > MAX_RETENTION_HOURS) {
    logger.warn(
      `Temporary chat retention period ${retentionHours} exceeds maximum ${MAX_RETENTION_HOURS} hours. Using maximum value.`,
    );
    return MAX_RETENTION_HOURS;
  }
  return retentionHours;
}
/**
 * Creates an expiration date for temporary chats.
 *
 * Uses Date#setHours (wall-clock hour arithmetic) rather than millisecond
 * offsets, matching the configured retention semantics.
 *
 * @param config - The custom configuration object
 * @returns The expiration date
 */
export function createTempChatExpirationDate(config?: Partial<TCustomConfig>): Date {
  const retentionHours = getTempChatRetentionHours(config);
  const expirationDate = new Date();
  expirationDate.setHours(expirationDate.getHours() + retentionHours);
  return expirationDate;
}

View file

@ -0,0 +1,11 @@
import fs from 'fs';
import yaml from 'js-yaml';
/**
 * Reads and parses a YAML file synchronously.
 *
 * NOTE(review): on failure this returns the caught error object instead of
 * throwing or returning null — callers apparently inspect the result (e.g.
 * via `instanceof Error`) before use. Confirm callers rely on this contract
 * before changing it.
 *
 * @param filepath - Path to the YAML file
 * @returns The parsed YAML content, or the caught error on failure
 */
export function loadYaml(filepath: string) {
  try {
    const fileContents = fs.readFileSync(filepath, 'utf8');
    return yaml.load(fileContents);
  } catch (e) {
    return e;
  }
}

View file

@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.7.88",
"version": "0.7.899",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View file

@ -1,4 +1,3 @@
/* eslint-disable jest/no-conditional-expect */
import { ZodError, z } from 'zod';
import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate';
import type { SettingsConfiguration } from '../src/generate';
@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => {
expect(result['data']).toEqual({ testEnum: 'option2' });
});
it('should generate a schema for enum settings with empty string option', () => {
const settings: SettingsConfiguration = [
{
key: 'testEnumWithEmpty',
description: 'A test enum setting with empty string',
type: 'enum',
default: '',
options: ['', 'option1', 'option2'],
enumMappings: {
'': 'None',
option1: 'First Option',
option2: 'Second Option',
},
component: 'slider',
columnSpan: 2,
label: 'Test Enum with Empty String',
},
];
const schema = generateDynamicSchema(settings);
const result = schema.safeParse({ testEnumWithEmpty: '' });
expect(result.success).toBeTruthy();
expect(result['data']).toEqual({ testEnumWithEmpty: '' });
// Test with non-empty option
const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' });
expect(result2.success).toBeTruthy();
expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' });
});
it('should fail for incorrect enum value', () => {
const settings: SettingsConfiguration = [
{
@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => {
expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError);
});
// Test for incomplete enumMappings
test('should throw error for incomplete enumMappings', () => {
const settingsWithIncompleteEnumMappings: SettingsConfiguration = [
{
key: 'displayMode',
type: 'enum',
component: 'dropdown',
options: ['light', 'dark', 'auto'],
enumMappings: {
light: 'Light Mode',
dark: 'Dark Mode',
// Missing mapping for 'auto'
},
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError);
});
// Test for complete enumMappings including empty string
test('should not throw error for complete enumMappings including empty string', () => {
const settingsWithCompleteEnumMappings: SettingsConfiguration = [
{
key: 'selectionMode',
type: 'enum',
component: 'slider',
options: ['', 'single', 'multiple'],
enumMappings: {
'': 'None',
single: 'Single Selection',
multiple: 'Multiple Selection',
},
default: '',
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow();
});
});
const settingsConfiguration: SettingsConfiguration = [
@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'presence_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
type: 'number',
default: 0,
range: {
@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'frequency_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
type: 'number',
default: 0,
range: {

View file

@ -70,8 +70,6 @@ export const revokeUserKey = (name: string) => `${keysEndpoint}/${name}`;
export const revokeAllUserKeys = () => `${keysEndpoint}?all=true`;
export const abortRequest = (endpoint: string) => `/api/ask/${endpoint}/abort`;
export const conversationsRoot = '/api/convos';
export const conversations = (params: q.ConversationListParams) => {

View file

@ -482,6 +482,12 @@ const termsOfServiceSchema = z.object({
export type TTermsOfService = z.infer<typeof termsOfServiceSchema>;
const mcpServersSchema = z.object({
placeholder: z.string().optional(),
});
export type TMcpServersConfig = z.infer<typeof mcpServersSchema>;
export const intefaceSchema = z
.object({
privacyPolicy: z
@ -492,6 +498,7 @@ export const intefaceSchema = z
.optional(),
termsOfService: termsOfServiceSchema.optional(),
customWelcome: z.string().optional(),
mcpServers: mcpServersSchema.optional(),
endpointsMenu: z.boolean().optional(),
modelSelect: z.boolean().optional(),
parameters: z.boolean().optional(),
@ -503,6 +510,7 @@ export const intefaceSchema = z
prompts: z.boolean().optional(),
agents: z.boolean().optional(),
temporaryChat: z.boolean().optional(),
temporaryChatRetention: z.number().min(1).max(8760).optional(),
runCode: z.boolean().optional(),
webSearch: z.boolean().optional(),
})
@ -600,12 +608,14 @@ export type TStartupConfig = {
>;
}
>;
mcpPlaceholder?: string;
};
export enum OCRStrategy {
MISTRAL_OCR = 'mistral_ocr',
CUSTOM_OCR = 'custom_ocr',
AZURE_MISTRAL_OCR = 'azure_mistral_ocr',
VERTEXAI_MISTRAL_OCR = 'vertexai_mistral_ocr',
}
export enum SearchCategories {
@ -637,6 +647,8 @@ export enum SafeSearchTypes {
export const webSearchSchema = z.object({
serperApiKey: z.string().optional().default('${SERPER_API_KEY}'),
searxngInstanceUrl: z.string().optional().default('${SEARXNG_INSTANCE_URL}'),
searxngApiKey: z.string().optional().default('${SEARXNG_API_KEY}'),
firecrawlApiKey: z.string().optional().default('${FIRECRAWL_API_KEY}'),
firecrawlApiUrl: z.string().optional().default('${FIRECRAWL_API_URL}'),
jinaApiKey: z.string().optional().default('${JINA_API_KEY}'),
@ -940,19 +952,11 @@ export const initialModelsConfig: TModelsConfig = {
[EModelEndpoint.bedrock]: defaultModels[EModelEndpoint.bedrock],
};
export const EndpointURLs: { [key in EModelEndpoint]: string } = {
[EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`,
[EModelEndpoint.google]: `/api/ask/${EModelEndpoint.google}`,
[EModelEndpoint.custom]: `/api/ask/${EModelEndpoint.custom}`,
[EModelEndpoint.anthropic]: `/api/ask/${EModelEndpoint.anthropic}`,
[EModelEndpoint.gptPlugins]: `/api/ask/${EModelEndpoint.gptPlugins}`,
[EModelEndpoint.azureOpenAI]: `/api/ask/${EModelEndpoint.azureOpenAI}`,
[EModelEndpoint.chatGPTBrowser]: `/api/ask/${EModelEndpoint.chatGPTBrowser}`,
[EModelEndpoint.azureAssistants]: '/api/assistants/v1/chat',
export const EndpointURLs = {
[EModelEndpoint.assistants]: '/api/assistants/v2/chat',
[EModelEndpoint.azureAssistants]: '/api/assistants/v1/chat',
[EModelEndpoint.agents]: `/api/${EModelEndpoint.agents}/chat`,
[EModelEndpoint.bedrock]: `/api/${EModelEndpoint.bedrock}/chat`,
};
} as const;
export const modularEndpoints = new Set<EModelEndpoint | string>([
EModelEndpoint.gptPlugins,
@ -1255,6 +1259,10 @@ export enum ErrorTypes {
* Google provider returned an error
*/
GOOGLE_ERROR = 'google_error',
/**
* Google provider does not allow custom tools with built-in tools
*/
GOOGLE_TOOL_CONFLICT = 'google_tool_conflict',
/**
* Invalid Agent Provider (excluded by Admin)
*/
@ -1377,7 +1385,7 @@ export enum TTSProviders {
/** Enum for app-wide constants */
export enum Constants {
/** Key for the app's version. */
VERSION = 'v0.7.8',
VERSION = 'v0.7.9-rc1',
/** Key for the Custom Config's version (librechat.yaml). */
CONFIG_VERSION = '1.2.8',
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */
@ -1451,10 +1459,20 @@ export enum LocalStorageKeys {
LAST_CODE_TOGGLE_ = 'LAST_CODE_TOGGLE_',
/** Last checked toggle for Web Search per conversation ID */
LAST_WEB_SEARCH_TOGGLE_ = 'LAST_WEB_SEARCH_TOGGLE_',
/** Last checked toggle for File Search per conversation ID */
LAST_FILE_SEARCH_TOGGLE_ = 'LAST_FILE_SEARCH_TOGGLE_',
/** Last checked toggle for Artifacts per conversation ID */
LAST_ARTIFACTS_TOGGLE_ = 'LAST_ARTIFACTS_TOGGLE_',
/** Key for the last selected agent provider */
LAST_AGENT_PROVIDER = 'lastAgentProvider',
/** Key for the last selected agent model */
LAST_AGENT_MODEL = 'lastAgentModel',
/** Pin state for MCP tools per conversation ID */
PIN_MCP_ = 'PIN_MCP_',
/** Pin state for Web Search per conversation ID */
PIN_WEB_SEARCH_ = 'PIN_WEB_SEARCH_',
/** Pin state for Code Interpreter per conversation ID */
PIN_CODE_INTERPRETER_ = 'PIN_CODE_INTERPRETER_',
}
export enum ForkOptions {

View file

@ -11,32 +11,31 @@ export default function createPayload(submission: t.TSubmission) {
isContinued,
isTemporary,
ephemeralAgent,
editedContent,
} = submission;
const { conversationId } = s.tConvoUpdateSchema.parse(conversation);
const { endpoint: _e, endpointType } = endpointOption as {
endpoint: s.EModelEndpoint;
endpointType?: s.EModelEndpoint;
};
const endpoint = _e as s.EModelEndpoint;
let server = EndpointURLs[endpointType ?? endpoint];
const isEphemeral = s.isEphemeralAgent(endpoint, ephemeralAgent);
if (isEdited && s.isAssistantsEndpoint(endpoint)) {
server += '/modify';
} else if (isEdited) {
server = server.replace('/ask/', '/edit/');
} else if (isEphemeral) {
server = `${EndpointURLs[s.EModelEndpoint.agents]}/${endpoint}`;
const endpoint = _e as s.EModelEndpoint;
let server = `${EndpointURLs[s.EModelEndpoint.agents]}/${endpoint}`;
if (s.isAssistantsEndpoint(endpoint)) {
server =
EndpointURLs[(endpointType ?? endpoint) as 'assistants' | 'azureAssistants'] +
(isEdited ? '/modify' : '');
}
const payload: t.TPayload = {
...userMessage,
...endpointOption,
endpoint,
ephemeralAgent: isEphemeral ? ephemeralAgent : undefined,
ephemeralAgent: s.isAssistantsEndpoint(endpoint) ? undefined : ephemeralAgent,
isContinued: !!(isEdited && isContinued),
conversationId,
isTemporary,
editedContent,
};
return { server, payload };

View file

@ -11,14 +11,6 @@ import request from './request';
import * as s from './schemas';
import * as r from './roles';
/**
 * Aborts an in-flight request on the given endpoint while preserving the
 * partially generated message text.
 * @param endpoint - Endpoint whose request should be aborted
 * @param abortKey - Key identifying the request to abort
 * @param message - Partial message text to retain
 * @returns Promise resolving when the abort request completes
 */
export function abortRequestWithMessage(
  endpoint: string,
  abortKey: string,
  message: string,
): Promise<void> {
  const body = { arg: { abortKey, message } };
  return request.post(endpoints.abortRequest(endpoint), body);
}
export function revokeUserKey(name: string): Promise<unknown> {
return request.delete(endpoints.revokeUserKey(name));
}

View file

@ -192,6 +192,12 @@ export const fileConfig = {
},
serverFileSizeLimit: defaultSizeLimit,
avatarSizeLimit: mbToBytes(2),
clientImageResize: {
enabled: false,
maxWidth: 1900,
maxHeight: 1900,
quality: 0.92,
},
checkType: function (fileType: string, supportedTypes: RegExp[] = supportedMimeTypes) {
return supportedTypes.some((regex) => regex.test(fileType));
},
@ -232,6 +238,14 @@ export const fileConfigSchema = z.object({
px: z.number().min(0).optional(),
})
.optional(),
clientImageResize: z
.object({
enabled: z.boolean().optional(),
maxWidth: z.number().min(0).optional(),
maxHeight: z.number().min(0).optional(),
quality: z.number().min(0).max(1).optional(),
})
.optional(),
});
/** Helper function to safely convert string patterns to RegExp objects */
@ -260,6 +274,14 @@ export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | unde
mergedConfig.avatarSizeLimit = mbToBytes(dynamic.avatarSizeLimit);
}
// Merge clientImageResize configuration
if (dynamic.clientImageResize !== undefined) {
mergedConfig.clientImageResize = {
...mergedConfig.clientImageResize,
...dynamic.clientImageResize,
};
}
if (!dynamic.endpoints) {
return mergedConfig;
}

View file

@ -467,7 +467,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
}
/* Default value checks */
if (setting.type === SettingTypes.Number && isNaN(setting.default as number) && setting.default != null) {
if (
setting.type === SettingTypes.Number &&
isNaN(setting.default as number) &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a number.`,
@ -475,7 +479,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
});
}
if (setting.type === SettingTypes.Boolean && typeof setting.default !== 'boolean' && setting.default != null) {
if (
setting.type === SettingTypes.Boolean &&
typeof setting.default !== 'boolean' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a boolean.`,
@ -485,7 +493,8 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
if (
(setting.type === SettingTypes.String || setting.type === SettingTypes.Enum) &&
typeof setting.default !== 'string' && setting.default != null
typeof setting.default !== 'string' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
@ -520,6 +529,19 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
path: ['default'],
});
}
// Validate enumMappings
if (setting.enumMappings && setting.type === SettingTypes.Enum && setting.options) {
for (const option of setting.options) {
if (!(option in setting.enumMappings)) {
errors.push({
code: ZodIssueCode.custom,
message: `Missing enumMapping for option "${option}" in setting ${setting.key}.`,
path: ['enumMappings'],
});
}
}
}
}
if (errors.length > 0) {

View file

@ -1,7 +1,6 @@
import { z } from 'zod';
import type { TUser } from './types';
import { extractEnvVariable } from './utils';
import { TokenExchangeMethodEnum } from './types/agents';
import { extractEnvVariable } from './utils';
const BaseOptionsSchema = z.object({
iconPath: z.string().optional(),
@ -153,130 +152,3 @@ export const MCPOptionsSchema = z.union([
export const MCPServersSchema = z.record(z.string(), MCPOptionsSchema);
export type MCPOptions = z.infer<typeof MCPOptionsSchema>;
/**
 * List of allowed user fields that can be used in MCP environment variables.
 * These are non-sensitive string/boolean fields from the IUser interface.
 */
const ALLOWED_USER_FIELDS = [
  'name',
  'username',
  'email',
  'provider',
  'role',
  'googleId',
  'facebookId',
  'openidId',
  'samlId',
  'ldapId',
  'githubId',
  'discordId',
  'appleId',
  'emailVerified',
  'twoFactorEnabled',
  'termsAccepted',
] as const;

/**
 * Processes a string value to replace user field placeholders
 * (e.g. `{{LIBRECHAT_USER_EMAIL}}`) with the corresponding user field value.
 * @param value - The string value to process
 * @param user - The user object
 * @returns The processed string with placeholders replaced
 */
function processUserPlaceholders(value: string, user?: TUser): string {
  if (!user || typeof value !== 'string') {
    return value;
  }
  for (const field of ALLOWED_USER_FIELDS) {
    const placeholder = `{{LIBRECHAT_USER_${field.toUpperCase()}}}`;
    if (!value.includes(placeholder)) {
      continue;
    }
    const fieldValue = user[field as keyof TUser];
    // Null/undefined fields are replaced with an empty string.
    const replacementValue = fieldValue != null ? String(fieldValue) : '';
    // `split`/`join` performs a global literal replacement without building a
    // RegExp from the placeholder text, so the braces never need escaping —
    // unlike the previous `new RegExp(placeholder, 'g')`, which relied on
    // unescaped `{`/`}` being treated as literals (Annex-B behavior), and was
    // inconsistent with processSingleValue, which escapes metacharacters.
    value = value.split(placeholder).join(replacementValue);
  }
  return value;
}
/**
 * Resolves every supported placeholder in a single string value, in order:
 * custom user variables, the user id, other user field placeholders, then
 * system environment variables.
 * @param originalValue - The raw string to resolve
 * @param customUserVars - Variables the user set in settings
 * @param user - The user whose fields may be substituted
 * @returns The fully resolved string
 */
function processSingleValue({
  originalValue,
  customUserVars,
  user,
}: {
  originalValue: string;
  customUserVars?: Record<string, string>;
  user?: TUser;
}): string {
  let result = originalValue;

  // 1. Custom user variables are substituted first.
  for (const [varName, varVal] of Object.entries(customUserVars ?? {})) {
    // Escape varName for use in a regex so special characters match literally.
    const escapedVarName = varName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const placeholderRegex = new RegExp(`\\{\\{${escapedVarName}\\}\\}`, 'g');
    result = result.replace(placeholderRegex, varVal);
  }

  // 2.A. {{LIBRECHAT_USER_ID}} is only substituted when user.id is present;
  // if user.id is null/undefined the placeholder is left intact.
  if (user?.id != null && result.includes('{{LIBRECHAT_USER_ID}}')) {
    result = result.replace(/\{\{LIBRECHAT_USER_ID\}\}/g, String(user.id));
  }

  // 2.B. Remaining standard user field placeholders (e.g. {{LIBRECHAT_USER_EMAIL}}).
  result = processUserPlaceholders(result, user);

  // 3. System environment variables.
  return extractEnvVariable(result);
}
/**
 * Recursively processes an MCP options object, replacing environment-variable
 * and user placeholders in its string values.
 * @param obj - The options object to process (not mutated; a clone is returned)
 * @param user - The user object containing all user fields
 * @param customUserVars - Variables the user set in settings
 * @returns The processed clone with placeholders replaced
 */
export function processMCPEnv(
  obj: Readonly<MCPOptions>,
  user?: TUser,
  customUserVars?: Record<string, string>,
): MCPOptions {
  if (obj == null) {
    return obj;
  }

  /** Resolves every value of a string record through processSingleValue. */
  const resolveRecord = (record: Record<string, string>): Record<string, string> => {
    const resolved: Record<string, string> = {};
    for (const [key, originalValue] of Object.entries(record)) {
      resolved[key] = processSingleValue({ originalValue, customUserVars, user });
    }
    return resolved;
  };

  const processed: MCPOptions = structuredClone(obj);

  if ('env' in processed && processed.env) {
    processed.env = resolveRecord(processed.env);
  }

  // Note: `env` and `headers` live on different branches of the MCPOptions
  // union type (headers apply to WebSocket, SSE, and StreamableHTTP types).
  if ('headers' in processed && processed.headers) {
    processed.headers = resolveRecord(processed.headers);
  }

  // URL placeholders (WebSocket, SSE, StreamableHTTP types).
  if ('url' in processed && processed.url) {
    processed.url = processSingleValue({ originalValue: processed.url, customUserVars, user });
  }

  return processed;
}

View file

@ -4,6 +4,7 @@ import {
openAISettings,
googleSettings,
ReasoningEffort,
ReasoningSummary,
BedrockProviders,
anthropicSettings,
} from './types';
@ -71,6 +72,11 @@ const baseDefinitions: Record<string, SettingDefinition> = {
default: ImageDetail.auto,
component: 'slider',
options: [ImageDetail.low, ImageDetail.auto, ImageDetail.high],
enumMappings: {
[ImageDetail.low]: 'com_ui_low',
[ImageDetail.auto]: 'com_ui_auto',
[ImageDetail.high]: 'com_ui_high',
},
optionType: 'conversation',
columnSpan: 2,
},
@ -83,7 +89,7 @@ const createDefinition = (
return { ...base, ...overrides } as SettingDefinition;
};
const librechat: Record<string, SettingDefinition> = {
export const librechat = {
modelLabel: {
key: 'modelLabel',
label: 'com_endpoint_custom_name',
@ -94,7 +100,7 @@ const librechat: Record<string, SettingDefinition> = {
placeholder: 'com_endpoint_openai_custom_name_placeholder',
placeholderCode: true,
optionType: 'conversation',
},
} as const,
maxContextTokens: {
key: 'maxContextTokens',
label: 'com_endpoint_context_tokens',
@ -107,7 +113,7 @@ const librechat: Record<string, SettingDefinition> = {
descriptionCode: true,
optionType: 'model',
columnSpan: 2,
},
} as const,
resendFiles: {
key: 'resendFiles',
label: 'com_endpoint_plug_resend_files',
@ -120,7 +126,7 @@ const librechat: Record<string, SettingDefinition> = {
optionType: 'conversation',
showDefault: false,
columnSpan: 2,
},
} as const,
promptPrefix: {
key: 'promptPrefix',
label: 'com_endpoint_prompt_prefix',
@ -131,7 +137,7 @@ const librechat: Record<string, SettingDefinition> = {
placeholder: 'com_endpoint_openai_prompt_prefix_placeholder',
placeholderCode: true,
optionType: 'model',
},
} as const,
};
const openAIParams: Record<string, SettingDefinition> = {
@ -211,9 +217,70 @@ const openAIParams: Record<string, SettingDefinition> = {
description: 'com_endpoint_openai_reasoning_effort',
descriptionCode: true,
type: 'enum',
default: ReasoningEffort.medium,
default: ReasoningEffort.none,
component: 'slider',
options: [ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high],
options: [
ReasoningEffort.none,
ReasoningEffort.low,
ReasoningEffort.medium,
ReasoningEffort.high,
],
enumMappings: {
[ReasoningEffort.none]: 'com_ui_none',
[ReasoningEffort.low]: 'com_ui_low',
[ReasoningEffort.medium]: 'com_ui_medium',
[ReasoningEffort.high]: 'com_ui_high',
},
optionType: 'model',
columnSpan: 4,
},
useResponsesApi: {
key: 'useResponsesApi',
label: 'com_endpoint_use_responses_api',
labelCode: true,
description: 'com_endpoint_openai_use_responses_api',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
web_search: {
key: 'web_search',
label: 'com_ui_web_search',
labelCode: true,
description: 'com_endpoint_openai_use_web_search',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
reasoning_summary: {
key: 'reasoning_summary',
label: 'com_endpoint_reasoning_summary',
labelCode: true,
description: 'com_endpoint_openai_reasoning_summary',
descriptionCode: true,
type: 'enum',
default: ReasoningSummary.none,
component: 'slider',
options: [
ReasoningSummary.none,
ReasoningSummary.auto,
ReasoningSummary.concise,
ReasoningSummary.detailed,
],
enumMappings: {
[ReasoningSummary.none]: 'com_ui_none',
[ReasoningSummary.auto]: 'com_ui_auto',
[ReasoningSummary.concise]: 'com_ui_concise',
[ReasoningSummary.detailed]: 'com_ui_detailed',
},
optionType: 'model',
columnSpan: 4,
},
@ -314,6 +381,19 @@ const anthropic: Record<string, SettingDefinition> = {
optionType: 'conversation',
columnSpan: 2,
},
web_search: {
key: 'web_search',
label: 'com_ui_web_search',
labelCode: true,
description: 'com_endpoint_anthropic_use_web_search',
descriptionCode: true,
type: 'boolean',
default: anthropicSettings.web_search.default,
component: 'switch',
optionType: 'conversation',
showDefault: false,
columnSpan: 2,
},
};
const bedrock: Record<string, SettingDefinition> = {
@ -347,7 +427,9 @@ const bedrock: Record<string, SettingDefinition> = {
labelCode: true,
type: 'number',
component: 'input',
placeholder: 'com_endpoint_anthropic_maxoutputtokens',
description: 'com_endpoint_anthropic_maxoutputtokens',
descriptionCode: true,
placeholder: 'com_nav_theme_system',
placeholderCode: true,
optionType: 'model',
columnSpan: 2,
@ -450,6 +532,50 @@ const google: Record<string, SettingDefinition> = {
optionType: 'model',
columnSpan: 2,
},
thinking: {
key: 'thinking',
label: 'com_endpoint_thinking',
labelCode: true,
description: 'com_endpoint_google_thinking',
descriptionCode: true,
type: 'boolean',
default: googleSettings.thinking.default,
component: 'switch',
optionType: 'conversation',
showDefault: false,
columnSpan: 2,
},
thinkingBudget: {
key: 'thinkingBudget',
label: 'com_endpoint_thinking_budget',
labelCode: true,
description: 'com_endpoint_google_thinking_budget',
descriptionCode: true,
placeholder: 'com_ui_auto',
placeholderCode: true,
type: 'number',
component: 'input',
range: {
min: googleSettings.thinkingBudget.min,
max: googleSettings.thinkingBudget.max,
step: googleSettings.thinkingBudget.step,
},
optionType: 'conversation',
columnSpan: 2,
},
web_search: {
key: 'web_search',
label: 'com_endpoint_use_search_grounding',
labelCode: true,
description: 'com_endpoint_google_use_search_grounding',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
};
const googleConfig: SettingsConfiguration = [
@ -461,6 +587,9 @@ const googleConfig: SettingsConfiguration = [
google.topP,
google.topK,
librechat.resendFiles,
google.thinking,
google.thinkingBudget,
google.web_search,
];
const googleCol1: SettingsConfiguration = [
@ -476,6 +605,9 @@ const googleCol2: SettingsConfiguration = [
google.topP,
google.topK,
librechat.resendFiles,
google.thinking,
google.thinkingBudget,
google.web_search,
];
const openAI: SettingsConfiguration = [
@ -490,7 +622,10 @@ const openAI: SettingsConfiguration = [
baseDefinitions.stop,
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.web_search,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
];
const openAICol1: SettingsConfiguration = [
@ -507,9 +642,12 @@ const openAICol2: SettingsConfiguration = [
openAIParams.frequency_penalty,
openAIParams.presence_penalty,
baseDefinitions.stop,
openAIParams.reasoning_effort,
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.reasoning_summary,
openAIParams.useResponsesApi,
openAIParams.web_search,
];
const anthropicConfig: SettingsConfiguration = [
@ -524,6 +662,7 @@ const anthropicConfig: SettingsConfiguration = [
anthropic.promptCache,
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
];
const anthropicCol1: SettingsConfiguration = [
@ -542,6 +681,7 @@ const anthropicCol2: SettingsConfiguration = [
anthropic.promptCache,
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
];
const bedrockAnthropic: SettingsConfiguration = [

View file

@ -122,19 +122,6 @@ export function errorsToString(errors: ZodIssue[]) {
.join(' ');
}
/**
 * Resolves header values to env variables if detected.
 * Non-object / array inputs are returned as a shallow copy, unresolved.
 * @param headers - Header name/value map, possibly containing env placeholders
 * @returns A new map with each value passed through extractEnvVariable
 */
export function resolveHeaders(headers: Record<string, string> | undefined) {
  if (!headers || typeof headers !== 'object' || Array.isArray(headers)) {
    // Defensive runtime guard: preserve the original copy-only behavior.
    return { ...(headers ?? {}) };
  }
  const resolved: Record<string, string> = {};
  for (const [name, rawValue] of Object.entries(headers)) {
    resolved[name] = extractEnvVariable(rawValue);
  }
  return resolved;
}
export function getFirstDefinedValue(possibleValues: string[]) {
let returnValue;
for (const value of possibleValues) {
@ -275,15 +262,11 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string =>
if (endpoint === EModelEndpoint.google) {
if (modelLabel) {
return modelLabel;
} else if (model && (model.includes('gemini') || model.includes('learnlm'))) {
return 'Gemini';
} else if (model?.toLowerCase().includes('gemma') === true) {
return 'Gemma';
} else if (model && model.includes('code')) {
return 'Codey';
}
return 'PaLM2';
return 'Gemini';
}
if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) {

View file

@ -12,23 +12,6 @@ import { QueryKeys } from '../keys';
import * as s from '../schemas';
import * as t from '../types';
/**
 * Mutation hook that aborts an in-flight request on the given endpoint while
 * preserving the partial message, then invalidates the balance query.
 */
export const useAbortRequestWithMessage = (): UseMutationResult<
  void,
  Error,
  { endpoint: string; abortKey: string; message: string }
> => {
  const queryClient = useQueryClient();
  return useMutation(
    ({ endpoint, abortKey, message }) =>
      dataService.abortRequestWithMessage(endpoint, abortKey, message),
    {
      // Refetch balance: the aborted (partial) generation may have consumed credit.
      onSuccess: () => {
        queryClient.invalidateQueries([QueryKeys.balance]);
      },
    },
  );
};
export const useGetSharedMessages = (
shareId: string,
config?: UseQueryOptions<t.TSharedMessagesResponse>,

View file

@ -3,7 +3,6 @@ import { Tools } from './types/assistants';
import type { TMessageContentParts, FunctionTool, FunctionToolCall } from './types/assistants';
import { TFeedback, feedbackSchema } from './feedback';
import type { SearchResultData } from './types/web';
import type { TEphemeralAgent } from './types';
import type { TFile } from './types/files';
export const isUUID = z.string().uuid();
@ -91,22 +90,6 @@ export const isAgentsEndpoint = (_endpoint?: EModelEndpoint.agents | null | stri
return endpoint === EModelEndpoint.agents;
};
/**
 * Whether a submission should run as an ephemeral agent: true only when not
 * already on the agents endpoint and at least one ephemeral capability
 * (MCP servers, code execution, or web search) is selected.
 */
export const isEphemeralAgent = (
  endpoint?: EModelEndpoint.agents | null | string,
  ephemeralAgent?: TEphemeralAgent | null,
) => {
  if (!ephemeralAgent || isAgentsEndpoint(endpoint)) {
    return false;
  }
  const mcpSelected = (ephemeralAgent.mcp?.length ?? 0) > 0;
  const codeSelected = ephemeralAgent.execute_code === true;
  const searchSelected = ephemeralAgent.web_search === true;
  return mcpSelected || codeSelected || searchSelected;
};
export const isParamEndpoint = (
endpoint: EModelEndpoint | string,
endpointType?: EModelEndpoint | string,
@ -129,11 +112,19 @@ export enum ImageDetail {
}
/** Reasoning effort levels for OpenAI reasoning models; empty string = unset. */
export enum ReasoningEffort {
  none = '',
  low = 'low',
  medium = 'medium',
  high = 'high',
}
/** Reasoning summary verbosity options; empty string = no summary. */
export enum ReasoningSummary {
  none = '',
  auto = 'auto',
  concise = 'concise',
  detailed = 'detailed',
}
export const imageDetailNumeric = {
[ImageDetail.low]: 0,
[ImageDetail.auto]: 1,
@ -148,6 +139,7 @@ export const imageDetailValue = {
export const eImageDetailSchema = z.nativeEnum(ImageDetail);
export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
export const defaultAssistantFormValues = {
assistant: '',
@ -272,6 +264,18 @@ export const googleSettings = {
step: 1 as const,
default: 40 as const,
},
thinking: {
default: true as const,
},
thinkingBudget: {
min: -1 as const,
max: 32768 as const,
step: 1 as const,
/** `-1` = Dynamic Thinking, meaning the model will adjust
* the budget based on the complexity of the request.
*/
default: -1 as const,
},
};
const ANTHROPIC_MAX_OUTPUT = 128000 as const;
@ -348,6 +352,9 @@ export const anthropicSettings = {
default: LEGACY_ANTHROPIC_MAX_OUTPUT,
},
},
web_search: {
default: false as const,
},
};
export const agentsSettings = {
@ -417,7 +424,7 @@ export type TPluginAuthConfig = z.infer<typeof tPluginAuthConfigSchema>;
export const tPluginSchema = z.object({
name: z.string(),
pluginKey: z.string(),
description: z.string(),
description: z.string().optional(),
icon: z.string().optional(),
authConfig: z.array(tPluginAuthConfigSchema).optional(),
authenticated: z.boolean().optional(),
@ -499,6 +506,7 @@ export const tMessageSchema = z.object({
title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'),
sender: z.string().optional(),
text: z.string(),
/** @deprecated */
generation: z.string().nullable().optional(),
isCreatedByUser: z.boolean(),
error: z.boolean().optional(),
@ -624,8 +632,13 @@ export const tConversationSchema = z.object({
file_ids: z.array(z.string()).optional(),
/* vision */
imageDetail: eImageDetailSchema.optional(),
/* OpenAI: o1 only */
reasoning_effort: eReasoningEffortSchema.optional(),
/* OpenAI: Reasoning models only */
reasoning_effort: eReasoningEffortSchema.optional().nullable(),
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
/* OpenAI: use Responses API */
useResponsesApi: z.boolean().optional(),
/* OpenAI Responses API / Anthropic API / Google API */
web_search: z.boolean().optional(),
/* assistant */
assistant_id: z.string().optional(),
/* agents */
@ -722,6 +735,14 @@ export const tQueryParamsSchema = tConversationSchema
top_p: true,
/** @endpoints openAI, custom, azureOpenAI */
max_tokens: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_effort: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_summary: true,
/** @endpoints openAI, custom, azureOpenAI */
useResponsesApi: true,
/** @endpoints openAI, anthropic, google */
web_search: true,
/** @endpoints google, anthropic, bedrock */
topP: true,
/** @endpoints google, anthropic */
@ -802,6 +823,9 @@ export const googleBaseSchema = tConversationSchema.pick({
artifacts: true,
topP: true,
topK: true,
thinking: true,
thinkingBudget: true,
web_search: true,
iconURL: true,
greeting: true,
spec: true,
@ -827,6 +851,13 @@ export const googleGenConfigSchema = z
presencePenalty: coerceNumber.optional(),
frequencyPenalty: coerceNumber.optional(),
stopSequences: z.array(z.string()).optional(),
thinkingConfig: z
.object({
includeThoughts: z.boolean().optional(),
thinkingBudget: coerceNumber.optional(),
})
.optional(),
web_search: z.boolean().optional(),
})
.strip()
.optional();
@ -1041,10 +1072,13 @@ export const openAIBaseSchema = tConversationSchema.pick({
maxContextTokens: true,
max_tokens: true,
reasoning_effort: true,
reasoning_summary: true,
useResponsesApi: true,
web_search: true,
});
export const openAISchema = openAIBaseSchema
.transform((obj: Partial<TConversation>) => removeNullishValues(obj))
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
.catch(() => ({}));
export const compactGoogleSchema = googleBaseSchema
@ -1084,6 +1118,7 @@ export const anthropicBaseSchema = tConversationSchema.pick({
greeting: true,
spec: true,
maxContextTokens: true,
web_search: true,
});
export const anthropicSchema = anthropicBaseSchema

View file

@ -98,6 +98,7 @@ export type TEndpointOption = Pick<
export type TEphemeralAgent = {
mcp?: string[];
web_search?: boolean;
file_search?: boolean;
execute_code?: boolean;
};
@ -108,10 +109,14 @@ export type TPayload = Partial<TMessage> &
messages?: TMessages;
isTemporary: boolean;
ephemeralAgent?: TEphemeralAgent | null;
editedContent?: {
index: number;
text: string;
type: 'text' | 'think';
} | null;
};
export type TSubmission = {
artifacts?: string;
plugin?: TResPlugin;
plugins?: TResPlugin[];
userMessage: TMessage;
@ -126,6 +131,11 @@ export type TSubmission = {
endpointOption: TEndpointOption;
clientTimestamp?: string;
ephemeralAgent?: TEphemeralAgent | null;
editedContent?: {
index: number;
text: string;
type: 'text' | 'think';
} | null;
};
export type EventSubmission = Omit<TSubmission, 'initialResponse'> & { initialResponse: TMessage };
@ -133,7 +143,7 @@ export type EventSubmission = Omit<TSubmission, 'initialResponse'> & { initialRe
export type TPluginAction = {
pluginKey: string;
action: 'install' | 'uninstall';
auth?: Partial<Record<string, string>>;
auth?: Partial<Record<string, string>> | null;
isEntityTool?: boolean;
};
@ -143,7 +153,7 @@ export type TUpdateUserPlugins = {
isEntityTool?: boolean;
pluginKey: string;
action: string;
auth?: Partial<Record<string, string | null>>;
auth?: Partial<Record<string, string | null>> | null;
};
// TODO `label` needs to be changed to the proper `TranslationKeys`

View file

@ -11,6 +11,7 @@ export enum FileSources {
execute_code = 'execute_code',
mistral_ocr = 'mistral_ocr',
azure_mistral_ocr = 'azure_mistral_ocr',
vertexai_mistral_ocr = 'vertexai_mistral_ocr',
text = 'text',
}
@ -48,6 +49,12 @@ export type FileConfig = {
};
serverFileSizeLimit?: number;
avatarSizeLimit?: number;
clientImageResize?: {
enabled?: boolean;
maxWidth?: number;
maxHeight?: number;
quality?: number;
};
checkType?: (fileType: string, supportedTypes: RegExp[]) => boolean;
};

View file

@ -13,6 +13,8 @@ export function loadWebSearchConfig(
config: TCustomConfig['webSearch'],
): TCustomConfig['webSearch'] {
const serperApiKey = config?.serperApiKey ?? '${SERPER_API_KEY}';
const searxngInstanceUrl = config?.searxngInstanceUrl ?? '${SEARXNG_INSTANCE_URL}';
const searxngApiKey = config?.searxngApiKey ?? '${SEARXNG_API_KEY}';
const firecrawlApiKey = config?.firecrawlApiKey ?? '${FIRECRAWL_API_KEY}';
const firecrawlApiUrl = config?.firecrawlApiUrl ?? '${FIRECRAWL_API_URL}';
const jinaApiKey = config?.jinaApiKey ?? '${JINA_API_KEY}';
@ -25,6 +27,8 @@ export function loadWebSearchConfig(
jinaApiKey,
cohereApiKey,
serperApiKey,
searxngInstanceUrl,
searxngApiKey,
firecrawlApiKey,
firecrawlApiUrl,
};
@ -32,6 +36,8 @@ export function loadWebSearchConfig(
export type TWebSearchKeys =
| 'serperApiKey'
| 'searxngInstanceUrl'
| 'searxngApiKey'
| 'firecrawlApiKey'
| 'firecrawlApiUrl'
| 'jinaApiKey'
@ -47,6 +53,11 @@ export const webSearchAuth = {
serper: {
serperApiKey: 1 as const,
},
searxng: {
searxngInstanceUrl: 1 as const,
/** Optional (0) */
searxngApiKey: 0 as const,
},
},
scrapers: {
firecrawl: {

View file

@ -11,8 +11,6 @@ const formatDate = (date: Date): string => {
// Factory function that takes mongoose instance and returns the methods
export function createMemoryMethods(mongoose: typeof import('mongoose')) {
const MemoryEntry = mongoose.models.MemoryEntry;
/**
* Creates a new memory entry for a user
* Throws an error if a memory with the same key already exists
@ -28,6 +26,7 @@ export function createMemoryMethods(mongoose: typeof import('mongoose')) {
return { ok: false };
}
const MemoryEntry = mongoose.models.MemoryEntry;
const existingMemory = await MemoryEntry.findOne({ userId, key });
if (existingMemory) {
throw new Error('Memory with this key already exists');
@ -63,6 +62,7 @@ export function createMemoryMethods(mongoose: typeof import('mongoose')) {
return { ok: false };
}
const MemoryEntry = mongoose.models.MemoryEntry;
await MemoryEntry.findOneAndUpdate(
{ userId, key },
{
@ -89,6 +89,7 @@ export function createMemoryMethods(mongoose: typeof import('mongoose')) {
*/
async function deleteMemory({ userId, key }: t.DeleteMemoryParams): Promise<t.MemoryResult> {
try {
const MemoryEntry = mongoose.models.MemoryEntry;
const result = await MemoryEntry.findOneAndDelete({ userId, key });
return { ok: !!result };
} catch (error) {
@ -105,6 +106,7 @@ export function createMemoryMethods(mongoose: typeof import('mongoose')) {
userId: string | Types.ObjectId,
): Promise<t.IMemoryEntryLean[]> {
try {
const MemoryEntry = mongoose.models.MemoryEntry;
return (await MemoryEntry.find({ userId }).lean()) as t.IMemoryEntryLean[];
} catch (error) {
throw new Error(

View file

@ -1,16 +1,14 @@
import type { DeleteResult, Model } from 'mongoose';
import type { IPluginAuth } from '~/schema/pluginAuth';
import type {
FindPluginAuthsByKeysParams,
UpdatePluginAuthParams,
DeletePluginAuthParams,
FindPluginAuthParams,
IPluginAuth,
} from '~/types';
// Factory function that takes mongoose instance and returns the methods
export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
/**
* Finds a single plugin auth entry by userId and authField
*/
@ -19,6 +17,7 @@ export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
authField,
}: FindPluginAuthParams): Promise<IPluginAuth | null> {
try {
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
return await PluginAuth.findOne({ userId, authField }).lean();
} catch (error) {
throw new Error(
@ -39,6 +38,7 @@ export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
return [];
}
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
return await PluginAuth.find({
userId,
pluginKey: { $in: pluginKeys },
@ -60,6 +60,7 @@ export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
value,
}: UpdatePluginAuthParams): Promise<IPluginAuth> {
try {
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
const existingAuth = await PluginAuth.findOne({ userId, pluginKey, authField }).lean();
if (existingAuth) {
@ -95,6 +96,7 @@ export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
all = false,
}: DeletePluginAuthParams): Promise<DeleteResult> {
try {
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
if (all) {
const filter: DeletePluginAuthParams = { userId };
if (pluginKey) {
@ -120,6 +122,7 @@ export function createPluginAuthMethods(mongoose: typeof import('mongoose')) {
*/
async function deleteAllUserPluginAuths(userId: string): Promise<DeleteResult> {
try {
const PluginAuth: Model<IPluginAuth> = mongoose.models.PluginAuth;
return await PluginAuth.deleteMany({ userId });
} catch (error) {
throw new Error(

View file

@ -13,14 +13,10 @@ export class SessionError extends Error {
}
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expires = REFRESH_TOKEN_EXPIRY
? eval(REFRESH_TOKEN_EXPIRY)
: 1000 * 60 * 60 * 24 * 7; // 7 days default
const expires = REFRESH_TOKEN_EXPIRY ? eval(REFRESH_TOKEN_EXPIRY) : 1000 * 60 * 60 * 24 * 7; // 7 days default
// Factory function that takes mongoose instance and returns the methods
export function createSessionMethods(mongoose: typeof import('mongoose')) {
const Session = mongoose.models.Session;
/**
* Creates a new session for a user
*/
@ -33,13 +29,14 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
}
try {
const session = new Session({
const Session = mongoose.models.Session;
const currentSession = new Session({
user: userId,
expiration: options.expiration || new Date(Date.now() + expires),
});
const refreshToken = await generateRefreshToken(session);
const refreshToken = await generateRefreshToken(currentSession);
return { session, refreshToken };
return { session: currentSession, refreshToken };
} catch (error) {
logger.error('[createSession] Error creating session:', error);
throw new SessionError('Failed to create session', 'CREATE_SESSION_FAILED');
@ -54,6 +51,7 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
options: t.SessionQueryOptions = { lean: true },
): Promise<t.ISession | null> {
try {
const Session = mongoose.models.Session;
const query: Record<string, unknown> = {};
if (!params.refreshToken && !params.userId && !params.sessionId) {
@ -109,6 +107,7 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
newExpiration?: Date,
): Promise<t.ISession> {
try {
const Session = mongoose.models.Session;
const sessionDoc = typeof session === 'string' ? await Session.findById(session) : session;
if (!sessionDoc) {
@ -128,6 +127,7 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
*/
async function deleteSession(params: t.DeleteSessionParams): Promise<{ deletedCount?: number }> {
try {
const Session = mongoose.models.Session;
if (!params.refreshToken && !params.sessionId) {
throw new SessionError(
'Either refreshToken or sessionId is required',
@ -166,6 +166,7 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
options: t.DeleteAllSessionsOptions = {},
): Promise<{ deletedCount?: number }> {
try {
const Session = mongoose.models.Session;
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
@ -237,6 +238,7 @@ export function createSessionMethods(mongoose: typeof import('mongoose')) {
*/
async function countActiveSessions(userId: string): Promise<number> {
try {
const Session = mongoose.models.Session;
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}

View file

@ -716,7 +716,7 @@ export default function mongoMeili(schema: Schema, options: MongoMeiliOptions):
} catch (error) {
if (meiliEnabled) {
logger.error(
'[MeiliMongooseModel.deleteMany] There was an issue deleting conversation indexes upon deletion. Next startup may be slow due to syncing.',
'[MeiliMongooseModel.deleteMany] There was an issue deleting conversation indexes upon deletion. Next startup may trigger syncing.',
error,
);
}

View file

@ -131,8 +131,22 @@ export const conversationPreset = {
max_tokens: {
type: Number,
},
/** omni models only */
useResponsesApi: {
type: Boolean,
},
/** OpenAI Responses API / Anthropic API */
web_search: {
type: Boolean,
},
/** Google */
grounding: {
type: Boolean,
},
/** Reasoning models only */
reasoning_effort: {
type: String,
},
reasoning_summary: {
type: String,
},
};

View file

@ -46,6 +46,8 @@ export interface IPreset extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
// end of additional fields
agentOptions?: unknown;
}

View file

@ -45,6 +45,9 @@ export interface IConversation extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
web_search?: boolean;
// Additional fields
files?: string[];
expiredAt?: Date;