Merge branch 'main' into refactor/package-auth

commit 02b9c9d447
Author: Cha
Date: 2025-06-17 18:26:25 +08:00
340 changed files with 18559 additions and 14872 deletions

View file

@ -1,7 +1,9 @@
const jwt = require('jsonwebtoken');
const { nanoid } = require('nanoid');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { GraphEvents, sleep } = require('@librechat/agents');
const { sendEvent, logAxiosError } = require('@librechat/api');
const {
Time,
CacheKeys,
@ -13,11 +15,10 @@ const {
actionDomainSeparator,
} = require('librechat-data-provider');
const { refreshAccessToken } = require('~/server/services/TokenService');
const { logger, getFlowStateManager, sendEvent } = require('~/config');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
const { logAxiosError } = require('~/utils');
const { getFlowStateManager } = require('~/config');
const { getLogStores } = require('~/cache');
const { findToken } = require('~/models');
@ -208,6 +209,7 @@ async function createActionTool({
userId: userId,
client_url: metadata.auth.client_url,
redirect_uri: `${process.env.DOMAIN_SERVER}/api/actions/${action_id}/oauth/callback`,
token_exchange_method: metadata.auth.token_exchange_method,
/** Encrypted values */
encrypted_oauth_client_id: encrypted.oauth_client_id,
encrypted_oauth_client_secret: encrypted.oauth_client_secret,
@ -262,6 +264,7 @@ async function createActionTool({
refresh_token,
client_url: metadata.auth.client_url,
encrypted_oauth_client_id: encrypted.oauth_client_id,
token_exchange_method: metadata.auth.token_exchange_method,
encrypted_oauth_client_secret: encrypted.oauth_client_secret,
});
const flowsCache = getLogStores(CacheKeys.FLOWS);
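
The two hunks above persist `token_exchange_method` from the action's auth metadata alongside the encrypted OAuth client credentials, both when the flow is created and when tokens are refreshed. For illustration only (the real logic lives in ~/server/services/TokenService and may differ, and the 'basic_auth_header' value is an assumption, not confirmed by this diff), a refresh helper could branch on the stored value to decide whether client credentials go in the POST body or in a Basic Authorization header:

// Hypothetical sketch: using a persisted token_exchange_method during token refresh.
const axios = require('axios');

async function refreshWithMethod({ token_url, client_id, client_secret, refresh_token, token_exchange_method }) {
  const params = new URLSearchParams({ grant_type: 'refresh_token', refresh_token });
  const headers = { 'Content-Type': 'application/x-www-form-urlencoded' };

  if (token_exchange_method === 'basic_auth_header') {
    // Assumed method name: credentials travel in the Authorization header
    headers.Authorization = `Basic ${Buffer.from(`${client_id}:${client_secret}`).toString('base64')}`;
  } else {
    // Default: credentials travel in the request body
    params.set('client_id', client_id);
    params.set('client_secret', client_secret);
  }

  const { data } = await axios.post(token_url, params.toString(), { headers });
  return data; // typically { access_token, refresh_token?, expires_in, ... }
}
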

View file

@ -3,9 +3,11 @@ const {
loadOCRConfig,
processMCPEnv,
EModelEndpoint,
loadMemoryConfig,
getConfigDefaults,
loadWebSearchConfig,
} = require('librechat-data-provider');
const { agentsConfigSetup } = require('@librechat/api');
const {
checkHealth,
checkConfig,
@ -24,7 +26,6 @@ const { azureConfigSetup } = require('./start/azureOpenAI');
const { processModelSpecs } = require('./start/modelSpecs');
const { initializeS3 } = require('./Files/S3/initialize');
const { loadAndFormatTools } = require('./ToolService');
const { agentsConfigSetup } = require('./start/agents');
const { isEnabled } = require('~/server/utils');
const { initializeRoles } = require('~/models');
const { getMCPManager } = require('~/config');
@ -44,6 +45,7 @@ const AppService = async (app) => {
const ocr = loadOCRConfig(config.ocr);
const webSearch = loadWebSearchConfig(config.webSearch);
checkWebSearchConfig(webSearch);
const memory = loadMemoryConfig(config.memory);
const filteredTools = config.filteredTools;
const includedTools = config.includedTools;
const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
@ -88,6 +90,7 @@ const AppService = async (app) => {
const defaultLocals = {
ocr,
paths,
memory,
webSearch,
fileStrategy,
socialLogins,
@ -100,8 +103,13 @@ const AppService = async (app) => {
balance,
};
const agentsDefaults = agentsConfigSetup(config);
if (!Object.keys(config).length) {
app.locals = defaultLocals;
app.locals = {
...defaultLocals,
[EModelEndpoint.agents]: agentsDefaults,
};
return;
}
@ -136,9 +144,7 @@ const AppService = async (app) => {
);
}
if (endpoints?.[EModelEndpoint.agents]) {
endpointLocals[EModelEndpoint.agents] = agentsConfigSetup(config);
}
endpointLocals[EModelEndpoint.agents] = agentsConfigSetup(config, agentsDefaults);
const endpointKeys = [
EModelEndpoint.openAI,
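
With this change, agent endpoint defaults are computed once via `agentsConfigSetup(config)` and applied in every code path, so `app.locals[EModelEndpoint.agents]` is always populated; a custom `endpoints.agents` block only overrides those defaults. A minimal sketch of the resulting merge behavior (illustrative only; the real `agentsConfigSetup` now lives in `@librechat/api` and its internals are not shown in this diff, and the default capability list is an assumption):

// Illustrative merge: defaults first, then any custom endpoints.agents block on top.
const assumedAgentDefaults = {
  disableBuilder: false,
  capabilities: ['execute_code', 'file_search', 'actions', 'tools'], // assumed default capability set
};

function agentsConfigSetupSketch(config, prev = assumedAgentDefaults) {
  const agentsConfig = config?.endpoints?.agents ?? {};
  return { ...prev, ...agentsConfig };
}

// Empty config                    -> defaults
// Config without endpoints.agents -> defaults
// Config with endpoints.agents    -> defaults overridden by the custom block
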

View file

@ -2,8 +2,10 @@ const {
FileSources,
EModelEndpoint,
EImageOutputType,
AgentCapabilities,
defaultSocialLogins,
validateAzureGroups,
defaultAgentCapabilities,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
@ -151,6 +153,11 @@ describe('AppService', () => {
safeSearch: 1,
serperApiKey: '${SERPER_API_KEY}',
},
memory: undefined,
agents: {
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
},
});
});
@ -268,6 +275,71 @@ describe('AppService', () => {
);
});
it('should correctly configure Agents endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.agents]: {
disableBuilder: true,
recursionLimit: 10,
maxRecursionLimit: 20,
allowedProviders: ['openai', 'anthropic'],
capabilities: [AgentCapabilities.tools, AgentCapabilities.actions],
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: true,
recursionLimit: 10,
maxRecursionLimit: 20,
allowedProviders: expect.arrayContaining(['openai', 'anthropic']),
capabilities: expect.arrayContaining([AgentCapabilities.tools, AgentCapabilities.actions]),
}),
);
});
it('should configure Agents endpoint with defaults when no config is provided', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() => Promise.resolve({}));
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
}),
);
});
it('should configure Agents endpoint with defaults when endpoints exist but agents is not defined', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: true,
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
}),
);
});
it('should correctly configure minimum Azure OpenAI Assistant values', async () => {
const assistantGroups = [azureGroups[0], { ...azureGroups[1], assistants: true }];
require('./Config/loadCustomConfig').mockImplementationOnce(() =>

View file

@ -0,0 +1,196 @@
const { Providers } = require('@librechat/agents');
const { primeResources, optionalChainWithEmptyCheck } = require('@librechat/api');
const {
ErrorTypes,
EModelEndpoint,
EToolResources,
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { getConvoFiles } = require('~/models/Conversation');
const { getToolFilesByIds } = require('~/models/File');
const { getModelMaxTokens } = require('~/utils');
const { getFiles } = require('~/models/File');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {Agent} params.agent
* @param {string | null} [params.conversationId]
* @param {Array<IMongoFile>} [params.requestFiles]
* @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
* @param {TEndpointOption} [params.endpointOption]
* @param {Set<string>} [params.allowedProviders]
* @param {boolean} [params.isInitialAgent]
* @returns {Promise<Agent & { tools: StructuredTool[], attachments: Array<MongoFile>, toolContextMap: Record<string, unknown>, maxContextTokens: number }>}
*/
const initializeAgent = async ({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
isInitialAgent = false,
}) => {
if (allowedProviders.size > 0 && !allowedProviders.has(agent.provider)) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles;
if (
isInitialAgent &&
conversationId != null &&
(agent.model_parameters?.resendFiles ?? true) === true
) {
const fileIds = (await getConvoFiles(conversationId)) ?? [];
/** @type {Set<EToolResources>} */
const toolResourceSet = new Set();
for (const tool of agent.tools) {
if (EToolResources[tool]) {
toolResourceSet.add(EToolResources[tool]);
}
}
const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
if (requestFiles.length || toolFiles.length) {
currentFiles = await processFiles(requestFiles.concat(toolFiles));
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = await processFiles(requestFiles);
}
const { attachments, tool_resources } = await primeResources({
req,
getFiles,
attachments: currentFiles,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
});
const provider = agent.provider;
const { tools, toolContextMap } =
(await loadTools?.({
req,
res,
provider,
agentId: agent.id,
tools: agent.tools,
model: agent.model,
tool_resources,
})) ?? {};
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
getOptions = providerConfigMap[agent.provider];
} else if (!getOptions) {
const customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
}
const model_parameters = Object.assign(
{},
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
);
const _endpointOption =
isInitialAgent === true
? Object.assign({}, endpointOption, { model_parameters })
: { model_parameters };
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: provider,
overrideModel: agent.model,
endpointOption: _endpointOption,
});
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
if (!agent.model_parameters.model) {
agent.model_parameters.model = agent.model;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
agent.additional_instructions = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts,
});
}
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
const maxTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxOutputTokens,
agent.model_parameters.maxTokens,
0,
);
const maxContextTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxContextTokens,
agent.max_context_tokens,
getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
4096,
);
return {
...agent,
tools,
attachments,
toolContextMap,
maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
};
};
module.exports = { initializeAgent };
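
A hedged usage sketch of the new `initializeAgent`: unlike the old `initializeAgentOptions`, it no longer reads `req.body` itself, so the caller passes `requestFiles`, `conversationId`, and a `loadTools` callback explicitly (mirroring how the agents client initializer further down wires it up):

// Sketch only; paths and the loadTools contract are taken from elsewhere in this diff.
const { EModelEndpoint } = require('librechat-data-provider');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');

async function buildAgentConfig({ req, res, agent, endpointOption, loadTools }) {
  const allowedProviders = new Set(req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders);
  return initializeAgent({
    req,
    res,
    agent,
    loadTools, // async ({ req, res, agentId, tools, provider, model, tool_resources }) => { tools, toolContextMap }
    requestFiles: req.body.files ?? [],
    conversationId: req.body.conversationId,
    endpointOption,
    allowedProviders,
    isInitialAgent: true,
  });
}
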

View file

@ -1,294 +1,41 @@
const { createContentAggregator, Providers } = require('@librechat/agents');
const {
Constants,
ErrorTypes,
EModelEndpoint,
EToolResources,
getResponseSender,
AgentCapabilities,
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { createContentAggregator } = require('@librechat/agents');
const { Constants, EModelEndpoint, getResponseSender } = require('librechat-data-provider');
const {
getDefaultHandlers,
createToolEndCallback,
} = require('~/server/controllers/agents/callbacks');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getConvoFiles } = require('~/models/Conversation');
const { getToolFilesByIds } = require('~/models/File');
const { getModelMaxTokens } = require('~/utils');
const { getAgent } = require('~/models/Agent');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* @param {Object} params
* @param {ServerRequest} params.req
* @param {Promise<Array<MongoFile | null>> | undefined} [params.attachments]
* @param {Set<string>} params.requestFileSet
* @param {AgentToolResources | undefined} [params.tool_resources]
* @returns {Promise<{ attachments: Array<MongoFile | undefined> | undefined, tool_resources: AgentToolResources | undefined }>}
*/
const primeResources = async ({
req,
attachments: _attachments,
tool_resources: _tool_resources,
requestFileSet,
}) => {
try {
/** @type {Array<MongoFile | undefined> | undefined} */
let attachments;
const tool_resources = _tool_resources ?? {};
const isOCREnabled = (req.app.locals?.[EModelEndpoint.agents]?.capabilities ?? []).includes(
AgentCapabilities.ocr,
);
if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
const context = await getFiles(
{
file_id: { $in: tool_resources.ocr.file_ids },
},
{},
{},
);
attachments = (attachments ?? []).concat(context);
function createToolLoader() {
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {string} params.agentId
* @param {string[]} params.tools
* @param {string} params.provider
* @param {string} params.model
* @param {AgentToolResources} params.tool_resources
* @returns {Promise<{ tools: StructuredTool[], toolContextMap: Record<string, unknown> } | undefined>}
*/
return async function loadTools({ req, res, agentId, tools, provider, model, tool_resources }) {
const agent = { id: agentId, tools, provider, model };
try {
return await loadAgentTools({
req,
res,
agent,
tool_resources,
});
} catch (error) {
logger.error('Error loading tools for agent ' + agentId, error);
}
if (!_attachments) {
return { attachments, tool_resources };
}
/** @type {Array<MongoFile | undefined> | undefined} */
const files = await _attachments;
if (!attachments) {
/** @type {Array<MongoFile | undefined>} */
attachments = [];
}
for (const file of files) {
if (!file) {
continue;
}
if (file.metadata?.fileIdentifier) {
const execute_code = tool_resources[EToolResources.execute_code] ?? {};
if (!execute_code.files) {
tool_resources[EToolResources.execute_code] = { ...execute_code, files: [] };
}
tool_resources[EToolResources.execute_code].files.push(file);
} else if (file.embedded === true) {
const file_search = tool_resources[EToolResources.file_search] ?? {};
if (!file_search.files) {
tool_resources[EToolResources.file_search] = { ...file_search, files: [] };
}
tool_resources[EToolResources.file_search].files.push(file);
} else if (
requestFileSet.has(file.file_id) &&
file.type.startsWith('image') &&
file.height &&
file.width
) {
const image_edit = tool_resources[EToolResources.image_edit] ?? {};
if (!image_edit.files) {
tool_resources[EToolResources.image_edit] = { ...image_edit, files: [] };
}
tool_resources[EToolResources.image_edit].files.push(file);
}
attachments.push(file);
}
return { attachments, tool_resources };
} catch (error) {
logger.error('Error priming resources', error);
return { attachments: _attachments, tool_resources: _tool_resources };
}
};
/**
* @param {...string | number} values
* @returns {string | number | undefined}
*/
function optionalChainWithEmptyCheck(...values) {
for (const value of values) {
if (value !== undefined && value !== null && value !== '') {
return value;
}
}
return values[values.length - 1];
}
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {Agent} params.agent
* @param {Set<string>} [params.allowedProviders]
* @param {object} [params.endpointOption]
* @param {boolean} [params.isInitialAgent]
* @returns {Promise<Agent>}
*/
const initializeAgentOptions = async ({
req,
res,
agent,
endpointOption,
allowedProviders,
isInitialAgent = false,
}) => {
if (allowedProviders.size > 0 && !allowedProviders.has(agent.provider)) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles;
/** @type {Array<MongoFile>} */
const requestFiles = req.body.files ?? [];
if (
isInitialAgent &&
req.body.conversationId != null &&
(agent.model_parameters?.resendFiles ?? true) === true
) {
const fileIds = (await getConvoFiles(req.body.conversationId)) ?? [];
/** @type {Set<EToolResources>} */
const toolResourceSet = new Set();
for (const tool of agent.tools) {
if (EToolResources[tool]) {
toolResourceSet.add(EToolResources[tool]);
}
}
const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
if (requestFiles.length || toolFiles.length) {
currentFiles = await processFiles(requestFiles.concat(toolFiles));
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = await processFiles(requestFiles);
}
const { attachments, tool_resources } = await primeResources({
req,
attachments: currentFiles,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles.map((file) => file.file_id)),
});
const provider = agent.provider;
const { tools, toolContextMap } = await loadAgentTools({
req,
res,
agent: {
id: agent.id,
tools: agent.tools,
provider,
model: agent.model,
},
tool_resources,
});
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
getOptions = providerConfigMap[agent.provider];
} else if (!getOptions) {
const customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
}
const model_parameters = Object.assign(
{},
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
);
const _endpointOption =
isInitialAgent === true
? Object.assign({}, endpointOption, { model_parameters })
: { model_parameters };
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: provider,
overrideModel: agent.model,
endpointOption: _endpointOption,
});
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
if (!agent.model_parameters.model) {
agent.model_parameters.model = agent.model;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
agent.additional_instructions = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts,
});
}
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
const maxTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxOutputTokens,
agent.model_parameters.maxTokens,
0,
);
const maxContextTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxContextTokens,
agent.max_context_tokens,
getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
4096,
);
return {
...agent,
tools,
attachments,
toolContextMap,
maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
};
};
}
const initializeClient = async ({ req, res, endpointOption }) => {
if (!endpointOption) {
@ -313,7 +60,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error('No agent promise provided');
}
// Initialize primary agent
const primaryAgent = await endpointOption.agent;
if (!primaryAgent) {
throw new Error('Agent not found');
@ -323,10 +69,18 @@ const initializeClient = async ({ req, res, endpointOption }) => {
/** @type {Set<string>} */
const allowedProviders = new Set(req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders);
// Handle primary agent
const primaryConfig = await initializeAgentOptions({
const loadTools = createToolLoader();
/** @type {Array<MongoFile>} */
const requestFiles = req.body.files ?? [];
/** @type {string} */
const conversationId = req.body.conversationId;
const primaryConfig = await initializeAgent({
req,
res,
loadTools,
requestFiles,
conversationId,
agent: primaryAgent,
endpointOption,
allowedProviders,
@ -340,10 +94,13 @@ const initializeClient = async ({ req, res, endpointOption }) => {
if (!agent) {
throw new Error(`Agent ${agentId} not found`);
}
const config = await initializeAgentOptions({
const config = await initializeAgent({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
});
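
One behavioral detail worth noting from the refactor above: the loader returned by `createToolLoader()` catches and logs tool-loading errors and returns `undefined`, and `initializeAgent` falls back with `?? {}`, so a failed tool load degrades to an agent without tools instead of aborting initialization. A self-contained illustration of that shape (not the project code):

// Standalone demo of the "log and return undefined" loader pattern used above.
function createToolLoaderSketch(logger = console) {
  return async function loadTools({ agentId }) {
    try {
      throw new Error('simulated tool loading failure');
    } catch (error) {
      logger.error('Error loading tools for agent ' + agentId, error);
      // returning undefined lets the caller continue without tools
    }
  };
}

(async () => {
  const loadTools = createToolLoaderSketch();
  const { tools, toolContextMap } = (await loadTools({ agentId: 'agent_123' })) ?? {};
  console.log(tools, toolContextMap); // undefined undefined -> initialization proceeds without tools
})();
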

View file

@ -1,5 +1,6 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { constructAzureURL, isUserProvided } = require('@librechat/api');
const {
ErrorTypes,
EModelEndpoint,
@ -12,8 +13,6 @@ const {
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');
class Files {
constructor(client) {

View file

@ -1,4 +1,5 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { createHandleLLMNewToken } = require('@librechat/api');
const {
AuthType,
Constants,
@ -8,7 +9,6 @@ const {
removeNullishValues,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const getOptions = async ({ req, overrideModel, endpointOption }) => {
const {

View file

@ -6,10 +6,9 @@ const {
extractEnvVariable,
} = require('librechat-data-provider');
const { Providers } = require('@librechat/agents');
const { getOpenAIConfig, createHandleLLMNewToken } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const { fetchModels } = require('~/server/services/ModelService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
@ -144,7 +143,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
clientOptions,
);
clientOptions.modelOptions.user = req.user.id;
const options = getLLMConfig(apiKey, clientOptions, endpoint);
const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
if (!customOptions.streamRate) {
return options;
}

View file

@ -25,9 +25,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
const credentials = isUserProvided
? userKey
: {
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
let clientOptions = {};

View file

@ -94,7 +94,7 @@ function getLLMConfig(credentials, options = {}) {
// Extract from credentials
const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
const serviceKey =
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : serviceKeyRaw ?? {};
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
const project_id = serviceKey?.project_id ?? null;
const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;
@ -156,10 +156,6 @@ function getLLMConfig(credentials, options = {}) {
}
if (authHeader) {
/**
* NOTE: NOT SUPPORTED BY LANGCHAIN GENAI CLIENT,
* REQUIRES PR IN https://github.com/langchain-ai/langchainjs
*/
llmConfig.customHeaders = {
Authorization: `Bearer ${apiKey}`,
};

View file

@ -1,11 +1,10 @@
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { isEnabled, isUserProvided, getAzureCredentials } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { isEnabled, isUserProvided } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { PluginsClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {

View file

@ -114,11 +114,11 @@ describe('gptPlugins/initializeClient', () => {
test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
(process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

View file

@ -4,12 +4,15 @@ const {
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const {
isEnabled,
isUserProvided,
getOpenAIConfig,
getAzureCredentials,
createHandleLLMNewToken,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const { isEnabled, isUserProvided } = require('~/server/utils');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { getAzureCredentials } = require('~/utils');
const initializeClient = async ({
req,
@ -140,7 +143,7 @@ const initializeClient = async ({
modelOptions.model = modelName;
clientOptions = Object.assign({ modelOptions }, clientOptions);
clientOptions.modelOptions.user = req.user.id;
const options = getLLMConfig(apiKey, clientOptions);
const options = getOpenAIConfig(apiKey, clientOptions);
const streamRate = clientOptions.streamRate;
if (!streamRate) {
return options;
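
The swap from the local `getLLMConfig` to `getOpenAIConfig` from `@librechat/api` is a one-line change here and in the custom endpoint initializer above, which implies the packaged function keeps the same `(apiKey, clientOptions, endpoint?)` signature and `{ llmConfig, configOptions }` return shape as the removed implementation shown further down. A hedged call sketch under that assumption:

// Assumes getOpenAIConfig is a drop-in replacement for the removed getLLMConfig below.
const { getOpenAIConfig } = require('@librechat/api');

const { llmConfig, configOptions } = getOpenAIConfig('sk-example-key', {
  modelOptions: { model: 'gpt-4o-mini', temperature: 0.7, max_tokens: 512 },
  reverseProxyUrl: 'https://proxy.example.com/v1',
  headers: { 'X-Example': '1' },
});
// llmConfig: streaming flag plus normalized model options (e.g., max_tokens -> maxTokens)
// configOptions: baseURL / defaultHeaders for the underlying client
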

View file

@ -1,170 +0,0 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { KnownEndpoints } = require('librechat-data-provider');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param {string} apiKey - The API key for authentication.
* @param {Object} options - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {string} [options.modelOptions.user] - The user ID
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2).
* @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1).
* @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2).
* @param {number} [options.modelOptions.presence_penalty] - Encourages discussing new topics (-2 to 2).
* @param {number} [options.modelOptions.max_tokens] - The maximum number of tokens to generate.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
* @param {boolean} [options.useOpenRouter] - Flag to use OpenRouter API.
* @param {Object} [options.headers] - Additional headers for API requests.
* @param {string} [options.proxy] - Proxy server URL.
* @param {Object} [options.azure] - Azure-specific configurations.
* @param {boolean} [options.streaming] - Whether to use streaming mode.
* @param {Object} [options.addParams] - Additional parameters to add to the model options.
* @param {string[]} [options.dropParams] - Parameters to remove from the model options.
* @param {string|null} [endpoint=null] - The endpoint name
* @returns {Object} Configuration options for creating an LLM instance.
*/
function getLLMConfig(apiKey, options = {}, endpoint = null) {
let {
modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
proxy,
azure,
streaming = true,
addParams,
dropParams,
} = options;
/** @type {OpenAIClientOptions} */
let llmConfig = {
streaming,
};
Object.assign(llmConfig, modelOptions);
if (addParams && typeof addParams === 'object') {
Object.assign(llmConfig, addParams);
}
/** Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` */
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
dropParams = dropParams || [];
dropParams = [...new Set([...dropParams, ...searchExcludeParams])];
}
if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (llmConfig[param]) {
llmConfig[param] = undefined;
}
});
}
let useOpenRouter;
/** @type {OpenAIClientOptions['configuration']} */
const configOptions = {};
if (
(reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
(endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
useOpenRouter = true;
llmConfig.include_reasoning = true;
configOptions.baseURL = reverseProxyUrl;
configOptions.defaultHeaders = Object.assign(
{
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
headers,
);
} else if (reverseProxyUrl) {
configOptions.baseURL = reverseProxyUrl;
if (headers) {
configOptions.defaultHeaders = headers;
}
}
if (defaultQuery) {
configOptions.defaultQuery = defaultQuery;
}
if (proxy) {
const proxyAgent = new HttpsProxyAgent(proxy);
Object.assign(configOptions, {
httpAgent: proxyAgent,
httpsAgent: proxyAgent,
});
}
if (azure) {
const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
azure.azureOpenAIApiDeploymentName = useModelName
? sanitizeModelName(llmConfig.model)
: azure.azureOpenAIApiDeploymentName;
if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
if (configOptions.baseURL) {
const azureURL = constructAzureURL({
baseURL: configOptions.baseURL,
azureOptions: azure,
});
azure.azureOpenAIBasePath = azureURL.split(`/${azure.azureOpenAIApiDeploymentName}`)[0];
}
Object.assign(llmConfig, azure);
llmConfig.model = llmConfig.azureOpenAIApiDeploymentName;
} else {
llmConfig.apiKey = apiKey;
// Object.assign(llmConfig, {
// configuration: { apiKey },
// });
}
if (process.env.OPENAI_ORGANIZATION && this.azure) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
}
if (llmConfig?.['max_tokens'] != null) {
/** @type {number} */
llmConfig.maxTokens = llmConfig['max_tokens'];
delete llmConfig['max_tokens'];
}
return {
/** @type {OpenAIClientOptions} */
llmConfig,
/** @type {OpenAIClientOptions['configuration']} */
configOptions,
};
}
module.exports = { getLLMConfig };
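
For reference, a small worked example of the normalization at the end of the removed function above: OpenRouter's `reasoning_effort` is folded into a nested `reasoning` object, and `max_tokens` is renamed to `maxTokens` for the downstream client options.

// Worked example of the final normalization steps in getLLMConfig above.
const llmConfig = { model: 'openrouter/example-model', reasoning_effort: 'high', max_tokens: 1024 };
const useOpenRouter = true;

if (useOpenRouter && llmConfig.reasoning_effort != null) {
  llmConfig.reasoning = { effort: llmConfig.reasoning_effort };
  delete llmConfig.reasoning_effort;
}
if (llmConfig['max_tokens'] != null) {
  llmConfig.maxTokens = llmConfig['max_tokens'];
  delete llmConfig['max_tokens'];
}
console.log(llmConfig); // { model: 'openrouter/example-model', maxTokens: 1024, reasoning: { effort: 'high' } }
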

View file

@ -2,9 +2,9 @@ const axios = require('axios');
const fs = require('fs').promises;
const FormData = require('form-data');
const { Readable } = require('stream');
const { genAzureEndpoint } = require('@librechat/api');
const { extractEnvVariable, STTProviders } = require('librechat-data-provider');
const { getCustomConfig } = require('~/server/services/Config');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');
/**

View file

@ -1,8 +1,8 @@
const axios = require('axios');
const { genAzureEndpoint } = require('@librechat/api');
const { extractEnvVariable, TTSProviders } = require('librechat-data-provider');
const { getRandomVoiceId, createChunkProcessor, splitTextIntoChunks } = require('./streamAudio');
const { getCustomConfig } = require('~/server/services/Config');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');
/**

View file

@ -91,24 +91,44 @@ async function prepareAzureImageURL(req, file) {
* @param {Buffer} params.buffer - The avatar image buffer.
* @param {string} params.userId - The user's id.
* @param {string} params.manual - Flag to indicate manual update.
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @param {string} [params.basePath='images'] - The base folder within the container.
* @param {string} [params.containerName] - The Azure Blob container name.
* @returns {Promise<string>} The URL of the avatar.
*/
async function processAzureAvatar({ buffer, userId, manual, basePath = 'images', containerName }) {
async function processAzureAvatar({
buffer,
userId,
manual,
agentId,
basePath = 'images',
containerName,
}) {
try {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToAzure({
userId,
buffer,
fileName: 'avatar.png',
fileName,
basePath,
containerName,
});
const isManual = manual === 'true';
const url = `${downloadURL}?manual=${isManual}`;
if (isManual) {
// Only update the user record for user avatars (manual === 'true' and no agentId)
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}
return url;
} catch (error) {
logger.error('[processAzureAvatar] Error uploading profile picture to Azure:', error);
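
The avatar changes in this file (and in the Firebase and local variants below) follow the same pattern: a timestamped filename that is prefixed with the agent ID when `agentId` is provided, and a user-record update that is skipped for agent avatars. A small standalone illustration:

// Standalone illustration of the filename and update logic added above.
function buildAvatarFileName({ agentId, extension = 'png', timestamp = Date.now() }) {
  return agentId
    ? `agent-${agentId}-avatar-${timestamp}.${extension}`
    : `avatar-${timestamp}.${extension}`;
}

const manual = 'true';
const isManual = manual === 'true';
const agentId = 'agent_abc123';

console.log(buildAvatarFileName({ agentId })); // agent-agent_abc123-avatar-<timestamp>.png
console.log(buildAvatarFileName({}));          // avatar-<timestamp>.png
console.log(isManual && !agentId);             // false -> updateUser is not called for agent avatars
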

View file

@ -1,7 +1,6 @@
const FormData = require('form-data');
const { getCodeBaseURL } = require('@librechat/agents');
const { createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils');
const { createAxiosInstance, logAxiosError } = require('@librechat/api');
const axios = createAxiosInstance();
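
`createAxiosInstance` and `logAxiosError` now come from `@librechat/api` instead of local modules. Based on how `logAxiosError` is called elsewhere in this diff (see the MistralOCR catch block further down), it takes `{ error, message }`, logs the failure, and returns the formatted message string; a hedged usage sketch under that assumption:

// Sketch only; behavior beyond what this diff shows is assumed.
const { createAxiosInstance, logAxiosError } = require('@librechat/api');

const http = createAxiosInstance();

async function checkService() {
  try {
    await http.get('https://example.invalid/health');
  } catch (error) {
    return logAxiosError({ error, message: 'Health check failed' });
  }
}
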

View file

@ -1,6 +1,8 @@
const path = require('path');
const { v4 } = require('uuid');
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { getCodeBaseURL } = require('@librechat/agents');
const {
Tools,
@ -12,8 +14,6 @@ const {
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { convertImage } = require('~/server/services/Files/images/convert');
const { createFile, getFiles, updateFile } = require('~/models/File');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Process OpenAI image files, convert to target format, save and return file metadata.

View file

@ -82,22 +82,32 @@ async function prepareImageURL(req, file) {
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
* @param {string} params.userId - The user ID.
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
* @throws {Error} - Throws an error if Firebase is not initialized or if there is an error in uploading.
*/
async function processFirebaseAvatar({ buffer, userId, manual }) {
async function processFirebaseAvatar({ buffer, userId, manual, agentId }) {
try {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToFirebase({
userId,
buffer,
fileName: 'avatar.png',
fileName,
});
const isManual = manual === 'true';
const url = `${downloadURL}?manual=${isManual}`;
if (isManual) {
// Only update the user record for user avatars (manual === 'true' and no agentId)
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}

View file

@ -201,6 +201,10 @@ const unlinkFile = async (filepath) => {
*/
const deleteLocalFile = async (req, file) => {
const { publicPath, uploads } = req.app.locals.paths;
/** Filepath stripped of query parameters (e.g., ?manual=true) */
const cleanFilepath = file.filepath.split('?')[0];
if (file.embedded && process.env.RAG_API_URL) {
const jwtToken = req.headers.authorization.split(' ')[1];
axios.delete(`${process.env.RAG_API_URL}/documents`, {
@ -213,32 +217,32 @@ const deleteLocalFile = async (req, file) => {
});
}
if (file.filepath.startsWith(`/uploads/${req.user.id}`)) {
if (cleanFilepath.startsWith(`/uploads/${req.user.id}`)) {
const userUploadDir = path.join(uploads, req.user.id);
const basePath = file.filepath.split(`/uploads/${req.user.id}/`)[1];
const basePath = cleanFilepath.split(`/uploads/${req.user.id}/`)[1];
if (!basePath) {
throw new Error(`Invalid file path: ${file.filepath}`);
throw new Error(`Invalid file path: ${cleanFilepath}`);
}
const filepath = path.join(userUploadDir, basePath);
const rel = path.relative(userUploadDir, filepath);
if (rel.startsWith('..') || path.isAbsolute(rel) || rel.includes(`..${path.sep}`)) {
throw new Error(`Invalid file path: ${file.filepath}`);
throw new Error(`Invalid file path: ${cleanFilepath}`);
}
await unlinkFile(filepath);
return;
}
const parts = file.filepath.split(path.sep);
const parts = cleanFilepath.split(path.sep);
const subfolder = parts[1];
if (!subfolder && parts[0] === EModelEndpoint.agents) {
logger.warn(`Agent File ${file.file_id} is missing filepath, may have been deleted already`);
return;
}
const filepath = path.join(publicPath, file.filepath);
const filepath = path.join(publicPath, cleanFilepath);
if (!isValidPath(req, publicPath, subfolder, filepath)) {
throw new Error('Invalid file path');
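
Since avatar URLs now carry a `?manual=true|false` query string, the deletion path strips it before doing any filesystem work. A quick illustration of the `split('?')[0]` step above:

// The query string must not leak into filesystem paths.
const storedFilepath = '/uploads/user123/avatar-1718600000000.png?manual=true';
const cleanFilepath = storedFilepath.split('?')[0];
console.log(cleanFilepath); // /uploads/user123/avatar-1718600000000.png
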

View file

@ -112,10 +112,11 @@ async function prepareImagesLocal(req, file) {
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
* @param {string} params.userId - The user ID.
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
* @throws {Error} - Throws an error if Firebase is not initialized or if there is an error in uploading.
*/
async function processLocalAvatar({ buffer, userId, manual }) {
async function processLocalAvatar({ buffer, userId, manual, agentId }) {
const userDir = path.resolve(
__dirname,
'..',
@ -129,7 +130,14 @@ async function processLocalAvatar({ buffer, userId, manual }) {
userId,
);
const fileName = `avatar-${new Date().getTime()}.png`;
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const urlRoute = `/images/${userId}/${fileName}`;
const avatarPath = path.join(userDir, fileName);
@ -139,7 +147,8 @@ async function processLocalAvatar({ buffer, userId, manual }) {
const isManual = manual === 'true';
let url = `${urlRoute}?manual=${isManual}`;
if (isManual) {
// Only update the user record for user avatars (manual === 'true' and no agentId)
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}

View file

@ -1,238 +0,0 @@
// ~/server/services/Files/MistralOCR/crud.js
const fs = require('fs');
const path = require('path');
const FormData = require('form-data');
const {
FileSources,
envVarRegex,
extractEnvVariable,
extractVariableName,
} = require('librechat-data-provider');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { logger, createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils/axios');
const axios = createAxiosInstance();
/**
* Uploads a document to Mistral API using file streaming to avoid loading the entire file into memory
*
* @param {Object} params Upload parameters
* @param {string} params.filePath The path to the file on disk
* @param {string} [params.fileName] Optional filename to use (defaults to the name from filePath)
* @param {string} params.apiKey Mistral API key
* @param {string} [params.baseURL=https://api.mistral.ai/v1] Mistral API base URL
* @returns {Promise<Object>} The response from Mistral API
*/
async function uploadDocumentToMistral({
filePath,
fileName = '',
apiKey,
baseURL = 'https://api.mistral.ai/v1',
}) {
const form = new FormData();
form.append('purpose', 'ocr');
const actualFileName = fileName || path.basename(filePath);
const fileStream = fs.createReadStream(filePath);
form.append('file', fileStream, { filename: actualFileName });
return axios
.post(`${baseURL}/files`, form, {
headers: {
Authorization: `Bearer ${apiKey}`,
...form.getHeaders(),
},
maxBodyLength: Infinity,
maxContentLength: Infinity,
})
.then((res) => res.data)
.catch((error) => {
throw error;
});
}
async function getSignedUrl({
apiKey,
fileId,
expiry = 24,
baseURL = 'https://api.mistral.ai/v1',
}) {
return axios
.get(`${baseURL}/files/${fileId}/url?expiry=${expiry}`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
})
.then((res) => res.data)
.catch((error) => {
logger.error('Error fetching signed URL:', error.message);
throw error;
});
}
/**
* @param {Object} params
* @param {string} params.apiKey
* @param {string} params.url - The document or image URL
* @param {string} [params.documentType='document_url'] - 'document_url' or 'image_url'
* @param {string} [params.model]
* @param {string} [params.baseURL]
* @returns {Promise<OCRResult>}
*/
async function performOCR({
apiKey,
url,
documentType = 'document_url',
model = 'mistral-ocr-latest',
baseURL = 'https://api.mistral.ai/v1',
}) {
const documentKey = documentType === 'image_url' ? 'image_url' : 'document_url';
return axios
.post(
`${baseURL}/ocr`,
{
model,
image_limit: 0,
include_image_base64: false,
document: {
type: documentType,
[documentKey]: url,
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
},
)
.then((res) => res.data)
.catch((error) => {
logger.error('Error performing OCR:', error.message);
throw error;
});
}
/**
* Uploads a file to the Mistral OCR API and processes the OCR result.
*
* @param {Object} params - The params object.
* @param {ServerRequest} params.req - The request object from Express. It should have a `user` property with an `id`
* representing the user
* @param {Express.Multer.File} params.file - The file object, which is part of the request. The file object should
* have a `mimetype` property that tells us the file type
* @param {string} params.file_id - The file ID.
* @param {string} [params.entity_id] - The entity ID, not used here but passed for consistency.
* @returns {Promise<{ filepath: string, bytes: number }>} - The result object containing the processed `text` and `images` (not currently used),
* along with the `filename` and `bytes` properties.
*/
const uploadMistralOCR = async ({ req, file, file_id, entity_id }) => {
try {
/** @type {TCustomConfig['ocr']} */
const ocrConfig = req.app.locals?.ocr;
const apiKeyConfig = ocrConfig.apiKey || '';
const baseURLConfig = ocrConfig.baseURL || '';
const isApiKeyEnvVar = envVarRegex.test(apiKeyConfig);
const isBaseURLEnvVar = envVarRegex.test(baseURLConfig);
const isApiKeyEmpty = !apiKeyConfig.trim();
const isBaseURLEmpty = !baseURLConfig.trim();
let apiKey, baseURL;
if (isApiKeyEnvVar || isBaseURLEnvVar || isApiKeyEmpty || isBaseURLEmpty) {
const apiKeyVarName = isApiKeyEnvVar ? extractVariableName(apiKeyConfig) : 'OCR_API_KEY';
const baseURLVarName = isBaseURLEnvVar ? extractVariableName(baseURLConfig) : 'OCR_BASEURL';
const authValues = await loadAuthValues({
userId: req.user.id,
authFields: [baseURLVarName, apiKeyVarName],
optional: new Set([baseURLVarName]),
});
apiKey = authValues[apiKeyVarName];
baseURL = authValues[baseURLVarName];
} else {
apiKey = apiKeyConfig;
baseURL = baseURLConfig;
}
const mistralFile = await uploadDocumentToMistral({
filePath: file.path,
fileName: file.originalname,
apiKey,
baseURL,
});
const modelConfig = ocrConfig.mistralModel || '';
const model = envVarRegex.test(modelConfig)
? extractEnvVariable(modelConfig)
: modelConfig.trim() || 'mistral-ocr-latest';
const signedUrlResponse = await getSignedUrl({
apiKey,
baseURL,
fileId: mistralFile.id,
});
const mimetype = (file.mimetype || '').toLowerCase();
const originalname = file.originalname || '';
const isImage =
mimetype.startsWith('image') || /\.(png|jpe?g|gif|bmp|webp|tiff?)$/i.test(originalname);
const documentType = isImage ? 'image_url' : 'document_url';
const ocrResult = await performOCR({
apiKey,
baseURL,
model,
url: signedUrlResponse.url,
documentType,
});
let aggregatedText = '';
const images = [];
ocrResult.pages.forEach((page, index) => {
if (ocrResult.pages.length > 1) {
aggregatedText += `# PAGE ${index + 1}\n`;
}
aggregatedText += page.markdown + '\n\n';
if (page.images && page.images.length > 0) {
page.images.forEach((image) => {
if (image.image_base64) {
images.push(image.image_base64);
}
});
}
});
return {
filename: file.originalname,
bytes: aggregatedText.length * 4,
filepath: FileSources.mistral_ocr,
text: aggregatedText,
images,
};
} catch (error) {
let message = 'Error uploading document to Mistral OCR API';
const detail = error?.response?.data?.detail;
if (detail && detail !== '') {
message = detail;
}
const responseMessage = error?.response?.data?.message;
throw new Error(
`${logAxiosError({ error, message })}${responseMessage && responseMessage !== '' ? ` - ${responseMessage}` : ''}`,
);
}
};
module.exports = {
uploadDocumentToMistral,
uploadMistralOCR,
getSignedUrl,
performOCR,
};
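
This MistralOCR module (and its test file below) is removed here; given the other moves in this commit it presumably now lives in a shared package, though the new location is not shown in this diff. For reference, a worked example of the page-aggregation logic from `uploadMistralOCR` above: multi-page results get `# PAGE n` headers, and base64 page images are collected separately.

// Worked example of the aggregation loop in uploadMistralOCR above.
const ocrResult = {
  pages: [
    { markdown: 'First page text', images: [{ image_base64: 'aaa' }] },
    { markdown: 'Second page text', images: [] },
  ],
};

let aggregatedText = '';
const images = [];
ocrResult.pages.forEach((page, index) => {
  if (ocrResult.pages.length > 1) {
    aggregatedText += `# PAGE ${index + 1}\n`;
  }
  aggregatedText += page.markdown + '\n\n';
  if (page.images && page.images.length > 0) {
    page.images.forEach((image) => {
      if (image.image_base64) {
        images.push(image.image_base64);
      }
    });
  }
});
console.log(aggregatedText); // "# PAGE 1\nFirst page text\n\n# PAGE 2\nSecond page text\n\n"
console.log(images);         // ['aaa']
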

View file

@ -1,848 +0,0 @@
const fs = require('fs');
const mockAxios = {
interceptors: {
request: { use: jest.fn(), eject: jest.fn() },
response: { use: jest.fn(), eject: jest.fn() },
},
create: jest.fn().mockReturnValue({
defaults: {
proxy: null,
},
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
}),
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
reset: jest.fn().mockImplementation(function () {
this.get.mockClear();
this.post.mockClear();
this.put.mockClear();
this.delete.mockClear();
this.create.mockClear();
}),
};
jest.mock('axios', () => mockAxios);
jest.mock('fs');
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
},
createAxiosInstance: () => mockAxios,
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
const { uploadDocumentToMistral, uploadMistralOCR, getSignedUrl, performOCR } = require('./crud');
describe('MistralOCR Service', () => {
afterEach(() => {
mockAxios.reset();
jest.clearAllMocks();
});
describe('uploadDocumentToMistral', () => {
beforeEach(() => {
// Create a more complete mock for file streams that FormData can work with
const mockReadStream = {
on: jest.fn().mockImplementation(function (event, handler) {
// Simulate immediate 'end' event to make FormData complete processing
if (event === 'end') {
handler();
}
return this;
}),
pipe: jest.fn().mockImplementation(function () {
return this;
}),
pause: jest.fn(),
resume: jest.fn(),
emit: jest.fn(),
once: jest.fn(),
destroy: jest.fn(),
};
fs.createReadStream = jest.fn().mockReturnValue(mockReadStream);
// Mock FormData's append to avoid actual stream processing
jest.mock('form-data', () => {
const mockFormData = function () {
return {
append: jest.fn(),
getHeaders: jest
.fn()
.mockReturnValue({ 'content-type': 'multipart/form-data; boundary=---boundary' }),
getBuffer: jest.fn().mockReturnValue(Buffer.from('mock-form-data')),
getLength: jest.fn().mockReturnValue(100),
};
};
return mockFormData;
});
});
it('should upload a document to Mistral API using file streaming', async () => {
const mockResponse = { data: { id: 'file-123', purpose: 'ocr' } };
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await uploadDocumentToMistral({
filePath: '/path/to/test.pdf',
fileName: 'test.pdf',
apiKey: 'test-api-key',
});
// Check that createReadStream was called with the correct file path
expect(fs.createReadStream).toHaveBeenCalledWith('/path/to/test.pdf');
// Since we're mocking FormData, we'll just check that axios was called correctly
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/files',
expect.anything(),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer test-api-key',
}),
maxBodyLength: Infinity,
maxContentLength: Infinity,
}),
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors during document upload', async () => {
const errorMessage = 'API error';
mockAxios.post.mockRejectedValueOnce(new Error(errorMessage));
await expect(
uploadDocumentToMistral({
filePath: '/path/to/test.pdf',
fileName: 'test.pdf',
apiKey: 'test-api-key',
}),
).rejects.toThrow(errorMessage);
});
});
describe('getSignedUrl', () => {
it('should fetch signed URL from Mistral API', async () => {
const mockResponse = { data: { url: 'https://document-url.com' } };
mockAxios.get.mockResolvedValueOnce(mockResponse);
const result = await getSignedUrl({
fileId: 'file-123',
apiKey: 'test-api-key',
});
expect(mockAxios.get).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/files/file-123/url?expiry=24',
{
headers: {
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors when fetching signed URL', async () => {
const errorMessage = 'API error';
mockAxios.get.mockRejectedValueOnce(new Error(errorMessage));
await expect(
getSignedUrl({
fileId: 'file-123',
apiKey: 'test-api-key',
}),
).rejects.toThrow();
const { logger } = require('~/config');
expect(logger.error).toHaveBeenCalledWith('Error fetching signed URL:', errorMessage);
});
});
describe('performOCR', () => {
it('should perform OCR using Mistral API (document_url)', async () => {
const mockResponse = {
data: {
pages: [{ markdown: 'Page 1 content' }, { markdown: 'Page 2 content' }],
},
};
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await performOCR({
apiKey: 'test-api-key',
url: 'https://document-url.com',
model: 'mistral-ocr-latest',
documentType: 'document_url',
});
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
{
model: 'mistral-ocr-latest',
include_image_base64: false,
image_limit: 0,
document: {
type: 'document_url',
document_url: 'https://document-url.com',
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should perform OCR using Mistral API (image_url)', async () => {
const mockResponse = {
data: {
pages: [{ markdown: 'Image OCR content' }],
},
};
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await performOCR({
apiKey: 'test-api-key',
url: 'https://image-url.com/image.png',
model: 'mistral-ocr-latest',
documentType: 'image_url',
});
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
{
model: 'mistral-ocr-latest',
include_image_base64: false,
image_limit: 0,
document: {
type: 'image_url',
image_url: 'https://image-url.com/image.png',
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors during OCR processing', async () => {
const errorMessage = 'OCR processing error';
mockAxios.post.mockRejectedValueOnce(new Error(errorMessage));
await expect(
performOCR({
apiKey: 'test-api-key',
url: 'https://document-url.com',
}),
).rejects.toThrow();
const { logger } = require('~/config');
expect(logger.error).toHaveBeenCalledWith('Error performing OCR:', errorMessage);
});
});
describe('uploadMistralOCR', () => {
beforeEach(() => {
const mockReadStream = {
on: jest.fn().mockImplementation(function (event, handler) {
if (event === 'end') {
handler();
}
return this;
}),
pipe: jest.fn().mockImplementation(function () {
return this;
}),
pause: jest.fn(),
resume: jest.fn(),
emit: jest.fn(),
once: jest.fn(),
destroy: jest.fn(),
};
fs.createReadStream = jest.fn().mockReturnValue(mockReadStream);
});
it('should process OCR for a file with standard configuration', async () => {
// Setup mocks
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1',
});
// Mock file upload response
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
// Mock signed URL response
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
// Mock OCR response with text and images
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [
{
markdown: 'Page 1 content',
images: [{ image_base64: 'base64image1' }],
},
{
markdown: 'Page 2 content',
images: [{ image_base64: 'base64image2' }],
},
],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Use environment variable syntax to ensure loadAuthValues is called
apiKey: '${OCR_API_KEY}',
baseURL: '${OCR_BASEURL}',
mistralModel: 'mistral-medium',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
mimetype: 'application/pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Verify OCR result
expect(result).toEqual({
filename: 'document.pdf',
bytes: expect.any(Number),
filepath: 'mistral_ocr',
text: expect.stringContaining('# PAGE 1'),
images: ['base64image1', 'base64image2'],
});
});
it('should process OCR for an image file and use image_url type', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1',
});
// Mock file upload response
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-456', purpose: 'ocr' },
});
// Mock signed URL response
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com/image.png' },
});
// Mock OCR response for image
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [
{
markdown: 'Image OCR result',
images: [{ image_base64: 'imgbase64' }],
},
],
},
});
const req = {
user: { id: 'user456' },
app: {
locals: {
ocr: {
apiKey: '${OCR_API_KEY}',
baseURL: '${OCR_BASEURL}',
mistralModel: 'mistral-medium',
},
},
},
};
const file = {
path: '/tmp/upload/image.png',
originalname: 'image.png',
mimetype: 'image/png',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file456',
entity_id: 'entity456',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/image.png');
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user456',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Check that the OCR API was called with image_url type
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
expect.objectContaining({
document: expect.objectContaining({
type: 'image_url',
image_url: 'https://signed-url.com/image.png',
}),
}),
expect.any(Object),
);
expect(result).toEqual({
filename: 'image.png',
bytes: expect.any(Number),
filepath: 'mistral_ocr',
text: expect.stringContaining('Image OCR result'),
images: ['imgbase64'],
});
});
it('should process variable references in configuration', async () => {
// Setup mocks with environment variables
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
CUSTOM_API_KEY: 'custom-api-key',
CUSTOM_BASEURL: 'https://custom-api.mistral.ai/v1',
});
// Mock API responses
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [{ markdown: 'Content from custom API' }],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: '${CUSTOM_API_KEY}',
baseURL: '${CUSTOM_BASEURL}',
mistralModel: '${CUSTOM_MODEL}',
},
},
},
};
// Set environment variable for model
process.env.CUSTOM_MODEL = 'mistral-large';
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify that custom environment variables were extracted and used
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['CUSTOM_BASEURL', 'CUSTOM_API_KEY'],
optional: expect.any(Set),
});
// Check that mistral-large was used in the OCR API call
expect(mockAxios.post).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
model: 'mistral-large',
}),
expect.anything(),
);
expect(result.text).toEqual('Content from custom API\n\n');
});
it('should fall back to default values when variables are not properly formatted', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'default-api-key',
OCR_BASEURL: undefined, // Testing optional parameter
});
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [{ markdown: 'Default API result' }],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Use environment variable syntax to ensure loadAuthValues is called
apiKey: '${INVALID_FORMAT}', // Using valid env var format but with an invalid name
baseURL: '${OCR_BASEURL}', // Using valid env var format
mistralModel: 'mistral-ocr-latest', // Plain string value
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Should use the default values
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'INVALID_FORMAT'],
optional: expect.any(Set),
});
// Should use the default model when not using environment variable format
expect(mockAxios.post).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
model: 'mistral-ocr-latest',
}),
expect.anything(),
);
});
it('should handle API errors during OCR process', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
});
// Mock file upload to fail
mockAxios.post.mockRejectedValueOnce(new Error('Upload failed'));
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: 'OCR_API_KEY',
baseURL: 'OCR_BASEURL',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
await expect(
uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
}),
).rejects.toThrow('Error uploading document to Mistral OCR API');
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
});
it('should handle single page documents without page numbering', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1', // Make sure this is included
});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Single page content' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: 'OCR_API_KEY',
baseURL: 'OCR_BASEURL',
mistralModel: 'mistral-ocr-latest',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'single-page.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify that single page documents don't include page numbering
expect(result.text).not.toContain('# PAGE');
expect(result.text).toEqual('Single page content\n\n');
});
it('should use literal values in configuration when provided directly', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
// We'll still mock this but it should not be used for literal values
loadAuthValues.mockResolvedValue({});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Processed with literal config values' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Direct values that should be used as-is, without variable substitution
apiKey: 'actual-api-key-value',
baseURL: 'https://direct-api-url.mistral.ai/v1',
mistralModel: 'mistral-direct-model',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'direct-values.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify the correct URL was used with the direct baseURL value
expect(mockAxios.post).toHaveBeenCalledWith(
'https://direct-api-url.mistral.ai/v1/files',
expect.any(Object),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer actual-api-key-value',
}),
}),
);
// Check the OCR call was made with the direct model value
expect(mockAxios.post).toHaveBeenCalledWith(
'https://direct-api-url.mistral.ai/v1/ocr',
expect.objectContaining({
model: 'mistral-direct-model',
}),
expect.any(Object),
);
// Verify the result
expect(result.text).toEqual('Processed with literal config values\n\n');
// Verify loadAuthValues was never called since we used direct values
expect(loadAuthValues).not.toHaveBeenCalled();
});
it('should handle empty configuration values and use defaults', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
// Set up the mock values to be returned by loadAuthValues
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'default-from-env-key',
OCR_BASEURL: 'https://default-from-env.mistral.ai/v1',
});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Content from default configuration' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Empty string values - should fall back to defaults
apiKey: '',
baseURL: '',
mistralModel: '',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'empty-config.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify loadAuthValues was called with the default variable names
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Verify the API calls used the default values from loadAuthValues
expect(mockAxios.post).toHaveBeenCalledWith(
'https://default-from-env.mistral.ai/v1/files',
expect.any(Object),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer default-from-env-key',
}),
}),
);
// Verify the OCR model defaulted to mistral-ocr-latest
expect(mockAxios.post).toHaveBeenCalledWith(
'https://default-from-env.mistral.ai/v1/ocr',
expect.objectContaining({
model: 'mistral-ocr-latest',
}),
expect.any(Object),
);
// Check result
expect(result.text).toEqual('Content from default configuration\n\n');
});
});
});
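For reference, a minimal sketch of the request shape these tests assert against. It assumes only axios plus placeholder `apiKey`/`documentUrl` values; the endpoint, body, and headers mirror the expectations above rather than the service's full implementation.
const axios = require('axios');
/**
 * Illustrative only: the OCR call shape asserted in the tests above.
 * `apiKey` and `documentUrl` are placeholders, not values from this repo.
 */
async function sketchPerformOCR(apiKey, documentUrl) {
  const { data } = await axios.post(
    'https://api.mistral.ai/v1/ocr',
    {
      model: 'mistral-ocr-latest',
      include_image_base64: false,
      image_limit: 0,
      document: { type: 'document_url', document_url: documentUrl },
    },
    {
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
    },
  );
  // `data.pages[n].markdown` holds the extracted text per page
  return data;
}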

View file

@ -1,5 +0,0 @@
const crud = require('./crud');
module.exports = {
...crud,
};

View file

@ -94,15 +94,28 @@ async function prepareImageURLS3(req, file) {
* @param {Buffer} params.buffer - Avatar image buffer.
* @param {string} params.userId - User's unique identifier.
* @param {string} params.manual - 'true' or 'false' flag for manual update.
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @param {string} [params.basePath='images'] - Base path in the bucket.
* @returns {Promise<string>} Signed URL of the uploaded avatar.
*/
async function processS3Avatar({ buffer, userId, manual, basePath = defaultBasePath }) {
async function processS3Avatar({ buffer, userId, manual, agentId, basePath = defaultBasePath }) {
try {
const downloadURL = await saveBufferToS3({ userId, buffer, fileName: 'avatar.png', basePath });
if (manual === 'true') {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToS3({ userId, buffer, fileName, basePath });
// Only update user record if this is a user avatar (manual === 'true')
if (manual === 'true' && !agentId) {
await updateUser(userId, { avatar: downloadURL });
}
return downloadURL;
} catch (error) {
logger.error('[processS3Avatar] Error processing S3 avatar:', error.message);
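A condensed sketch of the naming scheme introduced in this hunk, assuming `sharp` and a Buffer input; it isolates only the filename logic, not the full `processS3Avatar` flow.
const sharp = require('sharp');
/** Illustrative: unique avatar filename with timestamp and optional agent ID */
async function buildAvatarFileName(buffer, agentId) {
  const metadata = await sharp(buffer).metadata();
  const extension = metadata.format === 'gif' ? 'gif' : 'png';
  const timestamp = new Date().getTime();
  return agentId
    ? `agent-${agentId}-avatar-${timestamp}.${extension}`
    : `avatar-${timestamp}.${extension}`;
}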

View file

@ -1,9 +1,9 @@
const fs = require('fs');
const axios = require('axios');
const FormData = require('form-data');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Deletes a file from the vector database. This function takes a file object, constructs the full path, and

View file

@ -1,4 +1,5 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const {
FileSources,
VisionModes,
@ -7,8 +8,6 @@ const {
EModelEndpoint,
} = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Converts a readable stream to a base64 encoded string.

View file

@ -519,7 +519,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
throw new Error('OCR capability is not enabled for Agents');
}
const { handleFileUpload: uploadMistralOCR } = getStrategyFunctions(
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
req.app.locals?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { file_id, temp_file_id } = metadata;
@ -531,7 +531,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
images,
filename,
filepath: ocrFileURL,
} = await uploadMistralOCR({ req, file, file_id, entity_id: agent_id, basePath });
} = await uploadOCR({ req, file, loadAuthValues });
const fileInfo = removeNullishValues({
text,

View file

@ -1,4 +1,5 @@
const { FileSources } = require('librechat-data-provider');
const { uploadMistralOCR, uploadAzureMistralOCR } = require('@librechat/api');
const {
getFirebaseURL,
prepareImageURL,
@ -46,7 +47,6 @@ const {
const { uploadOpenAIFile, deleteOpenAIFile, getOpenAIFileStream } = require('./OpenAI');
const { getCodeOutputDownloadStream, uploadCodeEnvFile } = require('./Code');
const { uploadVectors, deleteVectors } = require('./VectorDB');
const { uploadMistralOCR } = require('./MistralOCR');
/**
* Firebase Storage Strategy Functions
@ -190,6 +190,26 @@ const mistralOCRStrategy = () => ({
handleFileUpload: uploadMistralOCR,
});
const azureMistralOCRStrategy = () => ({
/** @type {typeof saveFileFromURL | null} */
saveURL: null,
/** @type {typeof getLocalFileURL | null} */
getFileURL: null,
/** @type {typeof saveLocalBuffer | null} */
saveBuffer: null,
/** @type {typeof processLocalAvatar | null} */
processAvatar: null,
/** @type {typeof uploadLocalImage | null} */
handleImageUpload: null,
/** @type {typeof prepareImagesLocal | null} */
prepareImagePayload: null,
/** @type {typeof deleteLocalFile | null} */
deleteFile: null,
/** @type {typeof getLocalFileStream | null} */
getDownloadStream: null,
handleFileUpload: uploadAzureMistralOCR,
});
// Strategy Selector
const getStrategyFunctions = (fileSource) => {
if (fileSource === FileSources.firebase) {
@ -210,6 +230,8 @@ const getStrategyFunctions = (fileSource) => {
return codeOutputStrategy();
} else if (fileSource === FileSources.mistral_ocr) {
return mistralOCRStrategy();
} else if (fileSource === FileSources.azure_mistral_ocr) {
return azureMistralOCRStrategy();
} else {
throw new Error('Invalid file source');
}
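A hedged usage sketch of the new branch in the strategy selector, using only the imports already shown in these hunks; the resolved `handleFileUpload` is the `uploadAzureMistralOCR` export referenced above.
const { FileSources } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
// Selecting the Azure Mistral OCR strategy added in this change
const { handleFileUpload } = getStrategyFunctions(FileSources.azure_mistral_ocr);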

View file

@ -1,6 +1,6 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { normalizeServerName } = require('librechat-mcp');
const { normalizeServerName } = require('@librechat/api');
const { Constants: AgentConstants, Providers } = require('@librechat/agents');
const {
Constants,
@ -50,9 +50,10 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
/** @type {(toolArguments: Object | string, config?: GraphRunnableConfig) => Promise<unknown>} */
const _call = async (toolArguments, config) => {
const userId = config?.configurable?.user?.id || config?.configurable?.user_id;
try {
const derivedSignal = config?.signal ? AbortSignal.any([config.signal]) : undefined;
const mcpManager = getMCPManager(config?.configurable?.user_id);
const mcpManager = getMCPManager(userId);
const provider = (config?.metadata?.provider || _provider)?.toLowerCase();
const result = await mcpManager.callTool({
serverName,
@ -60,8 +61,8 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
provider,
toolArguments,
options: {
userId: config?.configurable?.user_id,
signal: derivedSignal,
user: config?.configurable?.user,
},
});
@ -74,7 +75,7 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
return result;
} catch (error) {
logger.error(
`[MCP][User: ${config?.configurable?.user_id}][${serverName}] Error calling "${toolName}" MCP tool:`,
`[MCP][User: ${userId}][${serverName}] Error calling "${toolName}" MCP tool:`,
error,
);
throw new Error(

View file

@ -1,12 +1,13 @@
const axios = require('axios');
const { Providers } = require('@librechat/agents');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { isUserProvided } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');
const { logger } = require('~/config');
/**
* Splits a string by commas and trims each resulting value.

View file

@ -1,6 +1,6 @@
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
const { logger } = require('~/config');
const {
fetchModels,
@ -28,7 +28,8 @@ jest.mock('~/cache/getLogStores', () =>
set: jest.fn().mockResolvedValue(true),
})),
);
jest.mock('~/config', () => ({
jest.mock('@librechat/data-schemas', () => ({
...jest.requireActual('@librechat/data-schemas'),
logger: {
error: jest.fn(),
},

View file

@ -1,6 +1,6 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { logAxiosError } = require('~/utils');
/**
* @typedef {Object} RetrieveOptions

View file

@ -1,8 +1,9 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { TokenExchangeMethodEnum } = require('librechat-data-provider');
const { handleOAuthToken } = require('~/models/Token');
const { decryptV2 } = require('~/server/utils/crypto');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Processes the access tokens and stores them in the database.
@ -49,6 +50,7 @@ async function processAccessTokens(tokenData, { userId, identifier }) {
* @param {string} fields.client_url - The URL of the OAuth provider.
* @param {string} fields.identifier - The identifier for the token.
* @param {string} fields.refresh_token - The refresh token to use.
* @param {string} fields.token_exchange_method - The token exchange method ('default_post' or 'basic_auth_header').
* @param {string} fields.encrypted_oauth_client_id - The client ID for the OAuth provider.
* @param {string} fields.encrypted_oauth_client_secret - The client secret for the OAuth provider.
* @returns {Promise<{
@ -63,26 +65,36 @@ const refreshAccessToken = async ({
client_url,
identifier,
refresh_token,
token_exchange_method,
encrypted_oauth_client_id,
encrypted_oauth_client_secret,
}) => {
try {
const oauth_client_id = await decryptV2(encrypted_oauth_client_id);
const oauth_client_secret = await decryptV2(encrypted_oauth_client_secret);
const headers = {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
};
const params = new URLSearchParams({
client_id: oauth_client_id,
client_secret: oauth_client_secret,
grant_type: 'refresh_token',
refresh_token,
});
if (token_exchange_method === TokenExchangeMethodEnum.BasicAuthHeader) {
const basicAuth = Buffer.from(`${oauth_client_id}:${oauth_client_secret}`).toString('base64');
headers['Authorization'] = `Basic ${basicAuth}`;
} else {
params.append('client_id', oauth_client_id);
params.append('client_secret', oauth_client_secret);
}
const response = await axios({
method: 'POST',
url: client_url,
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
},
headers,
data: params.toString(),
});
await processAccessTokens(response.data, {
@ -110,6 +122,7 @@ const refreshAccessToken = async ({
* @param {string} fields.identifier - The identifier for the token.
* @param {string} fields.client_url - The URL of the OAuth provider.
* @param {string} fields.redirect_uri - The redirect URI for the OAuth provider.
* @param {string} fields.token_exchange_method - The token exchange method ('default_post' or 'basic_auth_header').
* @param {string} fields.encrypted_oauth_client_id - The client ID for the OAuth provider.
* @param {string} fields.encrypted_oauth_client_secret - The client secret for the OAuth provider.
* @returns {Promise<{
@ -125,27 +138,37 @@ const getAccessToken = async ({
identifier,
client_url,
redirect_uri,
token_exchange_method,
encrypted_oauth_client_id,
encrypted_oauth_client_secret,
}) => {
const oauth_client_id = await decryptV2(encrypted_oauth_client_id);
const oauth_client_secret = await decryptV2(encrypted_oauth_client_secret);
const headers = {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
};
const params = new URLSearchParams({
code,
client_id: oauth_client_id,
client_secret: oauth_client_secret,
grant_type: 'authorization_code',
redirect_uri,
});
if (token_exchange_method === TokenExchangeMethodEnum.BasicAuthHeader) {
const basicAuth = Buffer.from(`${oauth_client_id}:${oauth_client_secret}`).toString('base64');
headers['Authorization'] = `Basic ${basicAuth}`;
} else {
params.append('client_id', oauth_client_id);
params.append('client_secret', oauth_client_secret);
}
try {
const response = await axios({
method: 'POST',
url: client_url,
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
},
headers,
data: params.toString(),
});
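A condensed sketch of the branch added in both token functions: when the exchange method is 'basic_auth_header', the client credentials move from the form body into an Authorization header. All values are placeholders; this is not a complete TokenService call.
function buildTokenExchangeRequest({ clientId, clientSecret, refreshToken, tokenExchangeMethod }) {
  const headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    Accept: 'application/json',
  };
  const params = new URLSearchParams({
    grant_type: 'refresh_token',
    refresh_token: refreshToken,
  });
  if (tokenExchangeMethod === 'basic_auth_header') {
    // Credentials go in the header instead of the form body
    headers['Authorization'] =
      'Basic ' + Buffer.from(`${clientId}:${clientSecret}`).toString('base64');
  } else {
    // Default POST exchange: credentials in the form body
    params.append('client_id', clientId);
    params.append('client_secret', clientSecret);
  }
  return { headers, body: params.toString() };
}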

View file

@ -1,64 +0,0 @@
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { logger } = require('~/config');
class Tokenizer {
constructor() {
this.tokenizersCache = {};
this.tokenizerCallsCount = 0;
}
getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
let tokenizer;
if (this.tokenizersCache[encoding]) {
tokenizer = this.tokenizersCache[encoding];
} else {
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
this.tokenizersCache[encoding] = tokenizer;
}
return tokenizer;
}
freeAndResetAllEncoders() {
try {
Object.keys(this.tokenizersCache).forEach((key) => {
if (this.tokenizersCache[key]) {
this.tokenizersCache[key].free();
delete this.tokenizersCache[key];
}
});
this.tokenizerCallsCount = 1;
} catch (error) {
logger.error('[Tokenizer] Free and reset encoders error', error);
}
}
resetTokenizersIfNecessary() {
if (this.tokenizerCallsCount >= 25) {
if (this.options?.debug) {
logger.debug('[Tokenizer] freeAndResetAllEncoders: reached 25 encodings, resetting...');
}
this.freeAndResetAllEncoders();
}
this.tokenizerCallsCount++;
}
getTokenCount(text, encoding = 'cl100k_base') {
this.resetTokenizersIfNecessary();
try {
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
} catch (error) {
this.freeAndResetAllEncoders();
const tokenizer = this.getTokenizer(encoding);
return tokenizer.encode(text, 'all').length;
}
}
}
const TokenizerSingleton = new Tokenizer();
module.exports = TokenizerSingleton;
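A brief usage sketch of the singleton being removed here (relative path assumed as it existed before removal); the class caches one encoder per encoding and frees them every 25 calls.
const Tokenizer = require('./Tokenizer');
// Count tokens with the default cl100k_base encoding
const tokens = Tokenizer.getTokenCount('Hello, world!', 'cl100k_base');
console.log(tokens); // token count for the given text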

View file

@ -1,136 +0,0 @@
/**
* @file Tokenizer.spec.cjs
*
* Tests the real TokenizerSingleton (no mocking of `tiktoken`).
* Make sure to install `tiktoken` and have it configured properly.
*/
const Tokenizer = require('./Tokenizer'); // <-- Adjust path to your singleton file
const { logger } = require('~/config');
describe('Tokenizer', () => {
it('should be a singleton (same instance)', () => {
const AnotherTokenizer = require('./Tokenizer'); // same path
expect(Tokenizer).toBe(AnotherTokenizer);
});
describe('getTokenizer', () => {
it('should create an encoder for an explicit model name (e.g., "gpt-4")', () => {
// The real `encoding_for_model` will be called internally
// as soon as we pass isModelName = true.
const tokenizer = Tokenizer.getTokenizer('gpt-4', true);
// Basic sanity checks
expect(tokenizer).toBeDefined();
// You can optionally check certain properties from `tiktoken` if they exist
// e.g., expect(typeof tokenizer.encode).toBe('function');
});
it('should create an encoder for a known encoding (e.g., "cl100k_base")', () => {
// The real `get_encoding` will be called internally
// as soon as we pass isModelName = false.
const tokenizer = Tokenizer.getTokenizer('cl100k_base', false);
expect(tokenizer).toBeDefined();
// e.g., expect(typeof tokenizer.encode).toBe('function');
});
it('should return cached tokenizer if previously fetched', () => {
const tokenizer1 = Tokenizer.getTokenizer('cl100k_base', false);
const tokenizer2 = Tokenizer.getTokenizer('cl100k_base', false);
// Should be the exact same instance from the cache
expect(tokenizer1).toBe(tokenizer2);
});
});
describe('freeAndResetAllEncoders', () => {
beforeEach(() => {
jest.clearAllMocks();
});
it('should free all encoders and reset tokenizerCallsCount to 1', () => {
// By creating two different encodings, we populate the cache
Tokenizer.getTokenizer('cl100k_base', false);
Tokenizer.getTokenizer('r50k_base', false);
// Now free them
Tokenizer.freeAndResetAllEncoders();
// The internal cache is cleared
expect(Tokenizer.tokenizersCache['cl100k_base']).toBeUndefined();
expect(Tokenizer.tokenizersCache['r50k_base']).toBeUndefined();
// tokenizerCallsCount is reset to 1
expect(Tokenizer.tokenizerCallsCount).toBe(1);
});
it('should catch and log errors if freeing fails', () => {
// Mock logger.error before the test
const mockLoggerError = jest.spyOn(logger, 'error');
// Set up a problematic tokenizer in the cache
Tokenizer.tokenizersCache['cl100k_base'] = {
free() {
throw new Error('Intentional free error');
},
};
// Should not throw uncaught errors
Tokenizer.freeAndResetAllEncoders();
// Verify logger.error was called with correct arguments
expect(mockLoggerError).toHaveBeenCalledWith(
'[Tokenizer] Free and reset encoders error',
expect.any(Error),
);
// Clean up
mockLoggerError.mockRestore();
Tokenizer.tokenizersCache = {};
});
});
describe('getTokenCount', () => {
beforeEach(() => {
jest.clearAllMocks();
Tokenizer.freeAndResetAllEncoders();
});
it('should return the number of tokens in the given text', () => {
const text = 'Hello, world!';
const count = Tokenizer.getTokenCount(text, 'cl100k_base');
expect(count).toBeGreaterThan(0);
});
it('should reset encoders if an error is thrown', () => {
// We can simulate an error by temporarily overriding the selected tokenizer's `encode` method.
const tokenizer = Tokenizer.getTokenizer('cl100k_base', false);
const originalEncode = tokenizer.encode;
tokenizer.encode = () => {
throw new Error('Forced error');
};
// Despite the forced error, the code should catch and reset, then re-encode
const count = Tokenizer.getTokenCount('Hello again', 'cl100k_base');
expect(count).toBeGreaterThan(0);
// Restore the original encode
tokenizer.encode = originalEncode;
});
it('should reset tokenizers after 25 calls', () => {
// Spy on freeAndResetAllEncoders
const resetSpy = jest.spyOn(Tokenizer, 'freeAndResetAllEncoders');
// Make 24 calls; should NOT reset yet
for (let i = 0; i < 24; i++) {
Tokenizer.getTokenCount('test text', 'cl100k_base');
}
expect(resetSpy).not.toHaveBeenCalled();
// 25th call triggers the reset
Tokenizer.getTokenCount('the 25th call!', 'cl100k_base');
expect(resetSpy).toHaveBeenCalledTimes(1);
});
});
});

View file

@ -500,6 +500,8 @@ async function processRequiredActions(client, requiredActions) {
async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey }) {
if (!agent.tools || agent.tools.length === 0) {
return {};
} else if (agent.tools && agent.tools.length === 1 && agent.tools[0] === AgentCapabilities.ocr) {
return {};
}
const endpointsConfig = await getEndpointsConfig(req);

View file

@ -1,14 +0,0 @@
const { EModelEndpoint, agentsEndpointSChema } = require('librechat-data-provider');
/**
* Sets up the Agents configuration from the config (`librechat.yaml`) file.
* @param {TCustomConfig} config - The loaded custom configuration.
* @returns {Partial<TAgentsEndpoint>} The Agents endpoint configuration.
*/
function agentsConfigSetup(config) {
const agentsConfig = config.endpoints[EModelEndpoint.agents];
const parsedConfig = agentsEndpointSChema.parse(agentsConfig);
return parsedConfig;
}
module.exports = { agentsConfigSetup };

View file

@ -2,6 +2,7 @@ const {
SystemRoles,
Permissions,
PermissionTypes,
isMemoryEnabled,
removeNullishValues,
} = require('librechat-data-provider');
const { updateAccessPermissions } = require('~/models/Role');
@ -20,6 +21,14 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
const hasModelSpecs = config?.modelSpecs?.list?.length > 0;
const includesAddedEndpoints = config?.modelSpecs?.addedEndpoints?.length > 0;
const memoryConfig = config?.memory;
const memoryEnabled = isMemoryEnabled(memoryConfig);
/** Only disable memories if memory config is present but disabled/invalid */
const shouldDisableMemories = memoryConfig && !memoryEnabled;
/** Check if personalization is enabled (defaults to true if memory is configured and enabled) */
const isPersonalizationEnabled =
memoryConfig && memoryEnabled && memoryConfig.personalize !== false;
/** @type {TCustomConfig['interface']} */
const loadedInterface = removeNullishValues({
endpointsMenu:
@ -33,6 +42,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
privacyPolicy: interfaceConfig?.privacyPolicy ?? defaults.privacyPolicy,
termsOfService: interfaceConfig?.termsOfService ?? defaults.termsOfService,
bookmarks: interfaceConfig?.bookmarks ?? defaults.bookmarks,
memories: shouldDisableMemories ? false : (interfaceConfig?.memories ?? defaults.memories),
prompts: interfaceConfig?.prompts ?? defaults.prompts,
multiConvo: interfaceConfig?.multiConvo ?? defaults.multiConvo,
agents: interfaceConfig?.agents ?? defaults.agents,
@ -45,6 +55,10 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
await updateAccessPermissions(roleName, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: loadedInterface.bookmarks },
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: loadedInterface.memories,
[Permissions.OPT_OUT]: isPersonalizationEnabled,
},
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: loadedInterface.multiConvo },
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
@ -54,6 +68,10 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
await updateAccessPermissions(SystemRoles.ADMIN, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: loadedInterface.bookmarks },
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: loadedInterface.memories,
[Permissions.OPT_OUT]: isPersonalizationEnabled,
},
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: loadedInterface.multiConvo },
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
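A condensed sketch of the flag logic added above, taking the parsed `memory` section of librechat.yaml as input; illustrative only, using the `isMemoryEnabled` helper imported in this hunk.
const { isMemoryEnabled } = require('librechat-data-provider');
function deriveMemoryFlags(memoryConfig) {
  const memoryEnabled = isMemoryEnabled(memoryConfig);
  // Only force-disable memories when a memory config is present but disabled/invalid
  const shouldDisableMemories = Boolean(memoryConfig) && !memoryEnabled;
  // Personalization (wired to the OPT_OUT permission) defaults to on when memory is enabled
  const isPersonalizationEnabled =
    Boolean(memoryConfig) && memoryEnabled && memoryConfig.personalize !== false;
  return { shouldDisableMemories, isPersonalizationEnabled };
}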

View file

@ -12,6 +12,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: true,
multiConvo: true,
agents: true,
temporaryChat: true,
@ -26,6 +27,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -39,6 +41,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: false,
bookmarks: false,
memories: false,
multiConvo: false,
agents: false,
temporaryChat: false,
@ -53,6 +56,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: false },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: false },
@ -70,6 +74,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -83,6 +88,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: undefined,
bookmarks: undefined,
memories: undefined,
multiConvo: undefined,
agents: undefined,
temporaryChat: undefined,
@ -97,6 +103,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -110,6 +117,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: undefined,
agents: true,
temporaryChat: undefined,
@ -124,6 +132,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -138,6 +147,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: true,
multiConvo: true,
agents: true,
temporaryChat: true,
@ -151,6 +161,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -168,6 +179,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -185,6 +197,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -202,6 +215,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -215,6 +229,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: true,
agents: false,
temporaryChat: true,
@ -228,6 +243,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -242,6 +258,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: false,
multiConvo: false,
agents: undefined,
temporaryChat: undefined,
@ -255,6 +272,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -268,6 +286,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: true,
agents: false,
temporaryChat: true,
@ -281,6 +300,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },