Merge branch 'main' into refactor/package-auth

Cha 2025-06-17 18:26:25 +08:00
commit 02b9c9d447
340 changed files with 18559 additions and 14872 deletions

View file

@ -515,6 +515,18 @@ EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
#========================#
# Mailgun API #
#========================#
# MAILGUN_API_KEY=your-mailgun-api-key
# MAILGUN_DOMAIN=mg.yourdomain.com
# EMAIL_FROM=noreply@yourdomain.com
# EMAIL_FROM_NAME="LibreChat"
# # Optional: For EU region
# MAILGUN_HOST=https://api.eu.mailgun.net
#========================#
# Firebase CDN #
#========================#
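A minimal sketch of how the Mailgun variables above might be consumed when assembling a mail transport config. The helper name and the config object shape are assumptions for illustration, not LibreChat's actual mail code; only the variable names come from the block above.

// Hypothetical helper: assembles a Mailgun config from the env vars above.
function getMailgunConfig() {
  const { MAILGUN_API_KEY, MAILGUN_DOMAIN, MAILGUN_HOST, EMAIL_FROM, EMAIL_FROM_NAME } = process.env;
  if (!MAILGUN_API_KEY || !MAILGUN_DOMAIN) {
    return null; // Mailgun not configured; fall back to other email settings
  }
  return {
    apiKey: MAILGUN_API_KEY,
    domain: MAILGUN_DOMAIN,
    host: MAILGUN_HOST || 'https://api.mailgun.net', // EU region uses https://api.eu.mailgun.net
    from: `"${EMAIL_FROM_NAME || 'LibreChat'}" <${EMAIL_FROM || 'noreply@yourdomain.com'}>`,
  };
}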

View file

@ -30,8 +30,8 @@ Project maintainers have the right and responsibility to remove, edit, or reject
2. Install typescript globally: `npm i -g typescript`.
3. Run `npm ci` to install dependencies.
4. Build the data provider: `npm run build:data-provider`.
5. Build MCP: `npm run build:mcp`.
6. Build data schemas: `npm run build:data-schemas`.
5. Build data schemas: `npm run build:data-schemas`.
6. Build API methods: `npm run build:api`.
7. Setup and run unit tests:
- Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
- Run backend unit tests: `npm run test:api`.

View file

@ -7,6 +7,7 @@ on:
- release/*
paths:
- 'api/**'
- 'packages/api/**'
jobs:
tests_Backend:
name: Run Backend unit tests
@ -36,12 +37,12 @@ jobs:
- name: Install Data Provider Package
run: npm run build:data-provider
- name: Install MCP Package
run: npm run build:mcp
- name: Install Data Schemas Package
run: npm run build:data-schemas
- name: Install API Package
run: npm run build:api
- name: Create empty auth.json file
run: |
mkdir -p api/data
@ -66,5 +67,8 @@ jobs:
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci
- name: Run librechat-mcp unit tests
run: cd packages/mcp && npm run test:ci
- name: Run @librechat/data-schemas unit tests
run: cd packages/data-schemas && npm run test:ci
- name: Run @librechat/api unit tests
run: cd packages/api && npm run test:ci

View file

@ -30,17 +30,17 @@ jobs:
DO_USER: ${{ secrets.DO_USER }}
run: |
ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF
sudo -i -u danny bash << EEOF
sudo -i -u danny bash << 'EEOF'
cd ~/LibreChat && \
git fetch origin main && \
npm run stop:deployed && \
docker images -a | grep "librechat" | awk '{print \$3}' | xargs docker rmi && \
npm run update:deployed && \
sudo npm run stop:deployed && \
sudo docker images --format "{{.Repository}}:{{.ID}}" | grep -E "lc-dev|librechat" | cut -d: -f2 | xargs -r sudo docker rmi -f || true && \
sudo npm run update:deployed && \
git checkout dev && \
git pull origin dev && \
git checkout do-deploy && \
git rebase dev && \
npm run start:deployed && \
sudo npm run start:deployed && \
echo "Update completed. Application should be running now."
EEOF
EOF

View file

@ -14,7 +14,7 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
npm config set fetch-retry-mintimeout 15000
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
COPY packages/api/package*.json ./packages/api/
COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
@ -24,26 +24,27 @@ FROM base-min AS base
WORKDIR /app
RUN npm ci
# Build data-provider
# Build `data-provider` package
FROM base AS data-provider-build
WORKDIR /app/packages/data-provider
COPY packages/data-provider ./
RUN npm run build
# Build mcp package
FROM base AS mcp-build
WORKDIR /app/packages/mcp
COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
# Build data-schemas
# Build `data-schemas` package
FROM base AS data-schemas-build
WORKDIR /app/packages/data-schemas
COPY packages/data-schemas ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
# Build `api` package
FROM base AS api-package-build
WORKDIR /app/packages/api
COPY packages/api ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist
RUN npm run build
# Client build
FROM base AS client-build
WORKDIR /app/client
@ -63,8 +64,8 @@ RUN npm ci --omit=dev
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist
COPY --from=api-package-build /app/packages/api/dist ./packages/api/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
EXPOSE 3080

View file

@ -150,8 +150,8 @@ Click on the thumbnail to open the video☝
**Other:**
- **Website:** [librechat.ai](https://librechat.ai)
- **Documentation:** [docs.librechat.ai](https://docs.librechat.ai)
- **Blog:** [blog.librechat.ai](https://blog.librechat.ai)
- **Documentation:** [librechat.ai/docs](https://librechat.ai/docs)
- **Blog:** [librechat.ai/blog](https://librechat.ai/blog)
---

View file

@ -10,6 +10,7 @@ const {
validateVisionModel,
} = require('librechat-data-provider');
const { SplitStreamHandler: _Handler } = require('@librechat/agents');
const { Tokenizer, createFetch, createStreamEventHandlers } = require('@librechat/api');
const {
truncateText,
formatMessage,
@ -26,8 +27,6 @@ const {
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { createFetch, createStreamEventHandlers } = require('./generators');
const Tokenizer = require('~/server/services/Tokenizer');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

View file

@ -2,6 +2,7 @@ const { Keyv } = require('keyv');
const crypto = require('crypto');
const { CohereClient } = require('cohere-ai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { constructAzureURL, genAzureChatCompletion } = require('@librechat/api');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
ImageDetail,
@ -10,9 +11,9 @@ const {
CohereConstants,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const { extractBaseURL } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

View file

@ -1,4 +1,5 @@
const { google } = require('googleapis');
const { Tokenizer } = require('@librechat/api');
const { concat } = require('@langchain/core/utils/stream');
const { ChatVertexAI } = require('@langchain/google-vertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
@ -19,7 +20,6 @@ const {
} = require('librechat-data-provider');
const { getSafetySettings } = require('~/server/services/Endpoints/google/llm');
const { encodeAndFormat } = require('~/server/services/Files/images');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
@ -34,7 +34,8 @@ const BaseClient = require('./BaseClient');
const loc = process.env.GOOGLE_LOC || 'us-central1';
const publisher = 'google';
const endpointPrefix = `${loc}-aiplatform.googleapis.com`;
const endpointPrefix =
loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`;
const settings = endpointSettings[EModelEndpoint.google];
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
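The new ternary routes the `global` location to the region-less host instead of prefixing it. A standalone restatement of that expression with sample values, for illustration only:

// Mirrors the endpointPrefix expression above for a few GOOGLE_LOC values.
const prefixFor = (loc) =>
  loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`;

console.log(prefixFor('us-central1')); // us-central1-aiplatform.googleapis.com
console.log(prefixFor('global'));      // aiplatform.googleapis.com (new behavior)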

View file

@ -1,10 +1,11 @@
const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { sleep } = require('@librechat/agents');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL, logAxiosError } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const { deriveBaseURL } = require('~/utils');
const ollamaPayloadSchema = z.object({
mirostat: z.number().optional(),
@ -67,7 +68,7 @@ class OllamaClient {
return models;
} catch (error) {
const logMessage =
'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
"Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn't start with `ollama` (case-insensitive).";
logAxiosError({ message: logMessage, error });
return [];
}
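`logAxiosError` now comes from the `@librechat/api` package instead of `~/utils`. A minimal usage sketch based on the call shape shown above; the surrounding function and the `/api/tags` request are illustrative assumptions, not this client's code.

// Illustrative only: same { message, error } call shape as in the catch block above.
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');

async function listOllamaModels(baseURL) {
  try {
    const { data } = await axios.get(`${baseURL}/api/tags`);
    return data?.models ?? [];
  } catch (error) {
    logAxiosError({ message: 'Failed to fetch models from Ollama API.', error });
    return [];
  }
}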

View file

@ -1,6 +1,14 @@
const { OllamaClient } = require('./OllamaClient');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents');
const {
isEnabled,
Tokenizer,
createFetch,
constructAzureURL,
genAzureChatCompletion,
createStreamEventHandlers,
} = require('@librechat/api');
const {
Constants,
ImageDetail,
@ -16,13 +24,6 @@ const {
validateVisionModel,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const {
extractBaseURL,
constructAzureURL,
getModelMaxTokens,
genAzureChatCompletion,
getModelMaxOutputTokens,
} = require('~/utils');
const {
truncateText,
formatMessage,
@ -30,10 +31,9 @@ const {
titleInstruction,
createContextHandlers,
} = require('./prompts');
const { extractBaseURL, getModelMaxTokens, getModelMaxOutputTokens } = require('~/utils');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { createFetch, createStreamEventHandlers } = require('./generators');
const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils');
const Tokenizer = require('~/server/services/Tokenizer');
const { addSpaceIfNeeded, sleep } = require('~/server/utils');
const { spendTokens } = require('~/models/spendTokens');
const { handleOpenAIErrors } = require('./tools/util');
const { createLLM, RunManager } = require('./llm');

View file

@ -1,71 +0,0 @@
const fetch = require('node-fetch');
const { GraphEvents } = require('@librechat/agents');
const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');
/**
* Makes a function to make HTTP request and logs the process.
* @param {Object} params
* @param {boolean} [params.directEndpoint] - Whether to use a direct endpoint.
* @param {string} [params.reverseProxyUrl] - The reverse proxy URL to use for the request.
* @returns {Promise<Response>} - A promise that resolves to the response of the fetch request.
*/
function createFetch({ directEndpoint = false, reverseProxyUrl = '' }) {
/**
* Makes an HTTP request and logs the process.
* @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object.
* @param {RequestInit} [init] - Optional init options for the request.
* @returns {Promise<Response>} - A promise that resolves to the response of the fetch request.
*/
return async (_url, init) => {
let url = _url;
if (directEndpoint) {
url = reverseProxyUrl;
}
logger.debug(`Making request to ${url}`);
if (typeof Bun !== 'undefined') {
return await fetch(url, init);
}
return await fetch(url, init);
};
}
// Add this at the module level outside the class
/**
* Creates event handlers for stream events that don't capture client references
* @param {Object} res - The response object to send events to
* @returns {Object} Object containing handler functions
*/
function createStreamEventHandlers(res) {
return {
[GraphEvents.ON_RUN_STEP]: (event) => {
if (res) {
sendEvent(res, event);
}
},
[GraphEvents.ON_MESSAGE_DELTA]: (event) => {
if (res) {
sendEvent(res, event);
}
},
[GraphEvents.ON_REASONING_DELTA]: (event) => {
if (res) {
sendEvent(res, event);
}
},
};
}
function createHandleLLMNewToken(streamRate) {
return async () => {
if (streamRate) {
await sleep(streamRate);
}
};
}
module.exports = {
createFetch,
createHandleLLMNewToken,
createStreamEventHandlers,
};
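This local generators module was removed because the same helpers are now imported from `@librechat/api` (see the updated requires in the client files above). A hedged sketch of equivalent usage after the move, assuming the package keeps the signatures documented in the deleted file; the URL is a placeholder.

// Assumes @librechat/api exports these helpers with the signatures shown above.
const { createFetch, createStreamEventHandlers } = require('@librechat/api');

// Fetch wrapper that pins every request to a reverse proxy URL when directEndpoint is set.
const customFetch = createFetch({
  directEndpoint: true,
  reverseProxyUrl: 'https://proxy.example.com/v1/chat/completions', // placeholder
});

// SSE handlers bound to an Express-style response object.
function attachStreamHandlers(res) {
  return createStreamEventHandlers(res);
}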

View file

@ -1,6 +1,5 @@
const { ChatOpenAI } = require('@langchain/openai');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');
const { isEnabled, sanitizeModelName, constructAzureURL } = require('@librechat/api');
/**
* Creates a new instance of a language model (LLM) for chat interactions.

View file

@ -33,7 +33,9 @@ jest.mock('~/models', () => ({
const { getConvo, saveConvo } = require('~/models');
jest.mock('@librechat/agents', () => {
const { Providers } = jest.requireActual('@librechat/agents');
return {
Providers,
ChatOpenAI: jest.fn().mockImplementation(() => {
return {};
}),

View file

@ -1,184 +0,0 @@
require('dotenv').config();
const fs = require('fs');
const { z } = require('zod');
const path = require('path');
const yaml = require('js-yaml');
const { createOpenAPIChain } = require('langchain/chains');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { ChatPromptTemplate, HumanMessagePromptTemplate } = require('@langchain/core/prompts');
const { logger } = require('~/config');
function addLinePrefix(text, prefix = '// ') {
return text
.split('\n')
.map((line) => prefix + line)
.join('\n');
}
function createPrompt(name, functions) {
const prefix = `// The ${name} tool has the following functions. Determine the desired or most optimal function for the user's query:`;
const functionDescriptions = functions
.map((func) => `// - ${func.name}: ${func.description}`)
.join('\n');
return `${prefix}\n${functionDescriptions}
// You are an expert manager and scrum master. You must provide a detailed intent to better execute the function.
// Always format as such: {{"func": "function_name", "intent": "intent and expected result"}}`;
}
const AuthBearer = z
.object({
type: z.string().includes('service_http'),
authorization_type: z.string().includes('bearer'),
verification_tokens: z.object({
openai: z.string(),
}),
})
.catch(() => false);
const AuthDefinition = z
.object({
type: z.string(),
authorization_type: z.string(),
verification_tokens: z.object({
openai: z.string(),
}),
})
.catch(() => false);
async function readSpecFile(filePath) {
try {
const fileContents = await fs.promises.readFile(filePath, 'utf8');
if (path.extname(filePath) === '.json') {
return JSON.parse(fileContents);
}
return yaml.load(fileContents);
} catch (e) {
logger.error('[readSpecFile] error', e);
return false;
}
}
async function getSpec(url) {
const RegularUrl = z
.string()
.url()
.catch(() => false);
if (RegularUrl.parse(url) && path.extname(url) === '.json') {
const response = await fetch(url);
return await response.json();
}
const ValidSpecPath = z
.string()
.url()
.catch(async () => {
const spec = path.join(__dirname, '..', '.well-known', 'openapi', url);
if (!fs.existsSync(spec)) {
return false;
}
return await readSpecFile(spec);
});
return ValidSpecPath.parse(url);
}
async function createOpenAPIPlugin({ data, llm, user, message, memory, signal }) {
let spec;
try {
spec = await getSpec(data.api.url);
} catch (error) {
logger.error('[createOpenAPIPlugin] getSpec error', error);
return null;
}
if (!spec) {
logger.warn('[createOpenAPIPlugin] No spec found');
return null;
}
const headers = {};
const { auth, name_for_model, description_for_model, description_for_human } = data;
if (auth && AuthDefinition.parse(auth)) {
logger.debug('[createOpenAPIPlugin] auth detected', auth);
const { openai } = auth.verification_tokens;
if (AuthBearer.parse(auth)) {
headers.authorization = `Bearer ${openai}`;
logger.debug('[createOpenAPIPlugin] added auth bearer', headers);
}
}
const chainOptions = { llm };
if (data.headers && data.headers['librechat_user_id']) {
logger.debug('[createOpenAPIPlugin] id detected', headers);
headers[data.headers['librechat_user_id']] = user;
}
if (Object.keys(headers).length > 0) {
logger.debug('[createOpenAPIPlugin] headers detected', headers);
chainOptions.headers = headers;
}
if (data.params) {
logger.debug('[createOpenAPIPlugin] params detected', data.params);
chainOptions.params = data.params;
}
let history = '';
if (memory) {
logger.debug('[createOpenAPIPlugin] openAPI chain: memory detected', memory);
const { history: chat_history } = await memory.loadMemoryVariables({});
history = chat_history?.length > 0 ? `\n\n## Chat History:\n${chat_history}\n` : '';
}
chainOptions.prompt = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(
`# Use the provided API's to respond to this query:\n\n{query}\n\n## Instructions:\n${addLinePrefix(
description_for_model,
)}${history}`,
),
]);
const chain = await createOpenAPIChain(spec, chainOptions);
const { functions } = chain.chains[0].lc_kwargs.llmKwargs;
return new DynamicStructuredTool({
name: name_for_model,
description_for_model: `${addLinePrefix(description_for_human)}${createPrompt(
name_for_model,
functions,
)}`,
description: `${description_for_human}`,
schema: z.object({
func: z
.string()
.describe(
`The function to invoke. The functions available are: ${functions
.map((func) => func.name)
.join(', ')}`,
),
intent: z
.string()
.describe('Describe your intent with the function and your expected result'),
}),
func: async ({ func = '', intent = '' }) => {
const filteredFunctions = functions.filter((f) => f.name === func);
chain.chains[0].lc_kwargs.llmKwargs.functions = filteredFunctions;
const query = `${message}${func?.length > 0 ? `\n// Intent: ${intent}` : ''}`;
const result = await chain.call({
query,
signal,
});
return result.response;
},
});
}
module.exports = {
getSpec,
readSpecFile,
createOpenAPIPlugin,
};

View file

@ -1,72 +0,0 @@
const fs = require('fs');
const { createOpenAPIPlugin, getSpec, readSpecFile } = require('./OpenAPIPlugin');
global.fetch = jest.fn().mockImplementationOnce(() => {
return new Promise((resolve) => {
resolve({
ok: true,
json: () => Promise.resolve({ key: 'value' }),
});
});
});
jest.mock('fs', () => ({
promises: {
readFile: jest.fn(),
},
existsSync: jest.fn(),
}));
describe('readSpecFile', () => {
it('reads JSON file correctly', async () => {
fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
const result = await readSpecFile('test.json');
expect(result).toEqual({ test: 'value' });
});
it('reads YAML file correctly', async () => {
fs.promises.readFile.mockResolvedValue('test: value');
const result = await readSpecFile('test.yaml');
expect(result).toEqual({ test: 'value' });
});
it('handles error correctly', async () => {
fs.promises.readFile.mockRejectedValue(new Error('test error'));
const result = await readSpecFile('test.json');
expect(result).toBe(false);
});
});
describe('getSpec', () => {
it('fetches spec from url correctly', async () => {
const parsedJson = await getSpec('https://www.instacart.com/.well-known/ai-plugin.json');
const isObject = typeof parsedJson === 'object';
expect(isObject).toEqual(true);
});
it('reads spec from file correctly', async () => {
fs.existsSync.mockReturnValue(true);
fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
const result = await getSpec('test.json');
expect(result).toEqual({ test: 'value' });
});
it('returns false when file does not exist', async () => {
fs.existsSync.mockReturnValue(false);
const result = await getSpec('test.json');
expect(result).toBe(false);
});
});
describe('createOpenAPIPlugin', () => {
it('returns null when getSpec throws an error', async () => {
const result = await createOpenAPIPlugin({ data: { api: { url: 'invalid' } } });
expect(result).toBe(null);
});
it('returns null when no spec is found', async () => {
const result = await createOpenAPIPlugin({});
expect(result).toBe(null);
});
// Add more tests here for different scenarios
});

View file

@ -8,10 +8,10 @@ const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
const logger = require('~/config/winston');
const displayMessage =
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
class DALLE3 extends Tool {
constructor(fields = {}) {
super();

View file

@ -4,12 +4,13 @@ const { v4 } = require('uuid');
const OpenAI = require('openai');
const FormData = require('form-data');
const { tool } = require('@langchain/core/tools');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { logAxiosError, extractBaseURL } = require('~/utils');
const { extractBaseURL } = require('~/utils');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
/** Default descriptions for image generation tool */
const DEFAULT_IMAGE_GEN_DESCRIPTION = `

View file

@ -1,10 +1,29 @@
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
const { logger } = require('~/config');
const logger = require('~/config/winston');
jest.mock('openai');
jest.mock('@librechat/data-schemas', () => {
return {
logger: {
info: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
error: jest.fn(),
},
};
});
jest.mock('tiktoken', () => {
return {
encoding_for_model: jest.fn().mockReturnValue({
encode: jest.fn(),
decode: jest.fn(),
}),
};
});
const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
@ -37,6 +56,11 @@ jest.mock('fs', () => {
return {
existsSync: jest.fn(),
mkdirSync: jest.fn(),
promises: {
writeFile: jest.fn(),
readFile: jest.fn(),
unlink: jest.fn(),
},
};
});

View file

@ -135,7 +135,7 @@ const createFileSearchTool = async ({ req, files, entity_id }) => {
query: z
.string()
.describe(
'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
"A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you're looking for. The query will be used for semantic similarity matching against the file contents.",
),
}),
},

View file

@ -1,7 +1,6 @@
const axios = require('axios');
const { EventSource } = require('eventsource');
const { Time, CacheKeys } = require('librechat-data-provider');
const { MCPManager, FlowStateManager } = require('librechat-mcp');
const { Time } = require('librechat-data-provider');
const { MCPManager, FlowStateManager } = require('@librechat/api');
const logger = require('./winston');
global.EventSource = EventSource;
@ -37,60 +36,8 @@ function getFlowStateManager(flowsCache) {
return flowManager;
}
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
/**
* Creates and configures an Axios instance with optional proxy settings.
*
* @typedef {import('axios').AxiosInstance} AxiosInstance
* @typedef {import('axios').AxiosProxyConfig} AxiosProxyConfig
*
* @returns {AxiosInstance} A configured Axios instance
* @throws {Error} If there's an issue creating the Axios instance or parsing the proxy URL
*/
function createAxiosInstance() {
const instance = axios.create();
if (process.env.proxy) {
try {
const url = new URL(process.env.proxy);
/** @type {AxiosProxyConfig} */
const proxyConfig = {
host: url.hostname.replace(/^\[|\]$/g, ''),
protocol: url.protocol.replace(':', ''),
};
if (url.port) {
proxyConfig.port = parseInt(url.port, 10);
}
instance.defaults.proxy = proxyConfig;
} catch (error) {
console.error('Error parsing proxy URL:', error);
throw new Error(`Invalid proxy URL: ${process.env.proxy}`);
}
}
return instance;
}
module.exports = {
logger,
sendEvent,
getMCPManager,
createAxiosInstance,
getFlowStateManager,
};

View file

@ -1,67 +1,20 @@
/** @type {import('jest').Config} */
module.exports = {
// Define separate Jest projects
projects: [
// Default config for most tests
{
displayName: 'default',
testEnvironment: 'node',
clearMocks: true,
roots: ['<rootDir>'],
coverageDirectory: 'coverage',
setupFiles: [
'./test/jestSetup.js',
'./test/__mocks__/logger.js',
'./test/__mocks__/fetchEventSource.js',
],
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
'^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js',
'^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
},
transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
// testMatch: ['<rootDir>/**/*.spec.js', '<rootDir>/**/*.spec.ts'],
testPathIgnorePatterns: [
'<rootDir>/strategies/openidStrategy.spec.js',
'<rootDir>/strategies/samlStrategy.spec.js',
'<rootDir>/strategies/appleStrategy.test.js',
],
},
// Special config just for openidStrategy.spec.js
{
displayName: 'openid-strategy',
testEnvironment: 'node',
clearMocks: true,
setupFiles: [
'./test/jestSetup.js',
'./test/__mocks__/logger.js',
'./test/__mocks__/fetchEventSource.js',
],
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
'^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js',
'^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
},
transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
transform: {
'^.+\\.tsx?$': [
'ts-jest',
{
tsconfig: {
esModuleInterop: true,
allowSyntheticDefaultImports: true,
},
},
],
},
testMatch: [
'<rootDir>/strategies/openidStrategy.spec.js',
'<rootDir>/strategies/samlStrategy.spec.js',
'<rootDir>/strategies/appleStrategy.test.js',
],
},
displayName: 'default',
testEnvironment: 'node',
clearMocks: true,
roots: ['<rootDir>'],
coverageDirectory: 'coverage',
setupFiles: [
'./test/jestSetup.js',
'./test/__mocks__/logger.js',
'./test/__mocks__/fetchEventSource.js',
],
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
'^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js',
'^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
},
transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
};

View file

@ -170,7 +170,6 @@ const isDuplicateVersion = (updateData, currentData, versions, actionsHash = nul
'created_at',
'updated_at',
'__v',
'agent_ids',
'versions',
'actionsHash', // Exclude actionsHash from direct comparison
];
@ -260,11 +259,12 @@ const isDuplicateVersion = (updateData, currentData, versions, actionsHash = nul
* @param {Object} [options] - Optional configuration object.
* @param {string} [options.updatingUserId] - The ID of the user performing the update (used for tracking non-author updates).
* @param {boolean} [options.forceVersion] - Force creation of a new version even if no fields changed.
* @param {boolean} [options.skipVersioning] - Skip version creation entirely (useful for isolated operations like sharing).
* @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
* @throws {Error} If the update would create a duplicate version
*/
const updateAgent = async (searchParameter, updateData, options = {}) => {
const { updatingUserId = null, forceVersion = false } = options;
const { updatingUserId = null, forceVersion = false, skipVersioning = false } = options;
const mongoOptions = { new: true, upsert: false };
const currentAgent = await Agent.findOne(searchParameter);
@ -301,10 +301,8 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
}
const shouldCreateVersion =
forceVersion ||
(versions &&
versions.length > 0 &&
(Object.keys(directUpdates).length > 0 || $push || $pull || $addToSet));
!skipVersioning &&
(forceVersion || Object.keys(directUpdates).length > 0 || $push || $pull || $addToSet);
if (shouldCreateVersion) {
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions, actionsHash);
@ -339,7 +337,7 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
versionEntry.updatedBy = new mongoose.Types.ObjectId(updatingUserId);
}
if (shouldCreateVersion || forceVersion) {
if (shouldCreateVersion) {
updateData.$push = {
...($push || {}),
versions: versionEntry,
@ -550,7 +548,10 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
delete updateQuery.author;
}
const updatedAgent = await updateAgent(updateQuery, updateOps, { updatingUserId: user.id });
const updatedAgent = await updateAgent(updateQuery, updateOps, {
updatingUserId: user.id,
skipVersioning: true,
});
if (updatedAgent) {
return updatedAgent;
}
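A short sketch of the new `skipVersioning` option, matching the JSDoc and the `updateAgentProjects` call above. The ids and update payload are placeholders for illustration.

// Illustrative call only; `updateAgent` is the model helper documented above.
async function shareAgentWithProject(updateAgent) {
  return updateAgent(
    { id: 'agent_abc' },                          // placeholder search parameter
    { $addToSet: { projectIds: 'project_123' } }, // placeholder update ops
    { updatingUserId: 'user_456', skipVersioning: true }, // sharing should not create a new version
  );
}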

File diff suppressed because it is too large.

View file

@ -12,6 +12,10 @@ const comparePassword = async (user, candidatePassword) => {
throw new Error('No user provided');
}
if (!user.password) {
throw new Error('No password, likely an email first registered via Social/OIDC login');
}
return new Promise((resolve, reject) => {
bcrypt.compare(candidatePassword, user.password, (err, isMatch) => {
if (err) {
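The new guard makes `comparePassword` reject accounts that have no local password, such as emails first registered through social or OIDC login. A hedged sketch of how a caller might surface that case; the wrapper function is illustrative, and only the thrown error comes from the change above.

// Illustrative caller; `comparePassword` is the function shown above.
async function verifyLocalLogin(comparePassword, user, candidatePassword) {
  try {
    return await comparePassword(user, candidatePassword);
  } catch (err) {
    // e.g. 'No password, likely an email first registered via Social/OIDC login'
    console.warn(`Local password check failed: ${err.message}`);
    return false;
  }
}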

View file

@ -48,8 +48,9 @@
"@langchain/google-genai": "^0.2.9",
"@langchain/google-vertexai": "^0.2.9",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.37",
"@librechat/agents": "^2.4.38",
"@librechat/auth": "*",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
@ -82,15 +83,15 @@
"keyv-file": "^5.1.2",
"klona": "^2.0.6",
"librechat-data-provider": "*",
"librechat-mcp": "*",
"lodash": "^4.17.21",
"meilisearch": "^0.38.0",
"memorystore": "^1.6.7",
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.12.1",
"multer": "^2.0.0",
"multer": "^2.0.1",
"nanoid": "^3.3.7",
"node-fetch": "^2.7.0",
"nodemailer": "^6.9.15",
"ollama": "^0.5.0",
"openai": "^4.96.2",
@ -110,8 +111,9 @@
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
"undici": "^7.10.0",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^4.7.1",
"winston-daily-rotate-file": "^5.0.0",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},

View file

@ -220,6 +220,9 @@ function disposeClient(client) {
if (client.maxResponseTokens) {
client.maxResponseTokens = null;
}
if (client.processMemory) {
client.processMemory = null;
}
if (client.run) {
// Break circular references in run
if (client.run.Graph) {

View file

@ -163,7 +163,11 @@ const deleteUserController = async (req, res) => {
await Balance.deleteMany({ user: user._id }); // delete user balances
await deletePresets(user.id); // delete user presets
/* TODO: Delete Assistant Threads */
await deleteConvos(user.id); // delete user convos
try {
await deleteConvos(user.id); // delete user convos
} catch (error) {
logger.error('[deleteUserController] Error deleting user convos, likely no convos', error);
}
await deleteUserPluginAuth(user.id, null, true); // delete user plugin auth
await deleteUserById(user.id); // delete user
await deleteAllSharedLinks(user.id); // delete user shared links

View file

@ -1,4 +1,6 @@
const { nanoid } = require('nanoid');
const { sendEvent } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Tools, StepTypes, FileContext } = require('librechat-data-provider');
const {
EnvVar,
@ -12,7 +14,6 @@ const {
const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { saveBase64Image } = require('~/server/services/Files/process');
const { logger, sendEvent } = require('~/config');
class ModelEndHandler {
/**
@ -240,9 +241,7 @@ function createToolEndCallback({ req, res, artifactPromises }) {
if (output.artifact[Tools.web_search]) {
artifactPromises.push(
(async () => {
const name = `${output.name}_${output.tool_call_id}_${nanoid()}`;
const attachment = {
name,
type: Tools.web_search,
messageId: metadata.run_id,
toolCallId: output.tool_call_id,

View file

@ -1,13 +1,12 @@
// const { HttpsProxyAgent } = require('https-proxy-agent');
// const {
// Constants,
// ImageDetail,
// EModelEndpoint,
// resolveHeaders,
// validateVisionModel,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
require('events').EventEmitter.defaultMaxListeners = 100;
const { logger } = require('@librechat/data-schemas');
const {
sendEvent,
createRun,
Tokenizer,
memoryInstructions,
createMemoryProcessor,
} = require('@librechat/api');
const {
Callback,
GraphEvents,
@ -19,25 +18,30 @@ const {
} = require('@librechat/agents');
const {
Constants,
Permissions,
VisionModes,
ContentTypes,
EModelEndpoint,
KnownEndpoints,
PermissionTypes,
isAgentsEndpoint,
AgentCapabilities,
bedrockInputSchema,
removeNullishValues,
} = require('librechat-data-provider');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const { getCustomEndpointConfig, checkCapability } = require('~/server/services/Config');
const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const { setMemory, deleteMemory, getFormattedMemories } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const Tokenizer = require('~/server/services/Tokenizer');
const { checkAccess } = require('~/server/middleware/roles/access');
const BaseClient = require('~/app/clients/BaseClient');
const { logger, sendEvent } = require('~/config');
const { createRun } = require('./run');
const { loadAgent } = require('~/models/Agent');
const { getMCPManager } = require('~/config');
/**
* @param {ServerRequest} req
@ -57,12 +61,8 @@ const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deep
const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
// const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory');
// const { getFormattedMemories } = require('~/models/Memory');
// const { getCurrentDateTime } = require('~/utils');
function createTokenCounter(encoding) {
return (message) => {
return function (message) {
const countTokens = (text) => Tokenizer.getTokenCount(text, encoding);
return getTokenCountForMessage(message, countTokens);
};
@ -123,6 +123,8 @@ class AgentClient extends BaseClient {
this.usage;
/** @type {Record<string, number>} */
this.indexTokenCountMap = {};
/** @type {(messages: BaseMessage[]) => Promise<void>} */
this.processMemory;
}
/**
@ -137,55 +139,10 @@ class AgentClient extends BaseClient {
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* `AgentClient` is not opinionated about vision requests, so we don't do anything here
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
// if (!attachments) {
// return;
// }
// const availableModels = this.options.modelsConfig?.[this.options.endpoint];
// if (!availableModels) {
// return;
// }
// let visionRequestDetected = false;
// for (const file of attachments) {
// if (file?.type?.includes('image')) {
// visionRequestDetected = true;
// break;
// }
// }
// if (!visionRequestDetected) {
// return;
// }
// this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
// if (this.isVisionModel) {
// delete this.modelOptions.stop;
// return;
// }
// for (const model of availableModels) {
// if (!validateVisionModel({ model, availableModels })) {
// continue;
// }
// this.modelOptions.model = model;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
// return;
// }
// if (!availableModels.includes(this.defaultVisionModel)) {
// return;
// }
// if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
// return;
// }
// this.modelOptions.model = this.defaultVisionModel;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
}
checkVisionRequest() {}
getSaveOptions() {
// TODO:
@ -269,24 +226,6 @@ class AgentClient extends BaseClient {
.filter(Boolean)
.join('\n')
.trim();
// this.systemMessage = getCurrentDateTime();
// const { withKeys, withoutKeys } = await getFormattedMemories({
// userId: this.options.req.user.id,
// });
// processMemory({
// userId: this.options.req.user.id,
// message: this.options.req.body.text,
// parentMessageId,
// memory: withKeys,
// thread_id: this.conversationId,
// }).catch((error) => {
// logger.error('Memory Agent failed to process memory', error);
// });
// this.systemMessage += '\n\n' + memoryInstructions;
// if (withoutKeys) {
// this.systemMessage += `\n\n# Existing memory about the user:\n${withoutKeys}`;
// }
if (this.options.attachments) {
const attachments = await this.options.attachments;
@ -370,6 +309,37 @@ class AgentClient extends BaseClient {
systemContent = this.augmentedPrompt + systemContent;
}
// Inject MCP server instructions if available
const ephemeralAgent = this.options.req.body.ephemeralAgent;
let mcpServers = [];
// Check for ephemeral agent MCP servers
if (ephemeralAgent && ephemeralAgent.mcp && ephemeralAgent.mcp.length > 0) {
mcpServers = ephemeralAgent.mcp;
}
// Check for regular agent MCP tools
else if (this.options.agent && this.options.agent.tools) {
mcpServers = this.options.agent.tools
.filter(
(tool) =>
tool instanceof DynamicStructuredTool && tool.name.includes(Constants.mcp_delimiter),
)
.map((tool) => tool.name.split(Constants.mcp_delimiter).pop())
.filter(Boolean);
}
if (mcpServers.length > 0) {
try {
const mcpInstructions = getMCPManager().formatInstructionsForContext(mcpServers);
if (mcpInstructions) {
systemContent = [systemContent, mcpInstructions].filter(Boolean).join('\n\n');
logger.debug('[AgentClient] Injected MCP instructions for servers:', mcpServers);
}
} catch (error) {
logger.error('[AgentClient] Failed to inject MCP instructions:', error);
}
}
if (systemContent) {
this.options.agent.instructions = systemContent;
}
@ -399,9 +369,150 @@ class AgentClient extends BaseClient {
opts.getReqData({ promptTokens });
}
const withoutKeys = await this.useMemory();
if (withoutKeys) {
systemContent += `${memoryInstructions}\n\n# Existing memory about the user:\n${withoutKeys}`;
}
if (systemContent) {
this.options.agent.instructions = systemContent;
}
return result;
}
/**
* @returns {Promise<string | undefined>}
*/
async useMemory() {
const user = this.options.req.user;
if (user.personalization?.memories === false) {
return;
}
const hasAccess = await checkAccess(user, PermissionTypes.MEMORIES, [Permissions.USE]);
if (!hasAccess) {
logger.debug(
`[api/server/controllers/agents/client.js #useMemory] User ${user.id} does not have USE permission for memories`,
);
return;
}
/** @type {TCustomConfig['memory']} */
const memoryConfig = this.options.req?.app?.locals?.memory;
if (!memoryConfig || memoryConfig.disabled === true) {
return;
}
/** @type {Agent} */
let prelimAgent;
const allowedProviders = new Set(
this.options.req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders,
);
try {
if (memoryConfig.agent?.id != null && memoryConfig.agent.id !== this.options.agent.id) {
prelimAgent = await loadAgent({
req: this.options.req,
agent_id: memoryConfig.agent.id,
endpoint: EModelEndpoint.agents,
});
} else if (
memoryConfig.agent?.id == null &&
memoryConfig.agent?.model != null &&
memoryConfig.agent?.provider != null
) {
prelimAgent = { id: Constants.EPHEMERAL_AGENT_ID, ...memoryConfig.agent };
}
} catch (error) {
logger.error(
'[api/server/controllers/agents/client.js #useMemory] Error loading agent for memory',
error,
);
}
const agent = await initializeAgent({
req: this.options.req,
res: this.options.res,
agent: prelimAgent,
allowedProviders,
});
if (!agent) {
logger.warn(
'[api/server/controllers/agents/client.js #useMemory] No agent found for memory',
memoryConfig,
);
return;
}
const llmConfig = Object.assign(
{
provider: agent.provider,
model: agent.model,
},
agent.model_parameters,
);
/** @type {import('@librechat/api').MemoryConfig} */
const config = {
validKeys: memoryConfig.validKeys,
instructions: agent.instructions,
llmConfig,
tokenLimit: memoryConfig.tokenLimit,
};
const userId = this.options.req.user.id + '';
const messageId = this.responseMessageId + '';
const conversationId = this.conversationId + '';
const [withoutKeys, processMemory] = await createMemoryProcessor({
userId,
config,
messageId,
conversationId,
memoryMethods: {
setMemory,
deleteMemory,
getFormattedMemories,
},
res: this.options.res,
});
this.processMemory = processMemory;
return withoutKeys;
}
/**
* @param {BaseMessage[]} messages
* @returns {Promise<void | (TAttachment | null)[]>}
*/
async runMemory(messages) {
try {
if (this.processMemory == null) {
return;
}
/** @type {TCustomConfig['memory']} */
const memoryConfig = this.options.req?.app?.locals?.memory;
const messageWindowSize = memoryConfig?.messageWindowSize ?? 5;
let messagesToProcess = [...messages];
if (messages.length > messageWindowSize) {
for (let i = messages.length - messageWindowSize; i >= 0; i--) {
const potentialWindow = messages.slice(i, i + messageWindowSize);
if (potentialWindow[0]?.role === 'user') {
messagesToProcess = [...potentialWindow];
break;
}
}
if (messagesToProcess.length === messages.length) {
messagesToProcess = [...messages.slice(-messageWindowSize)];
}
}
return await this.processMemory(messagesToProcess);
} catch (error) {
logger.error('Memory Agent failed to process memory', error);
}
}
/** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
await this.chatCompletion({
@ -544,100 +655,13 @@ class AgentClient extends BaseClient {
let config;
/** @type {ReturnType<createRun>} */
let run;
/** @type {Promise<(TAttachment | null)[] | undefined>} */
let memoryPromise;
try {
if (!abortController) {
abortController = new AbortController();
}
// if (this.options.headers) {
// opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
// }
// if (this.options.proxy) {
// opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
// }
// if (this.isVisionModel) {
// modelOptions.max_tokens = 4000;
// }
// /** @type {TAzureConfig | undefined} */
// const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
// if (
// (this.azure && this.isVisionModel && azureConfig) ||
// (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
// ) {
// const { modelGroupMap, groupMap } = azureConfig;
// const {
// azureOptions,
// baseURL,
// headers = {},
// serverless,
// } = mapModelToAzureConfig({
// modelName: modelOptions.model,
// modelGroupMap,
// groupMap,
// });
// opts.defaultHeaders = resolveHeaders(headers);
// this.langchainProxy = extractBaseURL(baseURL);
// this.apiKey = azureOptions.azureOpenAIApiKey;
// const groupName = modelGroupMap[modelOptions.model].group;
// this.options.addParams = azureConfig.groupMap[groupName].addParams;
// this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// // Note: `forcePrompt` not re-assigned as only chat models are vision models
// this.azure = !serverless && azureOptions;
// this.azureEndpoint =
// !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
// }
// if (this.azure || this.options.azure) {
// /* Azure Bug, extremely short default `max_tokens` response */
// if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') {
// modelOptions.max_tokens = 4000;
// }
// /* Azure does not accept `model` in the body, so we need to remove it. */
// delete modelOptions.model;
// opts.baseURL = this.langchainProxy
// ? constructAzureURL({
// baseURL: this.langchainProxy,
// azureOptions: this.azure,
// })
// : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
// opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
// opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
// }
// if (process.env.OPENAI_ORGANIZATION) {
// opts.organization = process.env.OPENAI_ORGANIZATION;
// }
// if (this.options.addParams && typeof this.options.addParams === 'object') {
// modelOptions = {
// ...modelOptions,
// ...this.options.addParams,
// };
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] added params', {
// addParams: this.options.addParams,
// modelOptions,
// });
// }
// if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
// this.options.dropParams.forEach((param) => {
// delete modelOptions[param];
// });
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] dropped params', {
// dropParams: this.options.dropParams,
// modelOptions,
// });
// }
/** @type {TCustomConfig['endpoints']['agents']} */
const agentsEConfig = this.options.req.app.locals[EModelEndpoint.agents];
@ -647,6 +671,7 @@ class AgentClient extends BaseClient {
last_agent_index: this.agentConfigs?.size ?? 0,
user_id: this.user ?? this.options.req.user?.id,
hide_sequential_outputs: this.options.agent.hide_sequential_outputs,
user: this.options.req.user,
},
recursionLimit: agentsEConfig?.recursionLimit,
signal: abortController.signal,
@ -734,6 +759,10 @@ class AgentClient extends BaseClient {
messages = addCacheControl(messages);
}
if (i === 0) {
memoryPromise = this.runMemory(messages);
}
run = await createRun({
agent,
req: this.options.req,
@ -769,10 +798,9 @@ class AgentClient extends BaseClient {
run.Graph.contentData = contentData;
}
const encoding = this.getEncoding();
await run.processStream({ messages }, config, {
keepContent: i !== 0,
tokenCounter: createTokenCounter(encoding),
tokenCounter: createTokenCounter(this.getEncoding()),
indexTokenCountMap: currentIndexCountMap,
maxContextTokens: agent.maxContextTokens,
callbacks: {
@ -887,6 +915,12 @@ class AgentClient extends BaseClient {
});
try {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
await this.recordCollectedUsage({ context: 'message' });
} catch (err) {
logger.error(
@ -895,6 +929,12 @@ class AgentClient extends BaseClient {
);
}
} catch (err) {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
err,

View file

@ -1,94 +0,0 @@
const { Run, Providers } = require('@librechat/agents');
const { providerEndpointMap, KnownEndpoints } = require('librechat-data-provider');
/**
* @typedef {import('@librechat/agents').t} t
* @typedef {import('@librechat/agents').StandardGraphConfig} StandardGraphConfig
* @typedef {import('@librechat/agents').StreamEventData} StreamEventData
* @typedef {import('@librechat/agents').EventHandler} EventHandler
* @typedef {import('@librechat/agents').GraphEvents} GraphEvents
* @typedef {import('@librechat/agents').LLMConfig} LLMConfig
* @typedef {import('@librechat/agents').IState} IState
*/
const customProviders = new Set([
Providers.XAI,
Providers.OLLAMA,
Providers.DEEPSEEK,
Providers.OPENROUTER,
]);
/**
* Creates a new Run instance with custom handlers and configuration.
*
* @param {Object} options - The options for creating the Run instance.
* @param {ServerRequest} [options.req] - The server request.
* @param {string | undefined} [options.runId] - Optional run ID; otherwise, a new run ID will be generated.
* @param {Agent} options.agent - The agent for this run.
* @param {AbortSignal} options.signal - The signal for this run.
* @param {Record<GraphEvents, EventHandler> | undefined} [options.customHandlers] - Custom event handlers.
* @param {boolean} [options.streaming=true] - Whether to use streaming.
* @param {boolean} [options.streamUsage=true] - Whether to stream usage information.
* @returns {Promise<Run<IState>>} A promise that resolves to a new Run instance.
*/
async function createRun({
runId,
agent,
signal,
customHandlers,
streaming = true,
streamUsage = true,
}) {
const provider = providerEndpointMap[agent.provider] ?? agent.provider;
/** @type {LLMConfig} */
const llmConfig = Object.assign(
{
provider,
streaming,
streamUsage,
},
agent.model_parameters,
);
/** Resolves issues with new OpenAI usage field */
if (
customProviders.has(agent.provider) ||
(agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
) {
llmConfig.streamUsage = false;
llmConfig.usage = true;
}
/** @type {'reasoning_content' | 'reasoning'} */
let reasoningKey;
if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
}
/** @type {StandardGraphConfig} */
const graphConfig = {
signal,
llmConfig,
reasoningKey,
tools: agent.tools,
instructions: agent.instructions,
additional_instructions: agent.additional_instructions,
// toolEnd: agent.end_after_tools,
};
// TEMPORARY FOR TESTING
if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
graphConfig.streamBuffer = 2000;
}
return Run.create({
runId,
graphConfig,
customHandlers,
});
}
module.exports = { createRun };
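This local run.js was deleted; `createRun` is now imported from `@librechat/api` in the agent client above. A sketch of calling it with the options documented in the removed JSDoc, assuming the package keeps that signature.

// Assumes @librechat/api exposes createRun with the options documented above.
const { createRun } = require('@librechat/api');

async function startAgentRun({ req, runId, agent, signal, customHandlers }) {
  return createRun({
    req,
    runId,          // optional; a new run ID is generated when omitted
    agent,          // agent config, incl. provider and model_parameters
    signal,         // AbortSignal for cancellation
    customHandlers, // Record<GraphEvents, EventHandler>
    streaming: true,
    streamUsage: true,
  });
}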

View file

@ -18,6 +18,7 @@ const {
} = require('~/models/Agent');
const { uploadImageBuffer, filterFile } = require('~/server/services/Files/process');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { resizeAvatar } = require('@librechat/auth');
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
const { updateAction, getActions } = require('~/models/Action');
const { updateAgentProjects } = require('~/models/Agent');
@ -168,12 +169,18 @@ const updateAgentHandler = async (req, res) => {
});
}
/** @type {boolean} */
const isProjectUpdate = (projectIds?.length ?? 0) > 0 || (removeProjectIds?.length ?? 0) > 0;
let updatedAgent =
Object.keys(updateData).length > 0
? await updateAgent({ id }, updateData, { updatingUserId: req.user.id })
? await updateAgent({ id }, updateData, {
updatingUserId: req.user.id,
skipVersioning: isProjectUpdate,
})
: existingAgent;
if (projectIds || removeProjectIds) {
if (isProjectUpdate) {
updatedAgent = await updateAgentProjects({
user: req.user,
agentId: id,
@ -373,12 +380,27 @@ const uploadAgentAvatarHandler = async (req, res) => {
}
const buffer = await fs.readFile(req.file.path);
const image = await uploadImageBuffer({
req,
context: FileContext.avatar,
metadata: { buffer },
const fileStrategy = req.app.locals.fileStrategy;
const resizedBuffer = await resizeAvatar({
userId: req.user.id,
input: buffer,
});
const { processAvatar } = getStrategyFunctions(fileStrategy);
const avatarUrl = await processAvatar({
buffer: resizedBuffer,
userId: req.user.id,
manual: 'false',
agentId: agent_id,
});
const image = {
filepath: avatarUrl,
source: fileStrategy,
};
let _avatar;
try {
const agent = await getAgent({ id: agent_id });
@ -403,7 +425,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
const data = {
avatar: {
filepath: image.filepath,
source: req.app.locals.fileStrategy,
source: image.source,
},
};

View file

@ -124,7 +124,7 @@ const startServer = async () => {
app.use('/api/agents', routes.agents);
app.use('/api/banner', routes.banner);
app.use('/api/bedrock', routes.bedrock);
app.use('/api/memories', routes.memories);
app.use('/api/tags', routes.tags);
app.use((req, res) => {

View file

@ -1,5 +1,5 @@
const checkAdmin = require('./checkAdmin');
const { checkAccess, generateCheckAccess } = require('./generateCheckAccess');
const checkAdmin = require('./admin');
const { checkAccess, generateCheckAccess } = require('./access');
module.exports = {
checkAdmin,

View file

@ -53,6 +53,7 @@ router.get('/:action_id/oauth/callback', async (req, res) => {
identifier,
client_url: flowState.metadata.client_url,
redirect_uri: flowState.metadata.redirect_uri,
token_exchange_method: flowState.metadata.token_exchange_method,
/** Encrypted values */
encrypted_oauth_client_id: flowState.metadata.encrypted_oauth_client_id,
encrypted_oauth_client_secret: flowState.metadata.encrypted_oauth_client_secret,

View file

@ -65,8 +65,14 @@ router.post('/gen_title', async (req, res) => {
let title = await titleCache.get(key);
if (!title) {
await sleep(2500);
title = await titleCache.get(key);
// Retry every 1s for up to 20s
for (let i = 0; i < 20; i++) {
await sleep(1000);
title = await titleCache.get(key);
if (title) {
break;
}
}
}
if (title) {
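The route now polls the title cache once per second for up to 20 seconds instead of a single 2.5 second wait. The same retry pattern as a standalone helper, for illustration only:

// Generic poll-with-retry helper in the spirit of the loop above; not part of the route code.
async function pollCache(cache, key, { attempts = 20, intervalMs = 1000 } = {}) {
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
  for (let i = 0; i < attempts; i++) {
    const value = await cache.get(key);
    if (value) {
      return value;
    }
    await sleep(intervalMs);
  }
  return undefined;
}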

View file

@ -2,8 +2,8 @@ const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const multer = require('multer');
const { sanitizeFilename } = require('@librechat/api');
const { fileConfig: defaultFileConfig, mergeFileConfig } = require('librechat-data-provider');
const { sanitizeFilename } = require('~/server/utils/handleText');
const { getCustomConfig } = require('~/server/services/Config');
const storage = multer.diskStorage({

View file

@ -0,0 +1,571 @@
/* eslint-disable no-unused-vars */
/* eslint-disable jest/no-done-callback */
const fs = require('fs');
const os = require('os');
const path = require('path');
const crypto = require('crypto');
const { createMulterInstance, storage, importFileFilter } = require('./multer');
// Mock only the config service that requires external dependencies
jest.mock('~/server/services/Config', () => ({
getCustomConfig: jest.fn(() =>
Promise.resolve({
fileConfig: {
endpoints: {
openAI: {
supportedMimeTypes: ['image/jpeg', 'image/png', 'application/pdf'],
},
default: {
supportedMimeTypes: ['image/jpeg', 'image/png', 'text/plain'],
},
},
serverFileSizeLimit: 10000000, // 10MB
},
}),
),
}));
describe('Multer Configuration', () => {
let tempDir;
let mockReq;
let mockFile;
beforeEach(() => {
// Create a temporary directory for each test
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'multer-test-'));
mockReq = {
user: { id: 'test-user-123' },
app: {
locals: {
paths: {
uploads: tempDir,
},
},
},
body: {},
originalUrl: '/api/files/upload',
};
mockFile = {
originalname: 'test-file.jpg',
mimetype: 'image/jpeg',
size: 1024,
};
// Clear mocks
jest.clearAllMocks();
});
afterEach(() => {
// Clean up temporary directory
if (fs.existsSync(tempDir)) {
fs.rmSync(tempDir, { recursive: true, force: true });
}
});
describe('Storage Configuration', () => {
describe('destination function', () => {
it('should create the correct destination path', (done) => {
const cb = jest.fn((err, destination) => {
expect(err).toBeNull();
expect(destination).toBe(path.join(tempDir, 'temp', 'test-user-123'));
expect(fs.existsSync(destination)).toBe(true);
done();
});
storage.getDestination(mockReq, mockFile, cb);
});
it("should create directory recursively if it doesn't exist", (done) => {
const deepPath = path.join(tempDir, 'deep', 'nested', 'path');
mockReq.app.locals.paths.uploads = deepPath;
const cb = jest.fn((err, destination) => {
expect(err).toBeNull();
expect(destination).toBe(path.join(deepPath, 'temp', 'test-user-123'));
expect(fs.existsSync(destination)).toBe(true);
done();
});
storage.getDestination(mockReq, mockFile, cb);
});
});
describe('filename function', () => {
it('should generate a UUID for req.file_id', (done) => {
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(mockReq.file_id).toBeDefined();
expect(mockReq.file_id).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i,
);
done();
});
storage.getFilename(mockReq, mockFile, cb);
});
it('should decode URI components in filename', (done) => {
const encodedFile = {
...mockFile,
originalname: encodeURIComponent('test file with spaces.jpg'),
};
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(encodedFile.originalname).toBe('test file with spaces.jpg');
done();
});
storage.getFilename(mockReq, encodedFile, cb);
});
it('should call real sanitizeFilename with properly encoded filename', (done) => {
// Test with a properly URI-encoded filename that needs sanitization
const unsafeFile = {
...mockFile,
originalname: encodeURIComponent('test@#$%^&*()file with spaces!.jpg'),
};
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
// The actual sanitizeFilename should have cleaned this up after decoding
expect(filename).not.toContain('@');
expect(filename).not.toContain('#');
expect(filename).not.toContain('*');
expect(filename).not.toContain('!');
// Should still preserve dots and hyphens
expect(filename).toContain('.jpg');
done();
});
storage.getFilename(mockReq, unsafeFile, cb);
});
it('should handle very long filenames with actual crypto', (done) => {
const longFile = {
...mockFile,
originalname: 'a'.repeat(300) + '.jpg',
};
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(filename.length).toBeLessThanOrEqual(255);
expect(filename).toMatch(/\.jpg$/); // Should still end with .jpg
// Should contain a hex suffix if truncated
if (filename.length === 255) {
expect(filename).toMatch(/-[a-f0-9]{6}\.jpg$/);
}
done();
});
storage.getFilename(mockReq, longFile, cb);
});
it('should generate unique file_id for each call', (done) => {
let firstFileId;
const firstCb = jest.fn((err, filename) => {
expect(err).toBeNull();
firstFileId = mockReq.file_id;
// Reset req for second call
delete mockReq.file_id;
const secondCb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(mockReq.file_id).toBeDefined();
expect(mockReq.file_id).not.toBe(firstFileId);
done();
});
storage.getFilename(mockReq, mockFile, secondCb);
});
storage.getFilename(mockReq, mockFile, firstCb);
});
});
});
describe('Import File Filter', () => {
it('should accept JSON files by mimetype', (done) => {
const jsonFile = {
...mockFile,
mimetype: 'application/json',
originalname: 'data.json',
};
const cb = jest.fn((err, result) => {
expect(err).toBeNull();
expect(result).toBe(true);
done();
});
importFileFilter(mockReq, jsonFile, cb);
});
it('should accept files with .json extension', (done) => {
const jsonFile = {
...mockFile,
mimetype: 'text/plain',
originalname: 'data.json',
};
const cb = jest.fn((err, result) => {
expect(err).toBeNull();
expect(result).toBe(true);
done();
});
importFileFilter(mockReq, jsonFile, cb);
});
it('should reject non-JSON files', (done) => {
const textFile = {
...mockFile,
mimetype: 'text/plain',
originalname: 'document.txt',
};
const cb = jest.fn((err, result) => {
expect(err).toBeInstanceOf(Error);
expect(err.message).toBe('Only JSON files are allowed');
expect(result).toBe(false);
done();
});
importFileFilter(mockReq, textFile, cb);
});
it('should handle files with uppercase .JSON extension', (done) => {
const jsonFile = {
...mockFile,
mimetype: 'text/plain',
originalname: 'DATA.JSON',
};
const cb = jest.fn((err, result) => {
expect(err).toBeNull();
expect(result).toBe(true);
done();
});
importFileFilter(mockReq, jsonFile, cb);
});
});
describe('File Filter with Real defaultFileConfig', () => {
it('should use real fileConfig.checkType for validation', async () => {
// Test with actual librechat-data-provider functions
const {
fileConfig,
imageMimeTypes,
applicationMimeTypes,
} = require('librechat-data-provider');
// Test that the real checkType function works with regex patterns
expect(fileConfig.checkType('image/jpeg', [imageMimeTypes])).toBe(true);
expect(fileConfig.checkType('video/mp4', [imageMimeTypes])).toBe(false);
expect(fileConfig.checkType('application/pdf', [applicationMimeTypes])).toBe(true);
expect(fileConfig.checkType('application/pdf', [])).toBe(false);
});
it('should handle audio files for speech-to-text endpoint with real config', async () => {
mockReq.originalUrl = '/api/speech/stt';
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
expect(typeof multerInstance.single).toBe('function');
});
it('should reject unsupported file types using real config', async () => {
// Mock defaultFileConfig for this specific test
const originalCheckType = require('librechat-data-provider').fileConfig.checkType;
const mockCheckType = jest.fn().mockReturnValue(false);
require('librechat-data-provider').fileConfig.checkType = mockCheckType;
try {
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
// Test the actual file filter behavior would reject unsupported files
expect(mockCheckType).toBeDefined();
} finally {
// Restore original function
require('librechat-data-provider').fileConfig.checkType = originalCheckType;
}
});
it('should use real mergeFileConfig function', async () => {
const { mergeFileConfig, mbToBytes } = require('librechat-data-provider');
// Test with actual merge function - note that it converts MB to bytes
const testConfig = {
serverFileSizeLimit: 5, // 5 MB
endpoints: {
custom: {
supportedMimeTypes: ['text/plain'],
},
},
};
const result = mergeFileConfig(testConfig);
// The function converts MB to bytes, so 5 MB becomes 5 * 1024 * 1024 bytes
expect(result.serverFileSizeLimit).toBe(mbToBytes(5));
expect(result.endpoints.custom.supportedMimeTypes).toBeDefined();
// Should still have the default endpoints
expect(result.endpoints.default).toBeDefined();
});
});
describe('createMulterInstance with Real Functions', () => {
it('should create a multer instance with correct configuration', async () => {
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
expect(typeof multerInstance.single).toBe('function');
expect(typeof multerInstance.array).toBe('function');
expect(typeof multerInstance.fields).toBe('function');
});
it('should use real config merging', async () => {
const { getCustomConfig } = require('~/server/services/Config');
const multerInstance = await createMulterInstance();
expect(getCustomConfig).toHaveBeenCalled();
expect(multerInstance).toBeDefined();
});
it('should create multer instance with expected interface', async () => {
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
expect(typeof multerInstance.single).toBe('function');
expect(typeof multerInstance.array).toBe('function');
expect(typeof multerInstance.fields).toBe('function');
});
});
describe('Real Crypto Integration', () => {
it('should use actual crypto.randomUUID()', (done) => {
// Spy on crypto.randomUUID to ensure it's called
const uuidSpy = jest.spyOn(crypto, 'randomUUID');
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(uuidSpy).toHaveBeenCalled();
expect(mockReq.file_id).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i,
);
uuidSpy.mockRestore();
done();
});
storage.getFilename(mockReq, mockFile, cb);
});
it('should generate different UUIDs on subsequent calls', (done) => {
const uuids = [];
let callCount = 0;
const totalCalls = 5;
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
uuids.push(mockReq.file_id);
callCount++;
if (callCount === totalCalls) {
// Check that all UUIDs are unique
const uniqueUuids = new Set(uuids);
expect(uniqueUuids.size).toBe(totalCalls);
done();
} else {
// Reset for next call
delete mockReq.file_id;
storage.getFilename(mockReq, mockFile, cb);
}
});
// Start the chain
storage.getFilename(mockReq, mockFile, cb);
});
it('should generate cryptographically secure UUIDs', (done) => {
const generatedUuids = new Set();
let callCount = 0;
const totalCalls = 10;
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
// Verify UUID format and uniqueness
expect(mockReq.file_id).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i,
);
generatedUuids.add(mockReq.file_id);
callCount++;
if (callCount === totalCalls) {
// All UUIDs should be unique
expect(generatedUuids.size).toBe(totalCalls);
done();
} else {
// Reset for next call
delete mockReq.file_id;
storage.getFilename(mockReq, mockFile, cb);
}
});
// Start the chain
storage.getFilename(mockReq, mockFile, cb);
});
});
describe('Error Handling', () => {
it('should handle CVE-2024-28870: empty field name DoS vulnerability', async () => {
// Test for the CVE where empty field name could cause unhandled exception
const multerInstance = await createMulterInstance();
// Create a mock request with empty field name (the vulnerability scenario)
const mockReqWithEmptyField = {
...mockReq,
headers: {
'content-type': 'multipart/form-data',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
end: jest.fn(),
};
// This should not crash or throw unhandled exceptions
const uploadMiddleware = multerInstance.single(''); // Empty field name
const mockNext = jest.fn((err) => {
// If there's an error, it should be handled gracefully, not crash
if (err) {
expect(err).toBeInstanceOf(Error);
// The error should be handled, not crash the process
}
});
// This should complete without crashing the process
expect(() => {
uploadMiddleware(mockReqWithEmptyField, mockRes, mockNext);
}).not.toThrow();
});
it('should handle file system errors when directory creation fails', (done) => {
// Test with a non-existent parent directory to simulate fs issues
const invalidPath = '/nonexistent/path/that/should/not/exist';
mockReq.app.locals.paths.uploads = invalidPath;
try {
// Call getDestination which should fail due to permission/path issues
storage.getDestination(mockReq, mockFile, (err, destination) => {
// If callback is reached, we didn't get the expected error
done(new Error('Expected mkdirSync to throw an error but callback was called'));
});
// If we get here without throwing, something unexpected happened
done(new Error('Expected mkdirSync to throw an error but no error was thrown'));
} catch (error) {
// This is the expected behavior - mkdirSync throws synchronously for invalid paths
expect(error.code).toBe('EACCES');
done();
}
});
it('should handle malformed filenames with real sanitization', (done) => {
const malformedFile = {
...mockFile,
originalname: null, // This should be handled gracefully
};
const cb = jest.fn((err, filename) => {
// The function should handle this gracefully
expect(typeof err === 'object' || err === null).toBe(true);
done();
});
try {
storage.getFilename(mockReq, malformedFile, cb);
} catch (error) {
// If it throws, that's also acceptable behavior
done();
}
});
it('should handle edge cases in filename sanitization', (done) => {
const edgeCaseFiles = [
{ originalname: '', expected: /_/ },
{ originalname: '.hidden', expected: /^_\.hidden/ },
{ originalname: '../../../etc/passwd', expected: /passwd/ },
{ originalname: 'file\x00name.txt', expected: /file_name\.txt/ },
];
let testCount = 0;
const testNextFile = (fileData) => {
const fileToTest = { ...mockFile, originalname: fileData.originalname };
const cb = jest.fn((err, filename) => {
expect(err).toBeNull();
expect(filename).toMatch(fileData.expected);
testCount++;
if (testCount === edgeCaseFiles.length) {
done();
} else {
testNextFile(edgeCaseFiles[testCount]);
}
});
storage.getFilename(mockReq, fileToTest, cb);
};
testNextFile(edgeCaseFiles[0]);
});
});
describe('Real Configuration Testing', () => {
it('should handle missing custom config gracefully with real mergeFileConfig', async () => {
const { getCustomConfig } = require('~/server/services/Config');
// Mock getCustomConfig to return undefined
getCustomConfig.mockResolvedValueOnce(undefined);
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
expect(typeof multerInstance.single).toBe('function');
});
it('should properly integrate real fileConfig with custom endpoints', async () => {
const { getCustomConfig } = require('~/server/services/Config');
// Mock a custom config with additional endpoints
getCustomConfig.mockResolvedValueOnce({
fileConfig: {
endpoints: {
anthropic: {
supportedMimeTypes: ['text/plain', 'image/png'],
},
},
serverFileSizeLimit: 20, // 20 MB
},
});
const multerInstance = await createMulterInstance();
expect(multerInstance).toBeDefined();
// Verify that getCustomConfig was called (we can't spy on the actual merge function easily)
expect(getCustomConfig).toHaveBeenCalled();
});
});
});
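
For orientation, the expectations in these tests (a 255-character cap with a 6-hex-character suffix, underscore replacement of unsafe characters, and handling of empty names, leading dots, and path traversal) imply behavior roughly like the sketch below. This is only an approximation for readers of the test file; the real sanitizeFilename is exported from @librechat/api and may differ in detail.

const path = require('path');
const crypto = require('crypto');

/** Rough approximation of a sanitizer that would satisfy the tests above. */
function sanitizeFilenameSketch(inputName) {
  // Keep only the basename, defeating traversal such as ../../../etc/passwd
  let name = path.basename(String(inputName ?? ''));
  // Replace characters outside a conservative allow-list with underscores
  name = name.replace(/[^a-zA-Z0-9_.-]/g, '_');
  // Avoid empty names and leading dots (hidden files)
  if (name === '' || name.startsWith('.')) {
    name = `_${name}`;
  }
  // Enforce the 255-character cap, preserving the extension and adding a hex suffix
  if (name.length > 255) {
    const ext = path.extname(name);
    const suffix = `-${crypto.randomBytes(3).toString('hex')}${ext}`;
    name = name.slice(0, 255 - suffix.length) + suffix;
  }
  return name;
}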

View file

@@ -4,6 +4,7 @@ const tokenizer = require('./tokenizer');
const endpoints = require('./endpoints');
const staticRoute = require('./static');
const messages = require('./messages');
const memories = require('./memories');
const presets = require('./presets');
const prompts = require('./prompts');
const balance = require('./balance');
@@ -51,6 +52,7 @@ module.exports = {
presets,
balance,
messages,
memories,
endpoints,
tokenizer,
assistants,

View file

@@ -0,0 +1,231 @@
const express = require('express');
const { Tokenizer } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const {
getAllUserMemories,
toggleUserMemories,
createMemory,
setMemory,
deleteMemory,
} = require('~/models');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const router = express.Router();
const checkMemoryRead = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.READ,
]);
const checkMemoryCreate = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.CREATE,
]);
const checkMemoryUpdate = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.UPDATE,
]);
const checkMemoryDelete = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.UPDATE,
]);
const checkMemoryOptOut = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.OPT_OUT,
]);
router.use(requireJwtAuth);
/**
* GET /memories
* Returns all memories for the authenticated user, sorted by updated_at (newest first).
* Also includes memory usage percentage based on token limit.
*/
router.get('/', checkMemoryRead, async (req, res) => {
try {
const memories = await getAllUserMemories(req.user.id);
const sortedMemories = memories.sort(
(a, b) => new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime(),
);
const totalTokens = memories.reduce((sum, memory) => {
return sum + (memory.tokenCount || 0);
}, 0);
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
let usagePercentage = null;
if (tokenLimit && tokenLimit > 0) {
usagePercentage = Math.min(100, Math.round((totalTokens / tokenLimit) * 100));
}
res.json({
memories: sortedMemories,
totalTokens,
tokenLimit: tokenLimit || null,
usagePercentage,
});
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* POST /memories
* Creates a new memory entry for the authenticated user.
* Body: { key: string, value: string }
* Returns 201 and { created: true, memory: <createdDoc> } when successful.
*/
router.post('/', checkMemoryCreate, async (req, res) => {
const { key, value } = req.body;
if (typeof key !== 'string' || key.trim() === '') {
return res.status(400).json({ error: 'Key is required and must be a non-empty string.' });
}
if (typeof value !== 'string' || value.trim() === '') {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
// Check token limit
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
if (tokenLimit) {
const currentTotalTokens = memories.reduce(
(sum, memory) => sum + (memory.tokenCount || 0),
0,
);
if (currentTotalTokens + tokenCount > tokenLimit) {
return res.status(400).json({
error: `Adding this memory would exceed the token limit of ${tokenLimit}. Current usage: ${currentTotalTokens} tokens.`,
});
}
}
const result = await createMemory({
userId: req.user.id,
key: key.trim(),
value: value.trim(),
tokenCount,
});
if (!result.ok) {
return res.status(500).json({ error: 'Failed to create memory.' });
}
const updatedMemories = await getAllUserMemories(req.user.id);
const newMemory = updatedMemories.find((m) => m.key === key.trim());
res.status(201).json({ created: true, memory: newMemory });
} catch (error) {
if (error.message && error.message.includes('already exists')) {
return res.status(409).json({ error: 'Memory with this key already exists.' });
}
res.status(500).json({ error: error.message });
}
});
/**
* PATCH /memories/preferences
* Updates the user's memory preferences (e.g., enabling/disabling memories).
* Body: { memories: boolean }
* Returns 200 and { updated: true, preferences: { memories: boolean } } when successful.
*/
router.patch('/preferences', checkMemoryOptOut, async (req, res) => {
const { memories } = req.body;
if (typeof memories !== 'boolean') {
return res.status(400).json({ error: 'memories must be a boolean value.' });
}
try {
const updatedUser = await toggleUserMemories(req.user.id, memories);
if (!updatedUser) {
return res.status(404).json({ error: 'User not found.' });
}
res.json({
updated: true,
preferences: {
memories: updatedUser.personalization?.memories ?? true,
},
});
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* PATCH /memories/:key
* Updates the value of an existing memory entry for the authenticated user.
* Body: { value: string }
* Returns 200 and { updated: true, memory: <updatedDoc> } when successful.
*/
router.patch('/:key', checkMemoryUpdate, async (req, res) => {
const { key } = req.params;
const { value } = req.body || {};
if (typeof value !== 'string' || value.trim() === '') {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
const existingMemory = memories.find((m) => m.key === key);
if (!existingMemory) {
return res.status(404).json({ error: 'Memory not found.' });
}
const result = await setMemory({
userId: req.user.id,
key,
value,
tokenCount,
});
if (!result.ok) {
return res.status(500).json({ error: 'Failed to update memory.' });
}
const updatedMemories = await getAllUserMemories(req.user.id);
const updatedMemory = updatedMemories.find((m) => m.key === key);
res.json({ updated: true, memory: updatedMemory });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* DELETE /memories/:key
* Deletes a memory entry for the authenticated user.
* Returns 200 and { deleted: true } when successful.
*/
router.delete('/:key', checkMemoryDelete, async (req, res) => {
const { key } = req.params;
try {
const result = await deleteMemory({ userId: req.user.id, key });
if (!result.ok) {
return res.status(404).json({ error: 'Memory not found.' });
}
res.json({ deleted: true });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
module.exports = router;
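
A quick illustration of how a client could exercise the routes above once they are mounted at /api/memories (see the server index change earlier in this diff). The token and key names here are placeholders only.

async function memoriesExample(token) {
  const headers = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${token}`,
  };

  // Create a memory (201 on success, 409 if the key already exists)
  await fetch('/api/memories', {
    method: 'POST',
    headers,
    body: JSON.stringify({ key: 'preferred_language', value: 'German' }),
  });

  // List memories plus usage stats derived from the configured tokenLimit
  const { memories, totalTokens, tokenLimit, usagePercentage } = await (
    await fetch('/api/memories', { headers })
  ).json();
  console.log(memories.length, totalTokens, tokenLimit, usagePercentage);

  // Update a value, opt out of memories entirely, then delete the entry
  await fetch('/api/memories/preferred_language', {
    method: 'PATCH',
    headers,
    body: JSON.stringify({ value: 'French' }),
  });
  await fetch('/api/memories/preferences', {
    method: 'PATCH',
    headers,
    body: JSON.stringify({ memories: false }),
  });
  await fetch('/api/memories/preferred_language', { method: 'DELETE', headers });
}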

View file

@@ -1,6 +1,7 @@
const express = require('express');
const {
promptPermissionsSchema,
memoryPermissionsSchema,
agentPermissionsSchema,
PermissionTypes,
roleDefaults,
@@ -118,4 +119,43 @@ router.put('/:roleName/agents', checkAdmin, async (req, res) => {
}
});
/**
* PUT /api/roles/:roleName/memories
* Update memory permissions for a specific role
*/
router.put('/:roleName/memories', checkAdmin, async (req, res) => {
const { roleName: _r } = req.params;
// TODO: TEMP, use a better parsing for roleName
const roleName = _r.toUpperCase();
/** @type {TRole['permissions']['MEMORIES']} */
const updates = req.body;
try {
const parsedUpdates = memoryPermissionsSchema.partial().parse(updates);
const role = await getRoleByName(roleName);
if (!role) {
return res.status(404).send({ message: 'Role not found' });
}
const currentPermissions =
role.permissions?.[PermissionTypes.MEMORIES] || role[PermissionTypes.MEMORIES] || {};
const mergedUpdates = {
permissions: {
...role.permissions,
[PermissionTypes.MEMORIES]: {
...currentPermissions,
...parsedUpdates,
},
},
};
const updatedRole = await updateRoleByName(roleName, mergedUpdates);
res.status(200).send(updatedRole);
} catch (error) {
return res.status(400).send({ message: 'Invalid memory permissions.', error: error.errors });
}
});
module.exports = router;

View file

@@ -1,7 +1,9 @@
const jwt = require('jsonwebtoken');
const { nanoid } = require('nanoid');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { GraphEvents, sleep } = require('@librechat/agents');
const { sendEvent, logAxiosError } = require('@librechat/api');
const {
Time,
CacheKeys,
@@ -13,11 +15,10 @@ const {
actionDomainSeparator,
} = require('librechat-data-provider');
const { refreshAccessToken } = require('~/server/services/TokenService');
const { logger, getFlowStateManager, sendEvent } = require('~/config');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
const { logAxiosError } = require('~/utils');
const { getFlowStateManager } = require('~/config');
const { getLogStores } = require('~/cache');
const { findToken } = require('~/models');
@@ -208,6 +209,7 @@ async function createActionTool({
userId: userId,
client_url: metadata.auth.client_url,
redirect_uri: `${process.env.DOMAIN_SERVER}/api/actions/${action_id}/oauth/callback`,
token_exchange_method: metadata.auth.token_exchange_method,
/** Encrypted values */
encrypted_oauth_client_id: encrypted.oauth_client_id,
encrypted_oauth_client_secret: encrypted.oauth_client_secret,
@@ -262,6 +264,7 @@ async function createActionTool({
refresh_token,
client_url: metadata.auth.client_url,
encrypted_oauth_client_id: encrypted.oauth_client_id,
token_exchange_method: metadata.auth.token_exchange_method,
encrypted_oauth_client_secret: encrypted.oauth_client_secret,
});
const flowsCache = getLogStores(CacheKeys.FLOWS);

View file

@@ -3,9 +3,11 @@ const {
loadOCRConfig,
processMCPEnv,
EModelEndpoint,
loadMemoryConfig,
getConfigDefaults,
loadWebSearchConfig,
} = require('librechat-data-provider');
const { agentsConfigSetup } = require('@librechat/api');
const {
checkHealth,
checkConfig,
@@ -24,7 +26,6 @@ const { azureConfigSetup } = require('./start/azureOpenAI');
const { processModelSpecs } = require('./start/modelSpecs');
const { initializeS3 } = require('./Files/S3/initialize');
const { loadAndFormatTools } = require('./ToolService');
const { agentsConfigSetup } = require('./start/agents');
const { isEnabled } = require('~/server/utils');
const { initializeRoles } = require('~/models');
const { getMCPManager } = require('~/config');
@@ -44,6 +45,7 @@ const AppService = async (app) => {
const ocr = loadOCRConfig(config.ocr);
const webSearch = loadWebSearchConfig(config.webSearch);
checkWebSearchConfig(webSearch);
const memory = loadMemoryConfig(config.memory);
const filteredTools = config.filteredTools;
const includedTools = config.includedTools;
const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
@@ -88,6 +90,7 @@ const AppService = async (app) => {
const defaultLocals = {
ocr,
paths,
memory,
webSearch,
fileStrategy,
socialLogins,
@@ -100,8 +103,13 @@ const AppService = async (app) => {
balance,
};
const agentsDefaults = agentsConfigSetup(config);
if (!Object.keys(config).length) {
app.locals = defaultLocals;
app.locals = {
...defaultLocals,
[EModelEndpoint.agents]: agentsDefaults,
};
return;
}
@@ -136,9 +144,7 @@ const AppService = async (app) => {
);
}
if (endpoints?.[EModelEndpoint.agents]) {
endpointLocals[EModelEndpoint.agents] = agentsConfigSetup(config);
}
endpointLocals[EModelEndpoint.agents] = agentsConfigSetup(config, agentsDefaults);
const endpointKeys = [
EModelEndpoint.openAI,

View file

@@ -2,8 +2,10 @@ const {
FileSources,
EModelEndpoint,
EImageOutputType,
AgentCapabilities,
defaultSocialLogins,
validateAzureGroups,
defaultAgentCapabilities,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
@@ -151,6 +153,11 @@ describe('AppService', () => {
safeSearch: 1,
serperApiKey: '${SERPER_API_KEY}',
},
memory: undefined,
agents: {
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
},
});
});
@@ -268,6 +275,71 @@ describe('AppService', () => {
);
});
it('should correctly configure Agents endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.agents]: {
disableBuilder: true,
recursionLimit: 10,
maxRecursionLimit: 20,
allowedProviders: ['openai', 'anthropic'],
capabilities: [AgentCapabilities.tools, AgentCapabilities.actions],
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: true,
recursionLimit: 10,
maxRecursionLimit: 20,
allowedProviders: expect.arrayContaining(['openai', 'anthropic']),
capabilities: expect.arrayContaining([AgentCapabilities.tools, AgentCapabilities.actions]),
}),
);
});
it('should configure Agents endpoint with defaults when no config is provided', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() => Promise.resolve({}));
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
}),
);
});
it('should configure Agents endpoint with defaults when endpoints exist but agents is not defined', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: true,
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toEqual(
expect.objectContaining({
disableBuilder: false,
capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
}),
);
});
it('should correctly configure minimum Azure OpenAI Assistant values', async () => {
const assistantGroups = [azureGroups[0], { ...azureGroups[1], assistants: true }];
require('./Config/loadCustomConfig').mockImplementationOnce(() =>

View file

@@ -0,0 +1,196 @@
const { Providers } = require('@librechat/agents');
const { primeResources, optionalChainWithEmptyCheck } = require('@librechat/api');
const {
ErrorTypes,
EModelEndpoint,
EToolResources,
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { getConvoFiles } = require('~/models/Conversation');
const { getToolFilesByIds } = require('~/models/File');
const { getModelMaxTokens } = require('~/utils');
const { getFiles } = require('~/models/File');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {Agent} params.agent
* @param {string | null} [params.conversationId]
* @param {Array<IMongoFile>} [params.requestFiles]
* @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
* @param {TEndpointOption} [params.endpointOption]
* @param {Set<string>} [params.allowedProviders]
* @param {boolean} [params.isInitialAgent]
* @returns {Promise<Agent & { tools: StructuredTool[], attachments: Array<MongoFile>, toolContextMap: Record<string, unknown>, maxContextTokens: number }>}
*/
const initializeAgent = async ({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
isInitialAgent = false,
}) => {
if (allowedProviders.size > 0 && !allowedProviders.has(agent.provider)) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles;
if (
isInitialAgent &&
conversationId != null &&
(agent.model_parameters?.resendFiles ?? true) === true
) {
const fileIds = (await getConvoFiles(conversationId)) ?? [];
/** @type {Set<EToolResources>} */
const toolResourceSet = new Set();
for (const tool of agent.tools) {
if (EToolResources[tool]) {
toolResourceSet.add(EToolResources[tool]);
}
}
const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
if (requestFiles.length || toolFiles.length) {
currentFiles = await processFiles(requestFiles.concat(toolFiles));
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = await processFiles(requestFiles);
}
const { attachments, tool_resources } = await primeResources({
req,
getFiles,
attachments: currentFiles,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
});
const provider = agent.provider;
const { tools, toolContextMap } =
(await loadTools?.({
req,
res,
provider,
agentId: agent.id,
tools: agent.tools,
model: agent.model,
tool_resources,
})) ?? {};
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
getOptions = providerConfigMap[agent.provider];
} else if (!getOptions) {
const customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
}
const model_parameters = Object.assign(
{},
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
);
const _endpointOption =
isInitialAgent === true
? Object.assign({}, endpointOption, { model_parameters })
: { model_parameters };
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: provider,
overrideModel: agent.model,
endpointOption: _endpointOption,
});
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
if (!agent.model_parameters.model) {
agent.model_parameters.model = agent.model;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
agent.additional_instructions = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts,
});
}
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
const maxTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxOutputTokens,
agent.model_parameters.maxTokens,
0,
);
const maxContextTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxContextTokens,
agent.max_context_tokens,
getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
4096,
);
return {
...agent,
tools,
attachments,
toolContextMap,
maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
};
};
module.exports = { initializeAgent };
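
The returned maxContextTokens reserves the completion budget and a 10% safety margin. With optionalChainWithEmptyCheck (its implementation appears in the removed initialize.js below) picking the first value that is not undefined, null, or an empty string, the arithmetic works out as in this illustrative snippet; the numbers are made up.

function optionalChainWithEmptyCheck(...values) {
  for (const value of values) {
    if (value !== undefined && value !== null && value !== '') {
      return value;
    }
  }
  return values[values.length - 1];
}

const maxTokens = optionalChainWithEmptyCheck(undefined, 1024, 0); // maxOutputTokens/maxTokens -> 1024
const maxContextTokens = optionalChainWithEmptyCheck('', undefined, 8192, 4096); // model window -> 8192
console.log((maxContextTokens - maxTokens) * 0.9); // 6451.2 tokens left for prompt context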

View file

@@ -1,294 +1,41 @@
const { createContentAggregator, Providers } = require('@librechat/agents');
const {
Constants,
ErrorTypes,
EModelEndpoint,
EToolResources,
getResponseSender,
AgentCapabilities,
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { createContentAggregator } = require('@librechat/agents');
const { Constants, EModelEndpoint, getResponseSender } = require('librechat-data-provider');
const {
getDefaultHandlers,
createToolEndCallback,
} = require('~/server/controllers/agents/callbacks');
const initAnthropic = require('~/server/services/Endpoints/anthropic/initialize');
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
const initOpenAI = require('~/server/services/Endpoints/openAI/initialize');
const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { processFiles } = require('~/server/services/Files/process');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getConvoFiles } = require('~/models/Conversation');
const { getToolFilesByIds } = require('~/models/File');
const { getModelMaxTokens } = require('~/utils');
const { getAgent } = require('~/models/Agent');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,
[Providers.DEEPSEEK]: initCustom,
[Providers.OPENROUTER]: initCustom,
[EModelEndpoint.openAI]: initOpenAI,
[EModelEndpoint.google]: initGoogle,
[EModelEndpoint.azureOpenAI]: initOpenAI,
[EModelEndpoint.anthropic]: initAnthropic,
[EModelEndpoint.bedrock]: getBedrockOptions,
};
/**
* @param {Object} params
* @param {ServerRequest} params.req
* @param {Promise<Array<MongoFile | null>> | undefined} [params.attachments]
* @param {Set<string>} params.requestFileSet
* @param {AgentToolResources | undefined} [params.tool_resources]
* @returns {Promise<{ attachments: Array<MongoFile | undefined> | undefined, tool_resources: AgentToolResources | undefined }>}
*/
const primeResources = async ({
req,
attachments: _attachments,
tool_resources: _tool_resources,
requestFileSet,
}) => {
try {
/** @type {Array<MongoFile | undefined> | undefined} */
let attachments;
const tool_resources = _tool_resources ?? {};
const isOCREnabled = (req.app.locals?.[EModelEndpoint.agents]?.capabilities ?? []).includes(
AgentCapabilities.ocr,
);
if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
const context = await getFiles(
{
file_id: { $in: tool_resources.ocr.file_ids },
},
{},
{},
);
attachments = (attachments ?? []).concat(context);
function createToolLoader() {
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {string} params.agentId
* @param {string[]} params.tools
* @param {string} params.provider
* @param {string} params.model
* @param {AgentToolResources} params.tool_resources
* @returns {Promise<{ tools: StructuredTool[], toolContextMap: Record<string, unknown> } | undefined>}
*/
return async function loadTools({ req, res, agentId, tools, provider, model, tool_resources }) {
const agent = { id: agentId, tools, provider, model };
try {
return await loadAgentTools({
req,
res,
agent,
tool_resources,
});
} catch (error) {
logger.error('Error loading tools for agent ' + agentId, error);
}
if (!_attachments) {
return { attachments, tool_resources };
}
/** @type {Array<MongoFile | undefined> | undefined} */
const files = await _attachments;
if (!attachments) {
/** @type {Array<MongoFile | undefined>} */
attachments = [];
}
for (const file of files) {
if (!file) {
continue;
}
if (file.metadata?.fileIdentifier) {
const execute_code = tool_resources[EToolResources.execute_code] ?? {};
if (!execute_code.files) {
tool_resources[EToolResources.execute_code] = { ...execute_code, files: [] };
}
tool_resources[EToolResources.execute_code].files.push(file);
} else if (file.embedded === true) {
const file_search = tool_resources[EToolResources.file_search] ?? {};
if (!file_search.files) {
tool_resources[EToolResources.file_search] = { ...file_search, files: [] };
}
tool_resources[EToolResources.file_search].files.push(file);
} else if (
requestFileSet.has(file.file_id) &&
file.type.startsWith('image') &&
file.height &&
file.width
) {
const image_edit = tool_resources[EToolResources.image_edit] ?? {};
if (!image_edit.files) {
tool_resources[EToolResources.image_edit] = { ...image_edit, files: [] };
}
tool_resources[EToolResources.image_edit].files.push(file);
}
attachments.push(file);
}
return { attachments, tool_resources };
} catch (error) {
logger.error('Error priming resources', error);
return { attachments: _attachments, tool_resources: _tool_resources };
}
};
/**
* @param {...string | number} values
* @returns {string | number | undefined}
*/
function optionalChainWithEmptyCheck(...values) {
for (const value of values) {
if (value !== undefined && value !== null && value !== '') {
return value;
}
}
return values[values.length - 1];
}
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {Agent} params.agent
* @param {Set<string>} [params.allowedProviders]
* @param {object} [params.endpointOption]
* @param {boolean} [params.isInitialAgent]
* @returns {Promise<Agent>}
*/
const initializeAgentOptions = async ({
req,
res,
agent,
endpointOption,
allowedProviders,
isInitialAgent = false,
}) => {
if (allowedProviders.size > 0 && !allowedProviders.has(agent.provider)) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles;
/** @type {Array<MongoFile>} */
const requestFiles = req.body.files ?? [];
if (
isInitialAgent &&
req.body.conversationId != null &&
(agent.model_parameters?.resendFiles ?? true) === true
) {
const fileIds = (await getConvoFiles(req.body.conversationId)) ?? [];
/** @type {Set<EToolResources>} */
const toolResourceSet = new Set();
for (const tool of agent.tools) {
if (EToolResources[tool]) {
toolResourceSet.add(EToolResources[tool]);
}
}
const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
if (requestFiles.length || toolFiles.length) {
currentFiles = await processFiles(requestFiles.concat(toolFiles));
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = await processFiles(requestFiles);
}
const { attachments, tool_resources } = await primeResources({
req,
attachments: currentFiles,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles.map((file) => file.file_id)),
});
const provider = agent.provider;
const { tools, toolContextMap } = await loadAgentTools({
req,
res,
agent: {
id: agent.id,
tools: agent.tools,
provider,
model: agent.model,
},
tool_resources,
});
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
getOptions = providerConfigMap[agent.provider];
} else if (!getOptions) {
const customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
}
const model_parameters = Object.assign(
{},
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
);
const _endpointOption =
isInitialAgent === true
? Object.assign({}, endpointOption, { model_parameters })
: { model_parameters };
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: provider,
overrideModel: agent.model,
endpointOption: _endpointOption,
});
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = Object.assign(model_parameters, options.llmConfig);
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
if (!agent.model_parameters.model) {
agent.model_parameters.model = agent.model;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
agent.additional_instructions = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts,
});
}
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : agent.model_parameters.model;
const maxTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxOutputTokens,
agent.model_parameters.maxTokens,
0,
);
const maxContextTokens = optionalChainWithEmptyCheck(
agent.model_parameters.maxContextTokens,
agent.max_context_tokens,
getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
4096,
);
return {
...agent,
tools,
attachments,
toolContextMap,
maxContextTokens: (maxContextTokens - maxTokens) * 0.9,
};
};
}
const initializeClient = async ({ req, res, endpointOption }) => {
if (!endpointOption) {
@@ -313,7 +60,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error('No agent promise provided');
}
// Initialize primary agent
const primaryAgent = await endpointOption.agent;
if (!primaryAgent) {
throw new Error('Agent not found');
@@ -323,10 +69,18 @@ const initializeClient = async ({ req, res, endpointOption }) => {
/** @type {Set<string>} */
const allowedProviders = new Set(req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders);
// Handle primary agent
const primaryConfig = await initializeAgentOptions({
const loadTools = createToolLoader();
/** @type {Array<MongoFile>} */
const requestFiles = req.body.files ?? [];
/** @type {string} */
const conversationId = req.body.conversationId;
const primaryConfig = await initializeAgent({
req,
res,
loadTools,
requestFiles,
conversationId,
agent: primaryAgent,
endpointOption,
allowedProviders,
@@ -340,10 +94,13 @@ const initializeClient = async ({ req, res, endpointOption }) => {
if (!agent) {
throw new Error(`Agent ${agentId} not found`);
}
const config = await initializeAgentOptions({
const config = await initializeAgent({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
});

View file

@@ -1,5 +1,6 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { constructAzureURL, isUserProvided } = require('@librechat/api');
const {
ErrorTypes,
EModelEndpoint,
@@ -12,8 +13,6 @@ const {
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');
class Files {
constructor(client) {

View file

@@ -1,4 +1,5 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { createHandleLLMNewToken } = require('@librechat/api');
const {
AuthType,
Constants,
@@ -8,7 +9,6 @@ const {
removeNullishValues,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const getOptions = async ({ req, overrideModel, endpointOption }) => {
const {

View file

@@ -6,10 +6,9 @@ const {
extractEnvVariable,
} = require('librechat-data-provider');
const { Providers } = require('@librechat/agents');
const { getOpenAIConfig, createHandleLLMNewToken } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const { fetchModels } = require('~/server/services/ModelService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
@@ -144,7 +143,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
clientOptions,
);
clientOptions.modelOptions.user = req.user.id;
const options = getLLMConfig(apiKey, clientOptions, endpoint);
const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
if (!customOptions.streamRate) {
return options;
}

View file

@@ -25,9 +25,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
const credentials = isUserProvided
? userKey
: {
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
let clientOptions = {};

View file

@@ -94,7 +94,7 @@ function getLLMConfig(credentials, options = {}) {
// Extract from credentials
const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
const serviceKey =
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : serviceKeyRaw ?? {};
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
const project_id = serviceKey?.project_id ?? null;
const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;
@@ -156,10 +156,6 @@ function getLLMConfig(credentials, options = {}) {
}
if (authHeader) {
/**
* NOTE: NOT SUPPORTED BY LANGCHAIN GENAI CLIENT,
* REQUIRES PR IN https://github.com/langchain-ai/langchainjs
*/
llmConfig.customHeaders = {
Authorization: `Bearer ${apiKey}`,
};

View file

@@ -1,11 +1,10 @@
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { isEnabled, isUserProvided, getAzureCredentials } = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { isEnabled, isUserProvided } = require('~/server/utils');
const { getAzureCredentials } = require('~/utils');
const { PluginsClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption }) => {

View file

@@ -114,11 +114,11 @@ describe('gptPlugins/initializeClient', () => {
test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
(process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

View file

@@ -4,12 +4,15 @@ const {
resolveHeaders,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const {
isEnabled,
isUserProvided,
getOpenAIConfig,
getAzureCredentials,
createHandleLLMNewToken,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm');
const { createHandleLLMNewToken } = require('~/app/clients/generators');
const { isEnabled, isUserProvided } = require('~/server/utils');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { getAzureCredentials } = require('~/utils');
const initializeClient = async ({
req,
@@ -140,7 +143,7 @@ const initializeClient = async ({
modelOptions.model = modelName;
clientOptions = Object.assign({ modelOptions }, clientOptions);
clientOptions.modelOptions.user = req.user.id;
const options = getLLMConfig(apiKey, clientOptions);
const options = getOpenAIConfig(apiKey, clientOptions);
const streamRate = clientOptions.streamRate;
if (!streamRate) {
return options;

View file

@@ -1,170 +0,0 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { KnownEndpoints } = require('librechat-data-provider');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param {string} apiKey - The API key for authentication.
* @param {Object} options - Additional options for configuring the LLM.
* @param {Object} [options.modelOptions] - Model-specific options.
* @param {string} [options.modelOptions.model] - The name of the model to use.
* @param {string} [options.modelOptions.user] - The user ID
* @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2).
* @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1).
* @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2).
* @param {number} [options.modelOptions.presence_penalty] - Encourages discussing new topics (-2 to 2).
* @param {number} [options.modelOptions.max_tokens] - The maximum number of tokens to generate.
* @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
* @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
* @param {boolean} [options.useOpenRouter] - Flag to use OpenRouter API.
* @param {Object} [options.headers] - Additional headers for API requests.
* @param {string} [options.proxy] - Proxy server URL.
* @param {Object} [options.azure] - Azure-specific configurations.
* @param {boolean} [options.streaming] - Whether to use streaming mode.
* @param {Object} [options.addParams] - Additional parameters to add to the model options.
* @param {string[]} [options.dropParams] - Parameters to remove from the model options.
* @param {string|null} [endpoint=null] - The endpoint name
* @returns {Object} Configuration options for creating an LLM instance.
*/
function getLLMConfig(apiKey, options = {}, endpoint = null) {
let {
modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
proxy,
azure,
streaming = true,
addParams,
dropParams,
} = options;
/** @type {OpenAIClientOptions} */
let llmConfig = {
streaming,
};
Object.assign(llmConfig, modelOptions);
if (addParams && typeof addParams === 'object') {
Object.assign(llmConfig, addParams);
}
/** Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` */
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
dropParams = dropParams || [];
dropParams = [...new Set([...dropParams, ...searchExcludeParams])];
}
if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (llmConfig[param]) {
llmConfig[param] = undefined;
}
});
}
let useOpenRouter;
/** @type {OpenAIClientOptions['configuration']} */
const configOptions = {};
if (
(reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
(endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
useOpenRouter = true;
llmConfig.include_reasoning = true;
configOptions.baseURL = reverseProxyUrl;
configOptions.defaultHeaders = Object.assign(
{
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
headers,
);
} else if (reverseProxyUrl) {
configOptions.baseURL = reverseProxyUrl;
if (headers) {
configOptions.defaultHeaders = headers;
}
}
if (defaultQuery) {
configOptions.defaultQuery = defaultQuery;
}
if (proxy) {
const proxyAgent = new HttpsProxyAgent(proxy);
Object.assign(configOptions, {
httpAgent: proxyAgent,
httpsAgent: proxyAgent,
});
}
if (azure) {
const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
azure.azureOpenAIApiDeploymentName = useModelName
? sanitizeModelName(llmConfig.model)
: azure.azureOpenAIApiDeploymentName;
if (process.env.AZURE_OPENAI_DEFAULT_MODEL) {
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
if (configOptions.baseURL) {
const azureURL = constructAzureURL({
baseURL: configOptions.baseURL,
azureOptions: azure,
});
azure.azureOpenAIBasePath = azureURL.split(`/${azure.azureOpenAIApiDeploymentName}`)[0];
}
Object.assign(llmConfig, azure);
llmConfig.model = llmConfig.azureOpenAIApiDeploymentName;
} else {
llmConfig.apiKey = apiKey;
// Object.assign(llmConfig, {
// configuration: { apiKey },
// });
}
if (process.env.OPENAI_ORGANIZATION && this.azure) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
}
if (llmConfig?.['max_tokens'] != null) {
/** @type {number} */
llmConfig.maxTokens = llmConfig['max_tokens'];
delete llmConfig['max_tokens'];
}
return {
/** @type {OpenAIClientOptions} */
llmConfig,
/** @type {OpenAIClientOptions['configuration']} */
configOptions,
};
}
module.exports = { getLLMConfig };
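
This local helper was removed in favor of the equivalent function exported from the @librechat/api package; the caller changes earlier in this diff swap getLLMConfig(...) for getOpenAIConfig(apiKey, clientOptions, endpoint) with the same arguments. Assuming the packaged helper keeps the same signature and return shape as the removed function, usage would look roughly like this (the option values are placeholders):

const { getOpenAIConfig } = require('@librechat/api');

// Hypothetical values for illustration only
const { llmConfig, configOptions } = getOpenAIConfig(process.env.OPENAI_API_KEY, {
  modelOptions: { model: 'gpt-4o-mini', temperature: 0.7, user: 'user-123' },
  streaming: true,
});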

View file

@@ -2,9 +2,9 @@ const axios = require('axios');
const fs = require('fs').promises;
const FormData = require('form-data');
const { Readable } = require('stream');
const { genAzureEndpoint } = require('@librechat/api');
const { extractEnvVariable, STTProviders } = require('librechat-data-provider');
const { getCustomConfig } = require('~/server/services/Config');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');
/**

View file

@@ -1,8 +1,8 @@
const axios = require('axios');
const { genAzureEndpoint } = require('@librechat/api');
const { extractEnvVariable, TTSProviders } = require('librechat-data-provider');
const { getRandomVoiceId, createChunkProcessor, splitTextIntoChunks } = require('./streamAudio');
const { getCustomConfig } = require('~/server/services/Config');
const { genAzureEndpoint } = require('~/utils');
const { logger } = require('~/config');
/**

View file

@@ -91,24 +91,44 @@ async function prepareAzureImageURL(req, file) {
* @param {Buffer} params.buffer - The avatar image buffer.
* @param {string} params.userId - The user's id.
* @param {string} params.manual - Flag to indicate manual update.
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @param {string} [params.basePath='images'] - The base folder within the container.
* @param {string} [params.containerName] - The Azure Blob container name.
* @returns {Promise<string>} The URL of the avatar.
*/
async function processAzureAvatar({ buffer, userId, manual, basePath = 'images', containerName }) {
async function processAzureAvatar({
buffer,
userId,
manual,
agentId,
basePath = 'images',
containerName,
}) {
try {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToAzure({
userId,
buffer,
fileName: 'avatar.png',
fileName,
basePath,
containerName,
});
const isManual = manual === 'true';
const url = `${downloadURL}?manual=${isManual}`;
if (isManual) {
// Only update user record if this is a user avatar (manual === 'true')
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}
return url;
} catch (error) {
logger.error('[processAzureAvatar] Error uploading profile picture to Azure:', error);
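
A quick sketch of the filename scheme this change introduces (timestamp and IDs are illustrative):

const timestamp = 1718600000000; // e.g. Date.now()
const extension = 'png'; // 'gif' when the source image is a GIF
const agentId = 'agent_abc123'; // omitted for a user avatar
const fileName = agentId
  ? `agent-${agentId}-avatar-${timestamp}.${extension}`
  : `avatar-${timestamp}.${extension}`;
// → 'agent-agent_abc123-avatar-1718600000000.png'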

View file

@ -1,7 +1,6 @@
const FormData = require('form-data');
const { getCodeBaseURL } = require('@librechat/agents');
const { createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils');
const { createAxiosInstance, logAxiosError } = require('@librechat/api');
const axios = createAxiosInstance();

View file

@ -1,6 +1,8 @@
const path = require('path');
const { v4 } = require('uuid');
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { getCodeBaseURL } = require('@librechat/agents');
const {
Tools,
@ -12,8 +14,6 @@ const {
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { convertImage } = require('~/server/services/Files/images/convert');
const { createFile, getFiles, updateFile } = require('~/models/File');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Process OpenAI image files, convert to target format, save and return file metadata.

View file

@ -82,22 +82,32 @@ async function prepareImageURL(req, file) {
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
* @param {string} params.userId - The user ID.
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
* @throws {Error} - Throws an error if Firebase is not initialized or if there is an error in uploading.
*/
async function processFirebaseAvatar({ buffer, userId, manual }) {
async function processFirebaseAvatar({ buffer, userId, manual, agentId }) {
try {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToFirebase({
userId,
buffer,
fileName: 'avatar.png',
fileName,
});
const isManual = manual === 'true';
const url = `${downloadURL}?manual=${isManual}`;
if (isManual) {
// Only update user record if this is a user avatar (manual === 'true')
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}

View file

@ -201,6 +201,10 @@ const unlinkFile = async (filepath) => {
*/
const deleteLocalFile = async (req, file) => {
const { publicPath, uploads } = req.app.locals.paths;
/** Filepath stripped of query parameters (e.g., ?manual=true) */
const cleanFilepath = file.filepath.split('?')[0];
if (file.embedded && process.env.RAG_API_URL) {
const jwtToken = req.headers.authorization.split(' ')[1];
axios.delete(`${process.env.RAG_API_URL}/documents`, {
@ -213,32 +217,32 @@ const deleteLocalFile = async (req, file) => {
});
}
if (file.filepath.startsWith(`/uploads/${req.user.id}`)) {
if (cleanFilepath.startsWith(`/uploads/${req.user.id}`)) {
const userUploadDir = path.join(uploads, req.user.id);
const basePath = file.filepath.split(`/uploads/${req.user.id}/`)[1];
const basePath = cleanFilepath.split(`/uploads/${req.user.id}/`)[1];
if (!basePath) {
throw new Error(`Invalid file path: ${file.filepath}`);
throw new Error(`Invalid file path: ${cleanFilepath}`);
}
const filepath = path.join(userUploadDir, basePath);
const rel = path.relative(userUploadDir, filepath);
if (rel.startsWith('..') || path.isAbsolute(rel) || rel.includes(`..${path.sep}`)) {
throw new Error(`Invalid file path: ${file.filepath}`);
throw new Error(`Invalid file path: ${cleanFilepath}`);
}
await unlinkFile(filepath);
return;
}
const parts = file.filepath.split(path.sep);
const parts = cleanFilepath.split(path.sep);
const subfolder = parts[1];
if (!subfolder && parts[0] === EModelEndpoint.agents) {
logger.warn(`Agent File ${file.file_id} is missing filepath, may have been deleted already`);
return;
}
const filepath = path.join(publicPath, file.filepath);
const filepath = path.join(publicPath, cleanFilepath);
if (!isValidPath(req, publicPath, subfolder, filepath)) {
throw new Error('Invalid file path');

View file

@ -112,10 +112,11 @@ async function prepareImagesLocal(req, file) {
* @param {Buffer} params.buffer - The Buffer containing the avatar image.
* @param {string} params.userId - The user ID.
* @param {string} params.manual - A string flag indicating whether the update is manual ('true' or 'false').
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @returns {Promise<string>} - A promise that resolves with the URL of the uploaded avatar.
* @throws {Error} - Throws an error if Firebase is not initialized or if there is an error in uploading.
*/
async function processLocalAvatar({ buffer, userId, manual }) {
async function processLocalAvatar({ buffer, userId, manual, agentId }) {
const userDir = path.resolve(
__dirname,
'..',
@ -129,7 +130,14 @@ async function processLocalAvatar({ buffer, userId, manual }) {
userId,
);
const fileName = `avatar-${new Date().getTime()}.png`;
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const urlRoute = `/images/${userId}/${fileName}`;
const avatarPath = path.join(userDir, fileName);
@ -139,7 +147,8 @@ async function processLocalAvatar({ buffer, userId, manual }) {
const isManual = manual === 'true';
let url = `${urlRoute}?manual=${isManual}`;
if (isManual) {
// Only update user record if this is a user avatar (manual === 'true')
if (isManual && !agentId) {
await updateUser(userId, { avatar: url });
}

View file

@ -1,238 +0,0 @@
// ~/server/services/Files/MistralOCR/crud.js
const fs = require('fs');
const path = require('path');
const FormData = require('form-data');
const {
FileSources,
envVarRegex,
extractEnvVariable,
extractVariableName,
} = require('librechat-data-provider');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { logger, createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils/axios');
const axios = createAxiosInstance();
/**
* Uploads a document to Mistral API using file streaming to avoid loading the entire file into memory
*
* @param {Object} params Upload parameters
* @param {string} params.filePath The path to the file on disk
* @param {string} [params.fileName] Optional filename to use (defaults to the name from filePath)
* @param {string} params.apiKey Mistral API key
* @param {string} [params.baseURL=https://api.mistral.ai/v1] Mistral API base URL
* @returns {Promise<Object>} The response from Mistral API
*/
async function uploadDocumentToMistral({
filePath,
fileName = '',
apiKey,
baseURL = 'https://api.mistral.ai/v1',
}) {
const form = new FormData();
form.append('purpose', 'ocr');
const actualFileName = fileName || path.basename(filePath);
const fileStream = fs.createReadStream(filePath);
form.append('file', fileStream, { filename: actualFileName });
return axios
.post(`${baseURL}/files`, form, {
headers: {
Authorization: `Bearer ${apiKey}`,
...form.getHeaders(),
},
maxBodyLength: Infinity,
maxContentLength: Infinity,
})
.then((res) => res.data)
.catch((error) => {
throw error;
});
}
async function getSignedUrl({
apiKey,
fileId,
expiry = 24,
baseURL = 'https://api.mistral.ai/v1',
}) {
return axios
.get(`${baseURL}/files/${fileId}/url?expiry=${expiry}`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
})
.then((res) => res.data)
.catch((error) => {
logger.error('Error fetching signed URL:', error.message);
throw error;
});
}
/**
* @param {Object} params
* @param {string} params.apiKey
* @param {string} params.url - The document or image URL
* @param {string} [params.documentType='document_url'] - 'document_url' or 'image_url'
* @param {string} [params.model]
* @param {string} [params.baseURL]
* @returns {Promise<OCRResult>}
*/
async function performOCR({
apiKey,
url,
documentType = 'document_url',
model = 'mistral-ocr-latest',
baseURL = 'https://api.mistral.ai/v1',
}) {
const documentKey = documentType === 'image_url' ? 'image_url' : 'document_url';
return axios
.post(
`${baseURL}/ocr`,
{
model,
image_limit: 0,
include_image_base64: false,
document: {
type: documentType,
[documentKey]: url,
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
},
},
)
.then((res) => res.data)
.catch((error) => {
logger.error('Error performing OCR:', error.message);
throw error;
});
}
/**
* Uploads a file to the Mistral OCR API and processes the OCR result.
*
* @param {Object} params - The params object.
* @param {ServerRequest} params.req - The request object from Express. It should have a `user` property with an `id`
* representing the user
* @param {Express.Multer.File} params.file - The file object, which is part of the request. The file object should
* have a `mimetype` property that tells us the file type
* @param {string} params.file_id - The file ID.
* @param {string} [params.entity_id] - The entity ID, not used here but passed for consistency.
* @returns {Promise<{ filepath: string, bytes: number }>} - The result object containing the processed `text` and `images` (not currently used),
* along with the `filename` and `bytes` properties.
*/
const uploadMistralOCR = async ({ req, file, file_id, entity_id }) => {
try {
/** @type {TCustomConfig['ocr']} */
const ocrConfig = req.app.locals?.ocr;
const apiKeyConfig = ocrConfig.apiKey || '';
const baseURLConfig = ocrConfig.baseURL || '';
const isApiKeyEnvVar = envVarRegex.test(apiKeyConfig);
const isBaseURLEnvVar = envVarRegex.test(baseURLConfig);
const isApiKeyEmpty = !apiKeyConfig.trim();
const isBaseURLEmpty = !baseURLConfig.trim();
let apiKey, baseURL;
if (isApiKeyEnvVar || isBaseURLEnvVar || isApiKeyEmpty || isBaseURLEmpty) {
const apiKeyVarName = isApiKeyEnvVar ? extractVariableName(apiKeyConfig) : 'OCR_API_KEY';
const baseURLVarName = isBaseURLEnvVar ? extractVariableName(baseURLConfig) : 'OCR_BASEURL';
const authValues = await loadAuthValues({
userId: req.user.id,
authFields: [baseURLVarName, apiKeyVarName],
optional: new Set([baseURLVarName]),
});
apiKey = authValues[apiKeyVarName];
baseURL = authValues[baseURLVarName];
} else {
apiKey = apiKeyConfig;
baseURL = baseURLConfig;
}
const mistralFile = await uploadDocumentToMistral({
filePath: file.path,
fileName: file.originalname,
apiKey,
baseURL,
});
const modelConfig = ocrConfig.mistralModel || '';
const model = envVarRegex.test(modelConfig)
? extractEnvVariable(modelConfig)
: modelConfig.trim() || 'mistral-ocr-latest';
const signedUrlResponse = await getSignedUrl({
apiKey,
baseURL,
fileId: mistralFile.id,
});
const mimetype = (file.mimetype || '').toLowerCase();
const originalname = file.originalname || '';
const isImage =
mimetype.startsWith('image') || /\.(png|jpe?g|gif|bmp|webp|tiff?)$/i.test(originalname);
const documentType = isImage ? 'image_url' : 'document_url';
const ocrResult = await performOCR({
apiKey,
baseURL,
model,
url: signedUrlResponse.url,
documentType,
});
let aggregatedText = '';
const images = [];
ocrResult.pages.forEach((page, index) => {
if (ocrResult.pages.length > 1) {
aggregatedText += `# PAGE ${index + 1}\n`;
}
aggregatedText += page.markdown + '\n\n';
if (page.images && page.images.length > 0) {
page.images.forEach((image) => {
if (image.image_base64) {
images.push(image.image_base64);
}
});
}
});
return {
filename: file.originalname,
bytes: aggregatedText.length * 4,
filepath: FileSources.mistral_ocr,
text: aggregatedText,
images,
};
} catch (error) {
let message = 'Error uploading document to Mistral OCR API';
const detail = error?.response?.data?.detail;
if (detail && detail !== '') {
message = detail;
}
const responseMessage = error?.response?.data?.message;
throw new Error(
`${logAxiosError({ error, message })}${responseMessage && responseMessage !== '' ? ` - ${responseMessage}` : ''}`,
);
}
};
module.exports = {
uploadDocumentToMistral,
uploadMistralOCR,
getSignedUrl,
performOCR,
};
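
For context, the removed module chained these helpers in the order shown by uploadMistralOCR above; a minimal standalone sketch under the same signatures (the file path and env var name are illustrative):

const { uploadDocumentToMistral, getSignedUrl, performOCR } = require('./crud');

async function ocrLocalPdf(filePath) {
  const apiKey = process.env.OCR_API_KEY; // illustrative env var
  const uploaded = await uploadDocumentToMistral({ filePath, apiKey });
  const signed = await getSignedUrl({ apiKey, fileId: uploaded.id });
  const result = await performOCR({ apiKey, url: signed.url, documentType: 'document_url' });
  return result.pages.map((page) => page.markdown).join('\n\n');
}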

View file

@ -1,848 +0,0 @@
const fs = require('fs');
const mockAxios = {
interceptors: {
request: { use: jest.fn(), eject: jest.fn() },
response: { use: jest.fn(), eject: jest.fn() },
},
create: jest.fn().mockReturnValue({
defaults: {
proxy: null,
},
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
}),
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
reset: jest.fn().mockImplementation(function () {
this.get.mockClear();
this.post.mockClear();
this.put.mockClear();
this.delete.mockClear();
this.create.mockClear();
}),
};
jest.mock('axios', () => mockAxios);
jest.mock('fs');
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
},
createAxiosInstance: () => mockAxios,
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
const { uploadDocumentToMistral, uploadMistralOCR, getSignedUrl, performOCR } = require('./crud');
describe('MistralOCR Service', () => {
afterEach(() => {
mockAxios.reset();
jest.clearAllMocks();
});
describe('uploadDocumentToMistral', () => {
beforeEach(() => {
// Create a more complete mock for file streams that FormData can work with
const mockReadStream = {
on: jest.fn().mockImplementation(function (event, handler) {
// Simulate immediate 'end' event to make FormData complete processing
if (event === 'end') {
handler();
}
return this;
}),
pipe: jest.fn().mockImplementation(function () {
return this;
}),
pause: jest.fn(),
resume: jest.fn(),
emit: jest.fn(),
once: jest.fn(),
destroy: jest.fn(),
};
fs.createReadStream = jest.fn().mockReturnValue(mockReadStream);
// Mock FormData's append to avoid actual stream processing
jest.mock('form-data', () => {
const mockFormData = function () {
return {
append: jest.fn(),
getHeaders: jest
.fn()
.mockReturnValue({ 'content-type': 'multipart/form-data; boundary=---boundary' }),
getBuffer: jest.fn().mockReturnValue(Buffer.from('mock-form-data')),
getLength: jest.fn().mockReturnValue(100),
};
};
return mockFormData;
});
});
it('should upload a document to Mistral API using file streaming', async () => {
const mockResponse = { data: { id: 'file-123', purpose: 'ocr' } };
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await uploadDocumentToMistral({
filePath: '/path/to/test.pdf',
fileName: 'test.pdf',
apiKey: 'test-api-key',
});
// Check that createReadStream was called with the correct file path
expect(fs.createReadStream).toHaveBeenCalledWith('/path/to/test.pdf');
// Since we're mocking FormData, we'll just check that axios was called correctly
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/files',
expect.anything(),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer test-api-key',
}),
maxBodyLength: Infinity,
maxContentLength: Infinity,
}),
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors during document upload', async () => {
const errorMessage = 'API error';
mockAxios.post.mockRejectedValueOnce(new Error(errorMessage));
await expect(
uploadDocumentToMistral({
filePath: '/path/to/test.pdf',
fileName: 'test.pdf',
apiKey: 'test-api-key',
}),
).rejects.toThrow(errorMessage);
});
});
describe('getSignedUrl', () => {
it('should fetch signed URL from Mistral API', async () => {
const mockResponse = { data: { url: 'https://document-url.com' } };
mockAxios.get.mockResolvedValueOnce(mockResponse);
const result = await getSignedUrl({
fileId: 'file-123',
apiKey: 'test-api-key',
});
expect(mockAxios.get).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/files/file-123/url?expiry=24',
{
headers: {
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors when fetching signed URL', async () => {
const errorMessage = 'API error';
mockAxios.get.mockRejectedValueOnce(new Error(errorMessage));
await expect(
getSignedUrl({
fileId: 'file-123',
apiKey: 'test-api-key',
}),
).rejects.toThrow();
const { logger } = require('~/config');
expect(logger.error).toHaveBeenCalledWith('Error fetching signed URL:', errorMessage);
});
});
describe('performOCR', () => {
it('should perform OCR using Mistral API (document_url)', async () => {
const mockResponse = {
data: {
pages: [{ markdown: 'Page 1 content' }, { markdown: 'Page 2 content' }],
},
};
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await performOCR({
apiKey: 'test-api-key',
url: 'https://document-url.com',
model: 'mistral-ocr-latest',
documentType: 'document_url',
});
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
{
model: 'mistral-ocr-latest',
include_image_base64: false,
image_limit: 0,
document: {
type: 'document_url',
document_url: 'https://document-url.com',
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should perform OCR using Mistral API (image_url)', async () => {
const mockResponse = {
data: {
pages: [{ markdown: 'Image OCR content' }],
},
};
mockAxios.post.mockResolvedValueOnce(mockResponse);
const result = await performOCR({
apiKey: 'test-api-key',
url: 'https://image-url.com/image.png',
model: 'mistral-ocr-latest',
documentType: 'image_url',
});
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
{
model: 'mistral-ocr-latest',
include_image_base64: false,
image_limit: 0,
document: {
type: 'image_url',
image_url: 'https://image-url.com/image.png',
},
},
{
headers: {
'Content-Type': 'application/json',
Authorization: 'Bearer test-api-key',
},
},
);
expect(result).toEqual(mockResponse.data);
});
it('should handle errors during OCR processing', async () => {
const errorMessage = 'OCR processing error';
mockAxios.post.mockRejectedValueOnce(new Error(errorMessage));
await expect(
performOCR({
apiKey: 'test-api-key',
url: 'https://document-url.com',
}),
).rejects.toThrow();
const { logger } = require('~/config');
expect(logger.error).toHaveBeenCalledWith('Error performing OCR:', errorMessage);
});
});
describe('uploadMistralOCR', () => {
beforeEach(() => {
const mockReadStream = {
on: jest.fn().mockImplementation(function (event, handler) {
if (event === 'end') {
handler();
}
return this;
}),
pipe: jest.fn().mockImplementation(function () {
return this;
}),
pause: jest.fn(),
resume: jest.fn(),
emit: jest.fn(),
once: jest.fn(),
destroy: jest.fn(),
};
fs.createReadStream = jest.fn().mockReturnValue(mockReadStream);
});
it('should process OCR for a file with standard configuration', async () => {
// Setup mocks
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1',
});
// Mock file upload response
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
// Mock signed URL response
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
// Mock OCR response with text and images
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [
{
markdown: 'Page 1 content',
images: [{ image_base64: 'base64image1' }],
},
{
markdown: 'Page 2 content',
images: [{ image_base64: 'base64image2' }],
},
],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Use environment variable syntax to ensure loadAuthValues is called
apiKey: '${OCR_API_KEY}',
baseURL: '${OCR_BASEURL}',
mistralModel: 'mistral-medium',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
mimetype: 'application/pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Verify OCR result
expect(result).toEqual({
filename: 'document.pdf',
bytes: expect.any(Number),
filepath: 'mistral_ocr',
text: expect.stringContaining('# PAGE 1'),
images: ['base64image1', 'base64image2'],
});
});
it('should process OCR for an image file and use image_url type', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1',
});
// Mock file upload response
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-456', purpose: 'ocr' },
});
// Mock signed URL response
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com/image.png' },
});
// Mock OCR response for image
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [
{
markdown: 'Image OCR result',
images: [{ image_base64: 'imgbase64' }],
},
],
},
});
const req = {
user: { id: 'user456' },
app: {
locals: {
ocr: {
apiKey: '${OCR_API_KEY}',
baseURL: '${OCR_BASEURL}',
mistralModel: 'mistral-medium',
},
},
},
};
const file = {
path: '/tmp/upload/image.png',
originalname: 'image.png',
mimetype: 'image/png',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file456',
entity_id: 'entity456',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/image.png');
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user456',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Check that the OCR API was called with image_url type
expect(mockAxios.post).toHaveBeenCalledWith(
'https://api.mistral.ai/v1/ocr',
expect.objectContaining({
document: expect.objectContaining({
type: 'image_url',
image_url: 'https://signed-url.com/image.png',
}),
}),
expect.any(Object),
);
expect(result).toEqual({
filename: 'image.png',
bytes: expect.any(Number),
filepath: 'mistral_ocr',
text: expect.stringContaining('Image OCR result'),
images: ['imgbase64'],
});
});
it('should process variable references in configuration', async () => {
// Setup mocks with environment variables
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
CUSTOM_API_KEY: 'custom-api-key',
CUSTOM_BASEURL: 'https://custom-api.mistral.ai/v1',
});
// Mock API responses
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [{ markdown: 'Content from custom API' }],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: '${CUSTOM_API_KEY}',
baseURL: '${CUSTOM_BASEURL}',
mistralModel: '${CUSTOM_MODEL}',
},
},
},
};
// Set environment variable for model
process.env.CUSTOM_MODEL = 'mistral-large';
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify that custom environment variables were extracted and used
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['CUSTOM_BASEURL', 'CUSTOM_API_KEY'],
optional: expect.any(Set),
});
// Check that mistral-large was used in the OCR API call
expect(mockAxios.post).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
model: 'mistral-large',
}),
expect.anything(),
);
expect(result.text).toEqual('Content from custom API\n\n');
});
it('should fall back to default values when variables are not properly formatted', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'default-api-key',
OCR_BASEURL: undefined, // Testing optional parameter
});
mockAxios.post.mockResolvedValueOnce({
data: { id: 'file-123', purpose: 'ocr' },
});
mockAxios.get.mockResolvedValueOnce({
data: { url: 'https://signed-url.com' },
});
mockAxios.post.mockResolvedValueOnce({
data: {
pages: [{ markdown: 'Default API result' }],
},
});
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Use environment variable syntax to ensure loadAuthValues is called
apiKey: '${INVALID_FORMAT}', // Using valid env var format but with an invalid name
baseURL: '${OCR_BASEURL}', // Using valid env var format
mistralModel: 'mistral-ocr-latest', // Plain string value
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Should use the default values
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'INVALID_FORMAT'],
optional: expect.any(Set),
});
// Should use the default model when not using environment variable format
expect(mockAxios.post).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
model: 'mistral-ocr-latest',
}),
expect.anything(),
);
});
it('should handle API errors during OCR process', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
});
// Mock file upload to fail
mockAxios.post.mockRejectedValueOnce(new Error('Upload failed'));
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: 'OCR_API_KEY',
baseURL: 'OCR_BASEURL',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'document.pdf',
};
await expect(
uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
}),
).rejects.toThrow('Error uploading document to Mistral OCR API');
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
});
it('should handle single page documents without page numbering', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'test-api-key',
OCR_BASEURL: 'https://api.mistral.ai/v1', // Make sure this is included
});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Single page content' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
apiKey: 'OCR_API_KEY',
baseURL: 'OCR_BASEURL',
mistralModel: 'mistral-ocr-latest',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'single-page.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify that single page documents don't include page numbering
expect(result.text).not.toContain('# PAGE');
expect(result.text).toEqual('Single page content\n\n');
});
it('should use literal values in configuration when provided directly', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
// We'll still mock this but it should not be used for literal values
loadAuthValues.mockResolvedValue({});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Processed with literal config values' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Direct values that should be used as-is, without variable substitution
apiKey: 'actual-api-key-value',
baseURL: 'https://direct-api-url.mistral.ai/v1',
mistralModel: 'mistral-direct-model',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'direct-values.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify the correct URL was used with the direct baseURL value
expect(mockAxios.post).toHaveBeenCalledWith(
'https://direct-api-url.mistral.ai/v1/files',
expect.any(Object),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer actual-api-key-value',
}),
}),
);
// Check the OCR call was made with the direct model value
expect(mockAxios.post).toHaveBeenCalledWith(
'https://direct-api-url.mistral.ai/v1/ocr',
expect.objectContaining({
model: 'mistral-direct-model',
}),
expect.any(Object),
);
// Verify the result
expect(result.text).toEqual('Processed with literal config values\n\n');
// Verify loadAuthValues was never called since we used direct values
expect(loadAuthValues).not.toHaveBeenCalled();
});
it('should handle empty configuration values and use defaults', async () => {
const { loadAuthValues } = require('~/server/services/Tools/credentials');
// Set up the mock values to be returned by loadAuthValues
loadAuthValues.mockResolvedValue({
OCR_API_KEY: 'default-from-env-key',
OCR_BASEURL: 'https://default-from-env.mistral.ai/v1',
});
// Clear all previous mocks
mockAxios.post.mockClear();
mockAxios.get.mockClear();
// 1. First mock: File upload response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({ data: { id: 'file-123', purpose: 'ocr' } }),
);
// 2. Second mock: Signed URL response
mockAxios.get.mockImplementationOnce(() =>
Promise.resolve({ data: { url: 'https://signed-url.com' } }),
);
// 3. Third mock: OCR response
mockAxios.post.mockImplementationOnce(() =>
Promise.resolve({
data: {
pages: [{ markdown: 'Content from default configuration' }],
},
}),
);
const req = {
user: { id: 'user123' },
app: {
locals: {
ocr: {
// Empty string values - should fall back to defaults
apiKey: '',
baseURL: '',
mistralModel: '',
},
},
},
};
const file = {
path: '/tmp/upload/file.pdf',
originalname: 'empty-config.pdf',
};
const result = await uploadMistralOCR({
req,
file,
file_id: 'file123',
entity_id: 'entity123',
});
expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/file.pdf');
// Verify loadAuthValues was called with the default variable names
expect(loadAuthValues).toHaveBeenCalledWith({
userId: 'user123',
authFields: ['OCR_BASEURL', 'OCR_API_KEY'],
optional: expect.any(Set),
});
// Verify the API calls used the default values from loadAuthValues
expect(mockAxios.post).toHaveBeenCalledWith(
'https://default-from-env.mistral.ai/v1/files',
expect.any(Object),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: 'Bearer default-from-env-key',
}),
}),
);
// Verify the OCR model defaulted to mistral-ocr-latest
expect(mockAxios.post).toHaveBeenCalledWith(
'https://default-from-env.mistral.ai/v1/ocr',
expect.objectContaining({
model: 'mistral-ocr-latest',
}),
expect.any(Object),
);
// Check result
expect(result.text).toEqual('Content from default configuration\n\n');
});
});
});

View file

@ -1,5 +0,0 @@
const crud = require('./crud');
module.exports = {
...crud,
};

View file

@ -94,15 +94,28 @@ async function prepareImageURLS3(req, file) {
* @param {Buffer} params.buffer - Avatar image buffer.
* @param {string} params.userId - User's unique identifier.
* @param {string} params.manual - 'true' or 'false' flag for manual update.
* @param {string} [params.agentId] - Optional agent ID if this is an agent avatar.
* @param {string} [params.basePath='images'] - Base path in the bucket.
* @returns {Promise<string>} Signed URL of the uploaded avatar.
*/
async function processS3Avatar({ buffer, userId, manual, basePath = defaultBasePath }) {
async function processS3Avatar({ buffer, userId, manual, agentId, basePath = defaultBasePath }) {
try {
const downloadURL = await saveBufferToS3({ userId, buffer, fileName: 'avatar.png', basePath });
if (manual === 'true') {
const metadata = await sharp(buffer).metadata();
const extension = metadata.format === 'gif' ? 'gif' : 'png';
const timestamp = new Date().getTime();
/** Unique filename with timestamp and optional agent ID */
const fileName = agentId
? `agent-${agentId}-avatar-${timestamp}.${extension}`
: `avatar-${timestamp}.${extension}`;
const downloadURL = await saveBufferToS3({ userId, buffer, fileName, basePath });
// Only update user record if this is a user avatar (manual === 'true')
if (manual === 'true' && !agentId) {
await updateUser(userId, { avatar: downloadURL });
}
return downloadURL;
} catch (error) {
logger.error('[processS3Avatar] Error processing S3 avatar:', error.message);

View file

@ -1,9 +1,9 @@
const fs = require('fs');
const axios = require('axios');
const FormData = require('form-data');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Deletes a file from the vector database. This function takes a file object, constructs the full path, and

View file

@ -1,4 +1,5 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const {
FileSources,
VisionModes,
@ -7,8 +8,6 @@ const {
EModelEndpoint,
} = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Converts a readable stream to a base64 encoded string.

View file

@ -519,7 +519,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
throw new Error('OCR capability is not enabled for Agents');
}
const { handleFileUpload: uploadMistralOCR } = getStrategyFunctions(
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
req.app.locals?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { file_id, temp_file_id } = metadata;
@ -531,7 +531,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
images,
filename,
filepath: ocrFileURL,
} = await uploadMistralOCR({ req, file, file_id, entity_id: agent_id, basePath });
} = await uploadOCR({ req, file, loadAuthValues });
const fileInfo = removeNullishValues({
text,

View file

@ -1,4 +1,5 @@
const { FileSources } = require('librechat-data-provider');
const { uploadMistralOCR, uploadAzureMistralOCR } = require('@librechat/api');
const {
getFirebaseURL,
prepareImageURL,
@ -46,7 +47,6 @@ const {
const { uploadOpenAIFile, deleteOpenAIFile, getOpenAIFileStream } = require('./OpenAI');
const { getCodeOutputDownloadStream, uploadCodeEnvFile } = require('./Code');
const { uploadVectors, deleteVectors } = require('./VectorDB');
const { uploadMistralOCR } = require('./MistralOCR');
/**
* Firebase Storage Strategy Functions
@ -190,6 +190,26 @@ const mistralOCRStrategy = () => ({
handleFileUpload: uploadMistralOCR,
});
const azureMistralOCRStrategy = () => ({
/** @type {typeof saveFileFromURL | null} */
saveURL: null,
/** @type {typeof getLocalFileURL | null} */
getFileURL: null,
/** @type {typeof saveLocalBuffer | null} */
saveBuffer: null,
/** @type {typeof processLocalAvatar | null} */
processAvatar: null,
/** @type {typeof uploadLocalImage | null} */
handleImageUpload: null,
/** @type {typeof prepareImagesLocal | null} */
prepareImagePayload: null,
/** @type {typeof deleteLocalFile | null} */
deleteFile: null,
/** @type {typeof getLocalFileStream | null} */
getDownloadStream: null,
handleFileUpload: uploadAzureMistralOCR,
});
// Strategy Selector
const getStrategyFunctions = (fileSource) => {
if (fileSource === FileSources.firebase) {
@ -210,6 +230,8 @@ const getStrategyFunctions = (fileSource) => {
return codeOutputStrategy();
} else if (fileSource === FileSources.mistral_ocr) {
return mistralOCRStrategy();
} else if (fileSource === FileSources.azure_mistral_ocr) {
return azureMistralOCRStrategy();
} else {
throw new Error('Invalid file source');
}
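
A minimal sketch of how the new source is dispatched (mirrors the selector above):

const { FileSources } = require('librechat-data-provider');
const { getStrategyFunctions } = require('./strategies');

const strategy = getStrategyFunctions(FileSources.azure_mistral_ocr);
// strategy.handleFileUpload is uploadAzureMistralOCR from '@librechat/api';
// every other hook (saveURL, getFileURL, processAvatar, deleteFile, ...) is null for this source.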

View file

@ -1,6 +1,6 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { normalizeServerName } = require('librechat-mcp');
const { normalizeServerName } = require('@librechat/api');
const { Constants: AgentConstants, Providers } = require('@librechat/agents');
const {
Constants,
@ -50,9 +50,10 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
/** @type {(toolArguments: Object | string, config?: GraphRunnableConfig) => Promise<unknown>} */
const _call = async (toolArguments, config) => {
const userId = config?.configurable?.user?.id || config?.configurable?.user_id;
try {
const derivedSignal = config?.signal ? AbortSignal.any([config.signal]) : undefined;
const mcpManager = getMCPManager(config?.configurable?.user_id);
const mcpManager = getMCPManager(userId);
const provider = (config?.metadata?.provider || _provider)?.toLowerCase();
const result = await mcpManager.callTool({
serverName,
@ -60,8 +61,8 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
provider,
toolArguments,
options: {
userId: config?.configurable?.user_id,
signal: derivedSignal,
user: config?.configurable?.user,
},
});
@ -74,7 +75,7 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
return result;
} catch (error) {
logger.error(
`[MCP][User: ${config?.configurable?.user_id}][${serverName}] Error calling "${toolName}" MCP tool:`,
`[MCP][User: ${userId}][${serverName}] Error calling "${toolName}" MCP tool:`,
error,
);
throw new Error(

View file

@ -1,12 +1,13 @@
const axios = require('axios');
const { Providers } = require('@librechat/agents');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
const { inputSchema, logAxiosError, extractBaseURL, processModelData } = require('~/utils');
const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { isUserProvided } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');
const { logger } = require('~/config');
/**
* Splits a string by commas and trims each resulting value.

View file

@ -1,6 +1,6 @@
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
const { logger } = require('~/config');
const {
fetchModels,
@ -28,7 +28,8 @@ jest.mock('~/cache/getLogStores', () =>
set: jest.fn().mockResolvedValue(true),
})),
);
jest.mock('~/config', () => ({
jest.mock('@librechat/data-schemas', () => ({
...jest.requireActual('@librechat/data-schemas'),
logger: {
error: jest.fn(),
},

View file

@ -1,6 +1,6 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { logAxiosError } = require('~/utils');
/**
* @typedef {Object} RetrieveOptions

View file

@ -1,8 +1,9 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { TokenExchangeMethodEnum } = require('librechat-data-provider');
const { handleOAuthToken } = require('~/models/Token');
const { decryptV2 } = require('~/server/utils/crypto');
const { logAxiosError } = require('~/utils');
const { logger } = require('~/config');
/**
* Processes the access tokens and stores them in the database.
@ -49,6 +50,7 @@ async function processAccessTokens(tokenData, { userId, identifier }) {
* @param {string} fields.client_url - The URL of the OAuth provider.
* @param {string} fields.identifier - The identifier for the token.
* @param {string} fields.refresh_token - The refresh token to use.
* @param {string} fields.token_exchange_method - The token exchange method ('default_post' or 'basic_auth_header').
* @param {string} fields.encrypted_oauth_client_id - The client ID for the OAuth provider.
* @param {string} fields.encrypted_oauth_client_secret - The client secret for the OAuth provider.
* @returns {Promise<{
@ -63,26 +65,36 @@ const refreshAccessToken = async ({
client_url,
identifier,
refresh_token,
token_exchange_method,
encrypted_oauth_client_id,
encrypted_oauth_client_secret,
}) => {
try {
const oauth_client_id = await decryptV2(encrypted_oauth_client_id);
const oauth_client_secret = await decryptV2(encrypted_oauth_client_secret);
const headers = {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
};
const params = new URLSearchParams({
client_id: oauth_client_id,
client_secret: oauth_client_secret,
grant_type: 'refresh_token',
refresh_token,
});
if (token_exchange_method === TokenExchangeMethodEnum.BasicAuthHeader) {
const basicAuth = Buffer.from(`${oauth_client_id}:${oauth_client_secret}`).toString('base64');
headers['Authorization'] = `Basic ${basicAuth}`;
} else {
params.append('client_id', oauth_client_id);
params.append('client_secret', oauth_client_secret);
}
const response = await axios({
method: 'POST',
url: client_url,
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
},
headers,
data: params.toString(),
});
await processAccessTokens(response.data, {
@ -110,6 +122,7 @@ const refreshAccessToken = async ({
* @param {string} fields.identifier - The identifier for the token.
* @param {string} fields.client_url - The URL of the OAuth provider.
* @param {string} fields.redirect_uri - The redirect URI for the OAuth provider.
* @param {string} fields.token_exchange_method - The token exchange method ('default_post' or 'basic_auth_header').
* @param {string} fields.encrypted_oauth_client_id - The client ID for the OAuth provider.
* @param {string} fields.encrypted_oauth_client_secret - The client secret for the OAuth provider.
* @returns {Promise<{
@ -125,27 +138,37 @@ const getAccessToken = async ({
identifier,
client_url,
redirect_uri,
token_exchange_method,
encrypted_oauth_client_id,
encrypted_oauth_client_secret,
}) => {
const oauth_client_id = await decryptV2(encrypted_oauth_client_id);
const oauth_client_secret = await decryptV2(encrypted_oauth_client_secret);
const headers = {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
};
const params = new URLSearchParams({
code,
client_id: oauth_client_id,
client_secret: oauth_client_secret,
grant_type: 'authorization_code',
redirect_uri,
});
if (token_exchange_method === TokenExchangeMethodEnum.BasicAuthHeader) {
const basicAuth = Buffer.from(`${oauth_client_id}:${oauth_client_secret}`).toString('base64');
headers['Authorization'] = `Basic ${basicAuth}`;
} else {
params.append('client_id', oauth_client_id);
params.append('client_secret', oauth_client_secret);
}
try {
const response = await axios({
method: 'POST',
url: client_url,
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
Accept: 'application/json',
},
headers,
data: params.toString(),
});
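
A condensed sketch of the two exchange styles this change supports (credential and grant values are illustrative): in both cases the grant parameters travel in the form body, and only the client credentials move between the body and the Authorization header.

const { TokenExchangeMethodEnum } = require('librechat-data-provider');

// Illustrative values
const clientId = 'my-client-id';
const clientSecret = 'my-client-secret';
const tokenExchangeMethod = TokenExchangeMethodEnum.BasicAuthHeader;

const headers = { 'Content-Type': 'application/x-www-form-urlencoded', Accept: 'application/json' };
const params = new URLSearchParams({
  grant_type: 'authorization_code',
  code: 'abc',
  redirect_uri: 'https://example.com/cb',
});

if (tokenExchangeMethod === TokenExchangeMethodEnum.BasicAuthHeader) {
  headers.Authorization = `Basic ${Buffer.from(`${clientId}:${clientSecret}`).toString('base64')}`;
} else {
  params.append('client_id', clientId);
  params.append('client_secret', clientSecret);
}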

View file

@ -500,6 +500,8 @@ async function processRequiredActions(client, requiredActions) {
async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey }) {
if (!agent.tools || agent.tools.length === 0) {
return {};
} else if (agent.tools && agent.tools.length === 1 && agent.tools[0] === AgentCapabilities.ocr) {
return {};
}
const endpointsConfig = await getEndpointsConfig(req);

View file

@ -1,14 +0,0 @@
const { EModelEndpoint, agentsEndpointSChema } = require('librechat-data-provider');
/**
* Sets up the Agents configuration from the config (`librechat.yaml`) file.
* @param {TCustomConfig} config - The loaded custom configuration.
* @returns {Partial<TAgentsEndpoint>} The Agents endpoint configuration.
*/
function agentsConfigSetup(config) {
const agentsConfig = config.endpoints[EModelEndpoint.agents];
const parsedConfig = agentsEndpointSChema.parse(agentsConfig);
return parsedConfig;
}
module.exports = { agentsConfigSetup };

View file

@ -2,6 +2,7 @@ const {
SystemRoles,
Permissions,
PermissionTypes,
isMemoryEnabled,
removeNullishValues,
} = require('librechat-data-provider');
const { updateAccessPermissions } = require('~/models/Role');
@ -20,6 +21,14 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
const hasModelSpecs = config?.modelSpecs?.list?.length > 0;
const includesAddedEndpoints = config?.modelSpecs?.addedEndpoints?.length > 0;
const memoryConfig = config?.memory;
const memoryEnabled = isMemoryEnabled(memoryConfig);
/** Only disable memories if memory config is present but disabled/invalid */
const shouldDisableMemories = memoryConfig && !memoryEnabled;
/** Check if personalization is enabled (defaults to true if memory is configured and enabled) */
const isPersonalizationEnabled =
memoryConfig && memoryEnabled && memoryConfig.personalize !== false;
/** @type {TCustomConfig['interface']} */
const loadedInterface = removeNullishValues({
endpointsMenu:
@ -33,6 +42,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
privacyPolicy: interfaceConfig?.privacyPolicy ?? defaults.privacyPolicy,
termsOfService: interfaceConfig?.termsOfService ?? defaults.termsOfService,
bookmarks: interfaceConfig?.bookmarks ?? defaults.bookmarks,
memories: shouldDisableMemories ? false : (interfaceConfig?.memories ?? defaults.memories),
prompts: interfaceConfig?.prompts ?? defaults.prompts,
multiConvo: interfaceConfig?.multiConvo ?? defaults.multiConvo,
agents: interfaceConfig?.agents ?? defaults.agents,
@ -45,6 +55,10 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
await updateAccessPermissions(roleName, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: loadedInterface.bookmarks },
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: loadedInterface.memories,
[Permissions.OPT_OUT]: isPersonalizationEnabled,
},
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: loadedInterface.multiConvo },
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
@ -54,6 +68,10 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
await updateAccessPermissions(SystemRoles.ADMIN, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: loadedInterface.bookmarks },
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: loadedInterface.memories,
[Permissions.OPT_OUT]: isPersonalizationEnabled,
},
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: loadedInterface.multiConvo },
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
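
A small sketch of how the three memory flags above interact, assuming isMemoryEnabled returns true only for a valid, non-disabled config (the config shape here is illustrative, not the full schema):

const { isMemoryEnabled } = require('librechat-data-provider');

const memoryConfig = { personalize: false /* ...rest of a valid memory config */ };
const memoryEnabled = isMemoryEnabled(memoryConfig);

const shouldDisableMemories = memoryConfig && !memoryEnabled;
const isPersonalizationEnabled = memoryConfig && memoryEnabled && memoryConfig.personalize !== false;
// With a valid config and personalize: false → memories stay enabled,
// but the OPT_OUT (personalization) permission is not granted.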

View file

@ -12,6 +12,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: true,
multiConvo: true,
agents: true,
temporaryChat: true,
@ -26,6 +27,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -39,6 +41,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: false,
bookmarks: false,
memories: false,
multiConvo: false,
agents: false,
temporaryChat: false,
@ -53,6 +56,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: false },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: false },
@ -70,6 +74,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -83,6 +88,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: undefined,
bookmarks: undefined,
memories: undefined,
multiConvo: undefined,
agents: undefined,
temporaryChat: undefined,
@ -97,6 +103,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -110,6 +117,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: undefined,
agents: true,
temporaryChat: undefined,
@ -124,6 +132,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -138,6 +147,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: true,
multiConvo: true,
agents: true,
temporaryChat: true,
@ -151,6 +161,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -168,6 +179,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -185,6 +197,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -202,6 +215,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: undefined },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: undefined },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: undefined },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: undefined },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -215,6 +229,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: true,
agents: false,
temporaryChat: true,
@ -228,6 +243,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
@ -242,6 +258,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: true,
memories: false,
multiConvo: false,
agents: undefined,
temporaryChat: undefined,
@ -255,6 +272,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: false },
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
@ -268,6 +286,7 @@ describe('loadDefaultInterface', () => {
interface: {
prompts: true,
bookmarks: false,
memories: true,
multiConvo: true,
agents: false,
temporaryChat: true,
@ -281,6 +300,7 @@ describe('loadDefaultInterface', () => {
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MEMORIES]: { [Permissions.USE]: true },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },

View file

@ -1,5 +1,3 @@
const path = require('path');
const crypto = require('crypto');
const {
Capabilities,
EModelEndpoint,
@ -218,38 +216,6 @@ function normalizeEndpointName(name = '') {
return name.toLowerCase() === Providers.OLLAMA ? Providers.OLLAMA : name;
}
/**
* Sanitize a filename by removing any directory components, replacing non-alphanumeric characters
* @param {string} inputName
* @returns {string}
*/
function sanitizeFilename(inputName) {
// Remove any directory components
let name = path.basename(inputName);
// Replace any non-alphanumeric characters except for '.' and '-'
name = name.replace(/[^a-zA-Z0-9.-]/g, '_');
// Ensure the name doesn't start with a dot (hidden file in Unix-like systems)
if (name.startsWith('.') || name === '') {
name = '_' + name;
}
// Limit the length of the filename
const MAX_LENGTH = 255;
if (name.length > MAX_LENGTH) {
const ext = path.extname(name);
const nameWithoutExt = path.basename(name, ext);
name =
nameWithoutExt.slice(0, MAX_LENGTH - ext.length - 7) +
'-' +
crypto.randomBytes(3).toString('hex') +
ext;
}
return name;
}
module.exports = {
isEnabled,
handleText,
@ -260,6 +226,5 @@ module.exports = {
generateConfig,
addSpaceIfNeeded,
createOnProgress,
sanitizeFilename,
normalizeEndpointName,
};

View file

@ -1,103 +0,0 @@
const { isEnabled, sanitizeFilename } = require('./handleText');
describe('isEnabled', () => {
test('should return true when input is "true"', () => {
expect(isEnabled('true')).toBe(true);
});
test('should return true when input is "TRUE"', () => {
expect(isEnabled('TRUE')).toBe(true);
});
test('should return true when input is true', () => {
expect(isEnabled(true)).toBe(true);
});
test('should return false when input is "false"', () => {
expect(isEnabled('false')).toBe(false);
});
test('should return false when input is false', () => {
expect(isEnabled(false)).toBe(false);
});
test('should return false when input is null', () => {
expect(isEnabled(null)).toBe(false);
});
test('should return false when input is undefined', () => {
expect(isEnabled()).toBe(false);
});
test('should return false when input is an empty string', () => {
expect(isEnabled('')).toBe(false);
});
test('should return false when input is a whitespace string', () => {
expect(isEnabled(' ')).toBe(false);
});
test('should return false when input is a number', () => {
expect(isEnabled(123)).toBe(false);
});
test('should return false when input is an object', () => {
expect(isEnabled({})).toBe(false);
});
test('should return false when input is an array', () => {
expect(isEnabled([])).toBe(false);
});
});
jest.mock('crypto', () => {
const actualModule = jest.requireActual('crypto');
return {
...actualModule,
randomBytes: jest.fn().mockReturnValue(Buffer.from('abc123', 'hex')),
};
});
describe('sanitizeFilename', () => {
test('removes directory components (1/2)', () => {
expect(sanitizeFilename('/path/to/file.txt')).toBe('file.txt');
});
test('removes directory components (2/2)', () => {
expect(sanitizeFilename('../../../../file.txt')).toBe('file.txt');
});
test('replaces non-alphanumeric characters', () => {
expect(sanitizeFilename('file name@#$.txt')).toBe('file_name___.txt');
});
test('preserves dots and hyphens', () => {
expect(sanitizeFilename('file-name.with.dots.txt')).toBe('file-name.with.dots.txt');
});
test('prepends underscore to filenames starting with a dot', () => {
expect(sanitizeFilename('.hiddenfile')).toBe('_.hiddenfile');
});
test('truncates long filenames', () => {
const longName = 'a'.repeat(300) + '.txt';
const result = sanitizeFilename(longName);
expect(result.length).toBe(255);
expect(result).toMatch(/^a+-abc123\.txt$/);
});
test('handles filenames with no extension', () => {
const longName = 'a'.repeat(300);
const result = sanitizeFilename(longName);
expect(result.length).toBe(255);
expect(result).toMatch(/^a+-abc123$/);
});
test('handles empty input', () => {
expect(sanitizeFilename('')).toBe('_');
});
test('handles input with only special characters', () => {
expect(sanitizeFilename('@#$%^&*')).toBe('_______');
});
});
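
Both the `sanitizeFilename` helper removed above and its unit tests deleted here leave `api/server/utils`; given the rest of this refactor, they are presumably relocated into the shared package rather than dropped. A TypeScript sketch of the equivalent helper, assuming the behavior stays the same as the removed JavaScript:

import path from 'path';
import crypto from 'crypto';

export function sanitizeFilename(inputName: string): string {
  // Remove any directory components
  let name = path.basename(inputName);
  // Replace any non-alphanumeric characters except for '.' and '-'
  name = name.replace(/[^a-zA-Z0-9.-]/g, '_');
  // Ensure the name doesn't start with a dot or end up empty
  if (name.startsWith('.') || name === '') {
    name = '_' + name;
  }
  // Truncate long names, keeping the extension and adding a short random suffix
  const MAX_LENGTH = 255;
  if (name.length > MAX_LENGTH) {
    const ext = path.extname(name);
    const base = path.basename(name, ext);
    name = base.slice(0, MAX_LENGTH - ext.length - 7) + '-' + crypto.randomBytes(3).toString('hex') + ext;
  }
  return name;
}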

View file

@ -41,10 +41,7 @@ jest.mock('winston-daily-rotate-file', () => {
});
jest.mock('~/config', () => {
const actualModule = jest.requireActual('~/config');
return {
sendEvent: actualModule.sendEvent,
createAxiosInstance: actualModule.createAxiosInstance,
logger: {
info: jest.fn(),
warn: jest.fn(),

View file

@ -1073,7 +1073,7 @@
/**
* @exports MCPServers
* @typedef {import('librechat-mcp').MCPServers} MCPServers
* @typedef {import('@librechat/api').MCPServers} MCPServers
* @memberof typedefs
*/
@ -1085,31 +1085,31 @@
/**
* @exports MCPManager
* @typedef {import('librechat-mcp').MCPManager} MCPManager
* @typedef {import('@librechat/api').MCPManager} MCPManager
* @memberof typedefs
*/
/**
* @exports FlowStateManager
* @typedef {import('librechat-mcp').FlowStateManager} FlowStateManager
* @typedef {import('@librechat/api').FlowStateManager} FlowStateManager
* @memberof typedefs
*/
/**
* @exports LCAvailableTools
* @typedef {import('librechat-mcp').LCAvailableTools} LCAvailableTools
* @typedef {import('@librechat/api').LCAvailableTools} LCAvailableTools
* @memberof typedefs
*/
/**
* @exports LCTool
* @typedef {import('librechat-mcp').LCTool} LCTool
* @typedef {import('@librechat/api').LCTool} LCTool
* @memberof typedefs
*/
/**
* @exports FormattedContent
* @typedef {import('librechat-mcp').FormattedContent} FormattedContent
* @typedef {import('@librechat/api').FormattedContent} FormattedContent
* @memberof typedefs
*/
@ -1232,7 +1232,7 @@
* @typedef {Object} AgentClientOptions
* @property {Agent} agent - The agent configuration object
* @property {string} endpoint - The endpoint identifier for the agent
* @property {Object} req - The request object
* @property {ServerRequest} req - The request object
* @property {string} [name] - The username
* @property {string} [modelLabel] - The label for the model being used
* @property {number} [maxContextTokens] - Maximum number of tokens allowed in context
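
The typedef updates above retarget the JSDoc imports from the retired `librechat-mcp` package to `@librechat/api` (and tighten the `req` typedef to `ServerRequest`). The TypeScript-side equivalent of the import change, assuming the new package re-exports the same types:

// Before: import type { MCPManager, MCPServers } from 'librechat-mcp';
import type {
  MCPManager,
  MCPServers,
  FlowStateManager,
  LCAvailableTools,
  LCTool,
  FormattedContent,
} from '@librechat/api';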

View file

@ -1,46 +0,0 @@
const { logger } = require('~/config');
/**
* Logs Axios errors based on the error object and a custom message.
*
* @param {Object} options - The options object.
* @param {string} options.message - The custom message to be logged.
* @param {import('axios').AxiosError} options.error - The Axios error object.
* @returns {string} The log message.
*/
const logAxiosError = ({ message, error }) => {
let logMessage = message;
try {
const stack = error.stack || 'No stack trace available';
if (error.response?.status) {
const { status, headers, data } = error.response;
logMessage = `${message} The server responded with status ${status}: ${error.message}`;
logger.error(logMessage, {
status,
headers,
data,
stack,
});
} else if (error.request) {
const { method, url } = error.config || {};
logMessage = `${message} No response received for ${method ? method.toUpperCase() : ''} ${url || ''}: ${error.message}`;
logger.error(logMessage, {
requestInfo: { method, url },
stack,
});
} else if (error?.message?.includes("Cannot read properties of undefined (reading 'status')")) {
logMessage = `${message} It appears the request timed out or was unsuccessful: ${error.message}`;
logger.error(logMessage, { stack });
} else {
logMessage = `${message} An error occurred while setting up the request: ${error.message}`;
logger.error(logMessage, { stack });
}
} catch (err) {
logMessage = `Error in logAxiosError: ${err.message}`;
logger.error(logMessage, { stack: err.stack || 'No stack trace available' });
}
return logMessage;
};
module.exports = { logAxiosError };
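
The `logAxiosError` helper is deleted from `api/server/utils/axios.js`; consistent with the rest of this refactor, it presumably moves into the shared `@librechat/api` package rather than being removed outright. A hedged usage sketch, keeping the `{ message, error }` signature the removed implementation exposed:

import axios from 'axios';
import type { AxiosError } from 'axios';
import { logAxiosError } from '@librechat/api'; // assumed new home; not confirmed by this diff

async function fetchRemoteConfig(url: string) {
  try {
    const { data } = await axios.get(url);
    return data;
  } catch (error) {
    // The removed implementation logged status/headers/data for response errors
    // and request info when no response arrived.
    logAxiosError({ message: 'Failed to fetch remote config.', error: error as AxiosError });
    return null;
  }
}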

View file

@ -1,7 +1,5 @@
const loadYaml = require('./loadYaml');
const axiosHelpers = require('./axios');
const tokenHelpers = require('./tokens');
const azureUtils = require('./azureUtils');
const deriveBaseURL = require('./deriveBaseURL');
const extractBaseURL = require('./extractBaseURL');
const findMessageContent = require('./findMessageContent');
@ -10,8 +8,6 @@ module.exports = {
loadYaml,
deriveBaseURL,
extractBaseURL,
...azureUtils,
...axiosHelpers,
...tokenHelpers,
findMessageContent,
};

View file

@ -0,0 +1,45 @@
import React, { createContext, useContext, useState } from 'react';
import { Action, MCP, EModelEndpoint } from 'librechat-data-provider';
import type { AgentPanelContextType } from '~/common';
import { useGetActionsQuery } from '~/data-provider';
import { Panel } from '~/common';
const AgentPanelContext = createContext<AgentPanelContextType | undefined>(undefined);
export function useAgentPanelContext() {
const context = useContext(AgentPanelContext);
if (context === undefined) {
throw new Error('useAgentPanelContext must be used within an AgentPanelProvider');
}
return context;
}
/** Houses relevant state for the Agent Form Panels (formerly 'commonProps') */
export function AgentPanelProvider({ children }: { children: React.ReactNode }) {
const [mcp, setMcp] = useState<MCP | undefined>(undefined);
const [mcps, setMcps] = useState<MCP[] | undefined>(undefined);
const [action, setAction] = useState<Action | undefined>(undefined);
const [activePanel, setActivePanel] = useState<Panel>(Panel.builder);
const [agent_id, setCurrentAgentId] = useState<string | undefined>(undefined);
const { data: actions } = useGetActionsQuery(EModelEndpoint.agents, {
enabled: !!agent_id,
});
const value = {
action,
setAction,
mcp,
setMcp,
mcps,
setMcps,
activePanel,
setActivePanel,
setCurrentAgentId,
agent_id,
/** Query data for actions */
actions,
};
return <AgentPanelContext.Provider value={value}>{children}</AgentPanelContext.Provider>;
}
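
The new provider centralizes the agent-panel state that was previously threaded through props (the 'commonProps' noted above). A hypothetical consumer, assuming the provider is re-exported from `~/Providers` as the index change further down suggests; component names are illustrative, not from the diff:

import { AgentPanelProvider, useAgentPanelContext } from '~/Providers';
import { Panel } from '~/common';

function OpenMCPPanelButton() {
  const { setActivePanel, agent_id } = useAgentPanelContext();
  return (
    <button disabled={!agent_id} onClick={() => setActivePanel(Panel.mcp)}>
      Configure MCP
    </button>
  );
}

export default function AgentSidePanel() {
  return (
    <AgentPanelProvider>
      <OpenMCPPanelButton />
    </AgentPanelProvider>
  );
}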

View file

@ -1,8 +1,8 @@
import { useForm, FormProvider } from 'react-hook-form';
import { createContext, useContext } from 'react';
import { defaultAgentFormValues } from 'librechat-data-provider';
import type { UseFormReturn } from 'react-hook-form';
import type { AgentForm } from '~/common';
import { getDefaultAgentFormValues } from '~/utils';
type AgentsContextType = UseFormReturn<AgentForm>;
@ -20,7 +20,7 @@ export function useAgentsContext() {
export default function AgentsProvider({ children }) {
const methods = useForm<AgentForm>({
defaultValues: defaultAgentFormValues,
defaultValues: getDefaultAgentFormValues(),
});
return <FormProvider {...methods}>{children}</FormProvider>;
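
The form defaults now come from a `getDefaultAgentFormValues()` call instead of the static `defaultAgentFormValues` constant. The utility itself is not shown in this diff; a plausible sketch is a factory that returns a fresh copy per form instance (and could fold in runtime defaults), which a shared module-level constant cannot do:

import { defaultAgentFormValues } from 'librechat-data-provider';
import type { AgentForm } from '~/common';

// Sketch only; the real ~/utils implementation may differ.
export function getDefaultAgentFormValues(): AgentForm {
  // A fresh object per call avoids accidental shared state between mounted forms.
  return { ...defaultAgentFormValues } as AgentForm;
}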

View file

@ -1,6 +1,7 @@
export { default as ToastProvider } from './ToastContext';
export { default as AssistantsProvider } from './AssistantsContext';
export { default as AgentsProvider } from './AgentsContext';
export { default as ToastProvider } from './ToastContext';
export * from './AgentPanelContext';
export * from './ChatContext';
export * from './ShareContext';
export * from './ToastContext';

client/src/common/mcp.ts (new file, 26 additions)
View file

@ -0,0 +1,26 @@
import {
AuthorizationTypeEnum,
AuthTypeEnum,
TokenExchangeMethodEnum,
} from 'librechat-data-provider';
import { MCPForm } from '~/common/types';
export const defaultMCPFormValues: MCPForm = {
type: AuthTypeEnum.None,
saved_auth_fields: false,
api_key: '',
authorization_type: AuthorizationTypeEnum.Basic,
custom_auth_header: '',
oauth_client_id: '',
oauth_client_secret: '',
authorization_url: '',
client_url: '',
scope: '',
token_exchange_method: TokenExchangeMethodEnum.DefaultPost,
name: '',
description: '',
url: '',
tools: [],
icon: '',
trust: false,
};
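
These defaults mirror the action-auth defaults but add the MCP-specific fields (`name`, `url`, `tools`, and so on) defined in the `MCPForm` type further down. A hypothetical consumer wiring them into react-hook-form (component name is illustrative, not from the diff):

import type { ReactNode } from 'react';
import { useForm, FormProvider } from 'react-hook-form';
import type { MCPForm } from '~/common/types';
import { defaultMCPFormValues } from '~/common/mcp';

export default function MCPFormProvider({ children }: { children: ReactNode }) {
  const methods = useForm<MCPForm>({ defaultValues: defaultMCPFormValues });
  return <FormProvider {...methods}>{children}</FormProvider>;
}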

View file

@ -143,6 +143,7 @@ export enum Panel {
actions = 'actions',
model = 'model',
version = 'version',
mcp = 'mcp',
}
export type FileSetter =
@ -166,6 +167,15 @@ export type ActionAuthForm = {
token_exchange_method: t.TokenExchangeMethodEnum;
};
export type MCPForm = ActionAuthForm & {
name?: string;
description?: string;
url?: string;
tools?: string[];
icon?: string;
trust?: boolean;
};
export type ActionWithNullableMetadata = Omit<t.Action, 'metadata'> & {
metadata: t.ActionMetadata | null;
};
@ -188,16 +198,33 @@ export type AgentPanelProps = {
index?: number;
agent_id?: string;
activePanel?: string;
mcp?: t.MCP;
mcps?: t.MCP[];
action?: t.Action;
actions?: t.Action[];
createMutation: UseMutationResult<t.Agent, Error, t.AgentCreateParams>;
setActivePanel: React.Dispatch<React.SetStateAction<Panel>>;
setMcp: React.Dispatch<React.SetStateAction<t.MCP | undefined>>;
setAction: React.Dispatch<React.SetStateAction<t.Action | undefined>>;
endpointsConfig?: t.TEndpointsConfig;
setCurrentAgentId: React.Dispatch<React.SetStateAction<string | undefined>>;
agentsConfig?: t.TAgentsEndpoint | null;
};
export type AgentPanelContextType = {
action?: t.Action;
actions?: t.Action[];
setAction: React.Dispatch<React.SetStateAction<t.Action | undefined>>;
mcp?: t.MCP;
mcps?: t.MCP[];
setMcp: React.Dispatch<React.SetStateAction<t.MCP | undefined>>;
setMcps: React.Dispatch<React.SetStateAction<t.MCP[] | undefined>>;
activePanel?: string;
setActivePanel: React.Dispatch<React.SetStateAction<Panel>>;
setCurrentAgentId: React.Dispatch<React.SetStateAction<string | undefined>>;
agent_id?: string;
};
export type AgentModelPanelProps = {
agent_id?: string;
providers: Option[];

View file

@ -1,37 +0,0 @@
// client/src/hooks/useDebounceCodeBlock.ts
import { useCallback, useEffect } from 'react';
import debounce from 'lodash/debounce';
import { useSetRecoilState } from 'recoil';
import { codeBlocksState, codeBlockIdsState } from '~/store/artifacts';
import type { CodeBlock } from '~/common';
export function useDebounceCodeBlock() {
const setCodeBlocks = useSetRecoilState(codeBlocksState);
const setCodeBlockIds = useSetRecoilState(codeBlockIdsState);
const updateCodeBlock = useCallback((codeBlock: CodeBlock) => {
console.log('Updating code block:', codeBlock);
setCodeBlocks((prev) => ({
...prev,
[codeBlock.id]: codeBlock,
}));
setCodeBlockIds((prev) =>
prev.includes(codeBlock.id) ? prev : [...prev, codeBlock.id],
);
}, [setCodeBlocks, setCodeBlockIds]);
const debouncedUpdateCodeBlock = useCallback(
debounce((codeBlock: CodeBlock) => {
updateCodeBlock(codeBlock);
}, 25),
[updateCodeBlock],
);
useEffect(() => {
return () => {
debouncedUpdateCodeBlock.cancel();
};
}, [debouncedUpdateCodeBlock]);
return debouncedUpdateCodeBlock;
}

View file

@ -1,23 +1,12 @@
import { TranslationKeys, useLocalize } from '~/hooks';
import { BlinkAnimation } from './BlinkAnimation';
import { TStartupConfig } from 'librechat-data-provider';
import { ErrorMessage } from '~/components/Auth/ErrorMessage';
import SocialLoginRender from './SocialLoginRender';
import { ThemeSelector } from '~/components/ui';
import { BlinkAnimation } from './BlinkAnimation';
import { ThemeSelector } from '~/components';
import { Banner } from '../Banners';
import Footer from './Footer';
const ErrorRender = ({ children }: { children: React.ReactNode }) => (
<div className="mt-16 flex justify-center">
<div
role="alert"
aria-live="assertive"
className="rounded-md border border-red-500 bg-red-500/10 px-3 py-2 text-sm text-gray-600 dark:text-gray-200"
>
{children}
</div>
</div>
);
function AuthLayout({
children,
header,
@ -40,19 +29,29 @@ function AuthLayout({
const hasStartupConfigError = startupConfigError !== null && startupConfigError !== undefined;
const DisplayError = () => {
if (hasStartupConfigError) {
return <ErrorRender>{localize('com_auth_error_login_server')}</ErrorRender>;
return (
<div className="mx-auto sm:max-w-sm">
<ErrorMessage>{localize('com_auth_error_login_server')}</ErrorMessage>
</div>
);
} else if (error === 'com_auth_error_invalid_reset_token') {
return (
<ErrorRender>
{localize('com_auth_error_invalid_reset_token')}{' '}
<a className="font-semibold text-green-600 hover:underline" href="/forgot-password">
{localize('com_auth_click_here')}
</a>{' '}
{localize('com_auth_to_try_again')}
</ErrorRender>
<div className="mx-auto sm:max-w-sm">
<ErrorMessage>
{localize('com_auth_error_invalid_reset_token')}{' '}
<a className="font-semibold text-green-600 hover:underline" href="/forgot-password">
{localize('com_auth_click_here')}
</a>{' '}
{localize('com_auth_to_try_again')}
</ErrorMessage>
</div>
);
} else if (error != null && error) {
return <ErrorRender>{localize(error)}</ErrorRender>;
return (
<div className="mx-auto sm:max-w-sm">
<ErrorMessage>{localize(error)}</ErrorMessage>
</div>
);
}
return null;
};
@ -87,8 +86,8 @@ function AuthLayout({
{children}
{!pathname.includes('2fa') &&
(pathname.includes('login') || pathname.includes('register')) && (
<SocialLoginRender startupConfig={startupConfig} />
)}
<SocialLoginRender startupConfig={startupConfig} />
)}
</div>
</div>
<Footer startupConfig={startupConfig} />

View file

@ -2,7 +2,7 @@ export const ErrorMessage = ({ children }: { children: React.ReactNode }) => (
<div
role="alert"
aria-live="assertive"
className="relative mt-6 rounded-lg border border-red-500/20 bg-red-50/50 px-6 py-4 text-red-700 shadow-sm transition-all dark:bg-red-950/30 dark:text-red-100"
className="relative mt-6 rounded-xl border border-red-500/20 bg-red-50/50 px-6 py-4 text-red-700 shadow-sm transition-all dark:bg-red-950/30 dark:text-red-100"
>
{children}
</div>

View file

@ -3,11 +3,11 @@ import { useEffect, useState } from 'react';
import { useAuthContext } from '~/hooks/AuthContext';
import type { TLoginLayoutContext } from '~/common';
import { ErrorMessage } from '~/components/Auth/ErrorMessage';
import SocialButton from '~/components/Auth/SocialButton';
import { OpenIDIcon } from '~/components';
import { getLoginError } from '~/utils';
import { useLocalize } from '~/hooks';
import LoginForm from './LoginForm';
import SocialButton from '~/components/Auth/SocialButton';
import { OpenIDIcon } from '~/components';
function Login() {
const localize = useLocalize();

View file

@ -5,6 +5,7 @@ import type { TLoginUser, TStartupConfig } from 'librechat-data-provider';
import type { TAuthContext } from '~/common';
import { useResendVerificationEmail, useGetStartupConfig } from '~/data-provider';
import { ThemeContext, useLocalize } from '~/hooks';
import { Spinner, Button } from '~/components';
type TLoginFormProps = {
onSubmit: (data: TLoginUser) => void;
@ -20,7 +21,7 @@ const LoginForm: React.FC<TLoginFormProps> = ({ onSubmit, startupConfig, error,
register,
getValues,
handleSubmit,
formState: { errors },
formState: { errors, isSubmitting },
} = useForm<TLoginUser>();
const [showResendLink, setShowResendLink] = useState<boolean>(false);
const [turnstileToken, setTurnstileToken] = useState<string | null>(null);
@ -165,15 +166,16 @@ const LoginForm: React.FC<TLoginFormProps> = ({ onSubmit, startupConfig, error,
)}
<div className="mt-6">
<button
<Button
aria-label={localize('com_auth_continue')}
data-testid="login-button"
type="submit"
disabled={requireCaptcha && !turnstileToken}
className="w-full rounded-2xl bg-green-600 px-4 py-3 text-sm font-medium text-white transition-colors hover:bg-green-700 disabled:opacity-50 disabled:hover:bg-green-600 dark:bg-green-600 dark:hover:bg-green-700"
disabled={(requireCaptcha && !turnstileToken) || isSubmitting}
variant="submit"
className="h-12 w-full rounded-2xl"
>
{localize('com_auth_continue')}
</button>
{isSubmitting ? <Spinner /> : localize('com_auth_continue')}
</Button>
</div>
</form>
</>

View file

@ -4,10 +4,10 @@ import { Turnstile } from '@marsidev/react-turnstile';
import { useNavigate, useOutletContext, useLocation } from 'react-router-dom';
import { useRegisterUserMutation } from 'librechat-data-provider/react-query';
import type { TRegisterUser, TError } from 'librechat-data-provider';
import type { TLoginLayoutContext } from '~/common';
import { ErrorMessage } from './ErrorMessage';
import { Spinner } from '~/components/svg';
import { useLocalize, TranslationKeys, ThemeContext } from '~/hooks';
import type { TLoginLayoutContext } from '~/common';
import { Spinner, Button } from '~/components';
import { ErrorMessage } from './ErrorMessage';
const Registration: React.FC = () => {
const navigate = useNavigate();
@ -194,7 +194,7 @@ const Registration: React.FC = () => {
)}
<div className="mt-6">
<button
<Button
disabled={
Object.keys(errors).length > 0 ||
isSubmitting ||
@ -202,10 +202,11 @@ const Registration: React.FC = () => {
}
type="submit"
aria-label="Submit registration"
className="w-full rounded-2xl bg-green-600 px-4 py-3 text-sm font-medium text-white transition-colors hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-green-500 focus:ring-offset-2 disabled:opacity-50 disabled:hover:bg-green-600 dark:bg-green-600 dark:hover:bg-green-700"
variant="submit"
className="h-12 w-full rounded-2xl"
>
{isSubmitting ? <Spinner /> : localize('com_auth_continue')}
</button>
</Button>
</div>
</form>

View file

@ -5,12 +5,13 @@ import { useRequestPasswordResetMutation } from 'librechat-data-provider/react-q
import type { TRequestPasswordReset, TRequestPasswordResetResponse } from 'librechat-data-provider';
import type { FC } from 'react';
import type { TLoginLayoutContext } from '~/common';
import { Spinner, Button } from '~/components';
import { useLocalize } from '~/hooks';
const BodyTextWrapper: FC<{ children: ReactNode }> = ({ children }) => {
return (
<div
className="relative mt-6 rounded-lg border border-green-500/20 bg-green-50/50 px-6 py-4 text-green-700 shadow-sm transition-all dark:bg-green-950/30 dark:text-green-100"
className="relative mt-6 rounded-xl border border-green-500/20 bg-green-50/50 px-6 py-4 text-green-700 shadow-sm transition-all dark:bg-green-950/30 dark:text-green-100"
role="alert"
>
{children}
@ -44,6 +45,7 @@ function RequestPasswordReset() {
const { startupConfig, setHeaderText } = useOutletContext<TLoginLayoutContext>();
const requestPasswordReset = useRequestPasswordResetMutation();
const { isLoading } = requestPasswordReset;
const onSubmit = (data: TRequestPasswordReset) => {
requestPasswordReset.mutate(data, {
@ -105,23 +107,12 @@ function RequestPasswordReset() {
},
})}
aria-invalid={!!errors.email}
className="
peer w-full rounded-lg border border-gray-300 bg-transparent px-4 py-3
text-base text-gray-900 placeholder-transparent transition-all
focus:border-green-500 focus:outline-none focus:ring-2 focus:ring-green-500/20
dark:border-gray-700 dark:text-white dark:focus:border-green-500
"
placeholder="email@example.com"
className="webkit-dark-styles transition-color peer w-full rounded-2xl border border-border-light bg-surface-primary px-3.5 pb-2.5 pt-3 text-text-primary duration-200 focus:border-green-500 focus:outline-none"
placeholder=" "
/>
<label
htmlFor="email"
className="
absolute -top-2 left-2 z-10 bg-white px-2 text-sm text-gray-600
transition-all peer-placeholder-shown:top-3 peer-placeholder-shown:text-base
peer-placeholder-shown:text-gray-500 peer-focus:-top-2 peer-focus:text-sm
peer-focus:text-green-600 dark:bg-gray-900 dark:text-gray-400
dark:peer-focus:text-green-500
"
className="absolute -top-2 left-2 z-10 bg-white px-2 text-sm text-gray-600 transition-all peer-placeholder-shown:top-3 peer-placeholder-shown:text-base peer-placeholder-shown:text-gray-500 peer-focus:-top-2 peer-focus:text-sm peer-focus:text-green-600 dark:bg-gray-900 dark:text-gray-400 dark:peer-focus:text-green-500"
>
{localize('com_auth_email_address')}
</label>
@ -133,18 +124,15 @@ function RequestPasswordReset() {
)}
</div>
<div className="space-y-4">
<button
<Button
aria-label="Continue with password reset"
type="submit"
disabled={!!errors.email}
className="
w-full rounded-2xl bg-green-600 px-4 py-3 text-sm font-medium text-white
transition-colors hover:bg-green-700 focus:outline-none focus:ring-2
focus:ring-green-500 focus:ring-offset-2 disabled:opacity-50
disabled:hover:bg-green-600 dark:bg-green-600 dark:hover:bg-green-700
"
disabled={!!errors.email || isLoading}
variant="submit"
className="h-12 w-full rounded-2xl"
>
{localize('com_auth_continue')}
</button>
{isLoading ? <Spinner /> : localize('com_auth_continue')}
</Button>
<a
href="/login"
className="block text-center text-sm font-medium text-green-600 transition-colors hover:text-green-700 dark:text-green-400 dark:hover:text-green-300"

Some files were not shown because too many files have changed in this diff.