📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)

* 🪶 feat: Add Support for Uploading Plaintext Files

feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unittest to fail and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)

* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)

* refactor: move generateShortLivedToken to packages/api

* refactor: move textParsing logic into packages/api

* refactor: reduce nesting and dry code with createTextFile

* fix: add proper source handling

* fix: mock new parseText and parseTextNative functions in jest file

* ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)

* feat: add STT support for Upload as Text

* refactor: move processAudioFile to packages/api

* refactor: move textParsing from utils to files

* fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)

* feat: add configurable fileTokenLimit default value

* fix: add stt to fileConfig merge logic

* fix: add fileTokenLimit to mergeFileConfig logic so configurable value is actually respected from yaml

* feat: add token limiting to parsed text files

* fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers

* fix: address comments

* refactor: rename textTokenLimiter.ts to text.ts

* chore: update form-data package to address CVE-2025-7783 and update package-lock

* feat: use default supported mime types for ocr on frontend file validation

* fix: should be using logger.debug not console.debug

* fix: mock existsSync in text.spec.ts

* fix: mock logger rather than every one of its function calls

* fix: reorganize imports and streamline file upload processing logic

* refactor: update createTextFile function to use destructured parameters and improve readability

* chore: update file validation to use EToolResources for improved type safety

* chore: update import path for types in audio processing module

* fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
This commit is contained in:
Danny Avila 2025-08-27 03:44:39 -04:00 committed by GitHub
parent 74bc0440f0
commit 48f6f8f2f8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 847 additions and 75 deletions

View file

@ -53,6 +53,7 @@
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/jsonwebtoken": "^9.0.0",
"@types/multer": "^1.4.13",
"@types/node": "^20.3.0",
"@types/react": "^18.2.18",
@ -80,7 +81,9 @@
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"form-data": "^4.0.4",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
"keyv": "^5.3.2",
"librechat-data-provider": "*",
"node-fetch": "2.7.0",

View file

@ -1 +1,2 @@
export * from './encryption';
export * from './jwt';

View file

@ -0,0 +1,14 @@
import jwt from 'jsonwebtoken';
/**
 * Generate a short-lived JWT for internal service-to-service authentication
 * (e.g. authorizing requests to the RAG API on behalf of a user).
 *
 * @param userId - The ID of the user, embedded as the `id` claim
 * @param expireIn - Token lifetime (default is 5 minutes)
 * @returns The signed HS256 JWT
 * @throws {Error} If the `JWT_SECRET` environment variable is not set
 */
export const generateShortLivedToken = (userId: string, expireIn: string = '5m'): string => {
  const secret = process.env.JWT_SECRET;
  if (!secret) {
    // Fail fast with an actionable message instead of letting `jwt.sign`
    // throw on an `undefined` secret (the `!` assertion only silenced the compiler).
    throw new Error('JWT_SECRET environment variable is not set');
  }
  return jwt.sign({ id: userId }, secret, {
    expiresIn: expireIn,
    algorithm: 'HS256',
  });
};

View file

@ -0,0 +1,38 @@
import fs from 'fs';
import { logger } from '@librechat/data-schemas';
import type { STTService, AudioFileInfo, FileObject, AudioProcessingResult } from '~/types';
/**
 * Transcribes an uploaded audio file via the configured Speech-to-Text (STT) provider.
 *
 * @param params - The parameters object
 * @param params.file - The audio file to transcribe (read from `file.path`)
 * @param params.sttService - The STT service instance performing the transcription
 * @returns A promise resolving to the transcribed text and its UTF-8 byte length
 * @throws {Error} When the file cannot be read or the STT request fails
 */
export async function processAudioFile({
  file,
  sttService,
}: {
  file: FileObject;
  sttService: STTService;
}): Promise<AudioProcessingResult> {
  try {
    // Load the raw audio bytes and collect the metadata the STT provider needs.
    const buffer = await fs.promises.readFile(file.path);
    const info: AudioFileInfo = {
      originalname: file.originalname,
      mimetype: file.mimetype,
      size: file.size,
    };

    // Resolve the active provider and its request schema, then transcribe.
    const [provider, schema] = await sttService.getProviderSchema();
    const transcribedText = await sttService.sttRequest(provider, schema, {
      audioBuffer: buffer,
      audioFile: info,
    });

    const bytes = Buffer.byteLength(transcribedText, 'utf8');
    return { text: transcribedText, bytes };
  } catch (error) {
    logger.error('Error processing audio file with STT:', error);
    throw new Error(`Failed to process audio file: ${(error as Error).message}`);
  }
}

View file

@ -1,2 +1,4 @@
export * from './mistral/crud';
export * from './audio';
export * from './text';
export * from './parse';

View file

@ -0,0 +1,255 @@
import { FileSources } from 'librechat-data-provider';
import { Readable } from 'stream';

// Mock the logger module BEFORE importing the module under test: jest hoists
// `jest.mock` calls above imports, so `./text` receives the mocked logger.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

import { parseTextNative, parseText } from './text';

// Stub filesystem access; individual tests configure readFileSync behavior.
jest.mock('fs', () => ({
  readFileSync: jest.fn(),
  createReadStream: jest.fn(),
}));

// Stub JWT minting so no real secret is required.
jest.mock('../crypto/jwt', () => ({
  generateShortLivedToken: jest.fn(),
}));

// Stub axios so no real HTTP requests are made.
jest.mock('axios', () => ({
  get: jest.fn(),
  post: jest.fn(),
  interceptors: {
    request: { use: jest.fn(), eject: jest.fn() },
    response: { use: jest.fn(), eject: jest.fn() },
  },
}));

// FormData is constructed inside parseText; mock the class with a factory.
jest.mock('form-data', () => {
  return jest.fn().mockImplementation(() => ({
    append: jest.fn(),
    getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
  }));
});

import fs, { ReadStream } from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { generateShortLivedToken } from '../crypto/jwt';

// Typed views over the mocked modules for convenient per-test configuration.
const mockedFs = fs as jest.Mocked<typeof fs>;
const mockedAxios = axios as jest.Mocked<typeof axios>;
const mockedFormData = FormData as jest.MockedClass<typeof FormData>;
const mockedGenerateShortLivedToken = generateShortLivedToken as jest.MockedFunction<
  typeof generateShortLivedToken
>;
describe('text', () => {
  // Minimal multer-style file fixture shared across all tests.
  const mockFile: Express.Multer.File = {
    fieldname: 'file',
    originalname: 'test.txt',
    encoding: '7bit',
    mimetype: 'text/plain',
    size: 100,
    destination: '/tmp',
    filename: 'test.txt',
    path: '/tmp/test.txt',
    buffer: Buffer.from('test content'),
    stream: new Readable(),
  };

  const mockReq = {
    user: { id: 'user123' },
  };

  const mockFileId = 'file123';

  beforeEach(() => {
    jest.clearAllMocks();
    // Default to no RAG API configured; individual tests opt in by setting it.
    delete process.env.RAG_API_URL;
  });

  describe('parseTextNative', () => {
    it('should successfully parse a text file', () => {
      const mockText = 'Hello, world!';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = parseTextNative(mockFile);

      expect(mockedFs.readFileSync).toHaveBeenCalledWith('/tmp/test.txt', 'utf8');
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should throw an error when file cannot be read', () => {
      const mockError = new Error('File not found');
      mockedFs.readFileSync.mockImplementation(() => {
        throw mockError;
      });

      // The thrown message embeds the original error's default string form
      // ("Error: File not found") via template interpolation.
      expect(() => parseTextNative(mockFile)).toThrow(
        'Failed to read file as text: Error: File not found',
      );
    });
  });

  describe('parseText', () => {
    beforeEach(() => {
      // Happy-path defaults for the RAG request machinery; individual tests
      // override the axios mocks to exercise each fallback branch.
      mockedGenerateShortLivedToken.mockReturnValue('mock-jwt-token');
      const mockFormDataInstance = {
        append: jest.fn(),
        getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
      };
      mockedFormData.mockImplementation(() => mockFormDataInstance as unknown as FormData);
      mockedFs.createReadStream.mockReturnValue({} as unknown as ReadStream);
    });

    it('should fall back to native parsing when RAG_API_URL is not defined', async () => {
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
      // No health check should even be attempted without a configured URL.
      expect(mockedAxios.get).not.toHaveBeenCalled();
    });

    it('should fall back to native parsing when health check fails', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockRejectedValue(new Error('Health check failed'));

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(mockedAxios.get).toHaveBeenCalledWith('http://rag-api.test/health', {
        timeout: 5000,
      });
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when health check returns non-OK status', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 500,
        statusText: 'Internal Server Error',
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should accept empty text as valid RAG API response', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {
          text: '',
        },
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      // An empty string is valid parsed content, not a failure.
      expect(result).toEqual({
        text: '',
        bytes: 0,
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when RAG API response lacks text property', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {},
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when user is undefined', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: { user: undefined },
        file: mockFile,
        file_id: mockFileId,
      });

      // Without a user ID the RAG path is skipped entirely — no token, no HTTP.
      expect(mockedGenerateShortLivedToken).not.toHaveBeenCalled();
      expect(mockedAxios.get).not.toHaveBeenCalled();
      expect(mockedAxios.post).not.toHaveBeenCalled();
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });
  });
});

View file

@ -0,0 +1,113 @@
import fs from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { logger } from '@librechat/data-schemas';
import { FileSources } from 'librechat-data-provider';
import type { Request as ServerRequest } from 'express';
import { generateShortLivedToken } from '~/crypto/jwt';
/**
 * Attempts to parse text using the RAG API, falling back to native text
 * parsing whenever the API is unconfigured, unreachable, unhealthy, or
 * returns an unexpected payload.
 *
 * @param params - The parameters object
 * @param params.req - The Express request object (only `user.id` is read)
 * @param params.file - The uploaded file (streamed from `file.path`)
 * @param params.file_id - The file ID forwarded to the RAG API
 * @returns The parsed text, its UTF-8 byte length, and the file source
 */
export async function parseText({
  req,
  file,
  file_id,
}: {
  req: Pick<ServerRequest, 'user'> & {
    user?: { id: string };
  };
  file: Express.Multer.File;
  file_id: string;
}): Promise<{ text: string; bytes: number; source: string }> {
  if (!process.env.RAG_API_URL) {
    logger.debug('[parseText] RAG_API_URL not defined, falling back to native text parsing');
    return parseTextNative(file);
  }

  // The RAG request is authenticated with a short-lived token minted from the
  // user ID, so an unauthenticated request cannot take the RAG path at all.
  if (!req.user?.id) {
    logger.debug('[parseText] No user ID provided, falling back to native text parsing');
    return parseTextNative(file);
  }

  try {
    const healthResponse = await axios.get(`${process.env.RAG_API_URL}/health`, {
      timeout: 5000,
    });
    // Considered healthy when EITHER the status text is 'OK' OR the status
    // code is 200; anything else falls back to native parsing.
    if (healthResponse?.statusText !== 'OK' && healthResponse?.status !== 200) {
      logger.debug('[parseText] RAG API health check failed, falling back to native parsing');
      return parseTextNative(file);
    }
  } catch (healthError) {
    logger.debug(
      '[parseText] RAG API health check failed, falling back to native parsing',
      healthError,
    );
    return parseTextNative(file);
  }

  try {
    const jwtToken = generateShortLivedToken(req.user.id);
    const formData = new FormData();
    formData.append('file_id', file_id);
    // Stream the file from disk rather than buffering it in memory.
    formData.append('file', fs.createReadStream(file.path));

    const formHeaders = formData.getHeaders();

    const response = await axios.post(`${process.env.RAG_API_URL}/text`, formData, {
      headers: {
        Authorization: `Bearer ${jwtToken}`,
        accept: 'application/json',
        ...formHeaders,
      },
      timeout: 30000,
    });

    const responseData = response.data;
    logger.debug('[parseText] Response from RAG API', responseData);

    // An empty string is a valid result; only a missing `text` property is an
    // error (handled by the catch below, which falls back to native parsing).
    if (!('text' in responseData)) {
      throw new Error('RAG API did not return parsed text');
    }

    return {
      text: responseData.text,
      bytes: Buffer.byteLength(responseData.text, 'utf8'),
      source: FileSources.text,
    };
  } catch (error) {
    logger.warn('[parseText] RAG API text parsing failed, falling back to native parsing', error);
    return parseTextNative(file);
  }
}
/**
* Native JavaScript text parsing fallback
* Simple text file reading - complex formats handled by RAG API
* @param {Express.Multer.File} file - The uploaded file
* @returns {{text: string, bytes: number, source: string}}
*/
export function parseTextNative(file: Express.Multer.File): {
text: string;
bytes: number;
source: string;
} {
try {
const text = fs.readFileSync(file.path, 'utf8');
const bytes = Buffer.byteLength(text, 'utf8');
return {
text,
bytes,
source: FileSources.text,
};
} catch (error) {
console.error('[parseTextNative] Failed to parse file:', error);
throw new Error(`Failed to read file as text: ${error}`);
}
}

View file

@ -0,0 +1,27 @@
/** Contract for a Speech-to-Text (STT) service implementation. */
export interface STTService {
  /** Resolves the service instance (presumably a singleton — confirm against implementation). */
  getInstance(): Promise<STTService>;
  /** Resolves the active provider name together with its request schema. */
  getProviderSchema(): Promise<[string, object]>;
  /** Transcribes the given audio buffer and resolves the recognized text. */
  sttRequest(
    provider: string,
    schema: object,
    params: { audioBuffer: Buffer; audioFile: AudioFileInfo },
  ): Promise<string>;
}

/** Metadata describing an uploaded audio file, passed alongside its buffer. */
export interface AudioFileInfo {
  originalname: string;
  mimetype: string;
  size: number;
}

/** Minimal shape of an uploaded file as consumed by audio processing. */
export interface FileObject {
  path: string;
  originalname: string;
  mimetype: string;
  size: number;
}

/** Result of audio transcription: the text plus its UTF-8 byte length. */
export interface AudioProcessingResult {
  text: string;
  bytes: number;
}

View file

@ -4,6 +4,7 @@ export * from './balance';
export * from './endpoints';
export * from './events';
export * from './error';
export * from './files';
export * from './google';
export * from './http';
export * from './mistral';

View file

@ -11,6 +11,7 @@ export * from './llm';
export * from './math';
export * from './openid';
export * from './tempChatRetention';
export * from './text';
export { default as Tokenizer } from './tokenizer';
export * from './yaml';
export * from './http';

View file

@ -7,6 +7,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -17,6 +18,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -26,6 +28,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'You are a helpful assistant',
maxContextTokens: 4096,
fileTokenLimit: 50000,
modelLabel: 'GPT-4',
model: 'gpt-4',
temperature: 0.7,
@ -37,6 +40,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('You are a helpful assistant');
expect(result.maxContextTokens).toBe(4096);
expect(result.fileTokenLimit).toBe(50000);
expect(result.modelLabel).toBe('GPT-4');
expect(result.modelOptions).toEqual({
model: 'gpt-4',
@ -50,6 +54,7 @@ describe('extractLibreChatParams', () => {
resendFiles: true,
promptPrefix: null,
maxContextTokens: 2048,
fileTokenLimit: undefined,
modelLabel: null,
model: 'claude-3',
};
@ -59,6 +64,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeNull();
expect(result.maxContextTokens).toBe(2048);
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeNull();
expect(result.modelOptions).toEqual({
model: 'claude-3',
@ -77,6 +83,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBe('Test prefix');
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({
model: 'gpt-3.5-turbo',
@ -90,6 +97,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -99,6 +107,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'Custom prompt',
maxContextTokens: 8192,
fileTokenLimit: 25000,
modelLabel: 'Custom Model',
// Model options
model: 'gpt-4',
@ -117,6 +126,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('Custom prompt');
expect(result.maxContextTokens).toBe(8192);
expect(result.fileTokenLimit).toBe(25000);
expect(result.modelLabel).toBe('Custom Model');
// Model options should include everything else

View file

@ -8,6 +8,7 @@ type LibreChatParams = {
resendFiles: boolean;
promptPrefix?: string | null;
maxContextTokens?: number;
fileTokenLimit?: number;
modelLabel?: string | null;
};
@ -32,6 +33,7 @@ export function extractLibreChatParams(
(librechat.resendFiles.default as boolean);
const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
const maxContextTokens = (delete modelOptions.maxContextTokens, options.maxContextTokens);
const fileTokenLimit = (delete modelOptions.fileTokenLimit, options.fileTokenLimit);
const modelLabel = (delete modelOptions.modelLabel, options.modelLabel);
return {
@ -40,6 +42,7 @@ export function extractLibreChatParams(
LibreChatKeys
>,
maxContextTokens,
fileTokenLimit,
promptPrefix,
resendFiles,
modelLabel,

View file

@ -0,0 +1,65 @@
import { logger } from '@librechat/data-schemas';
/**
 * Processes text content by counting tokens and truncating if it exceeds the
 * specified limit.
 *
 * @param text - The text content to process
 * @param tokenLimit - The maximum number of tokens allowed
 * @param tokenCountFn - Function to count tokens; may be synchronous or
 *   asynchronous (the result is awaited either way)
 * @returns Promise resolving to the (possibly truncated) text, its token
 *   count, and whether truncation occurred
 */
export async function processTextWithTokenLimit({
  text,
  tokenLimit,
  tokenCountFn,
}: {
  text: string;
  tokenLimit: number;
  // Widened from `=> number`: the implementation always awaits the result, so
  // async counters were already supported at runtime; this makes the type honest
  // while remaining backward compatible for sync callers.
  tokenCountFn: (text: string) => number | Promise<number>;
}): Promise<{ text: string; tokenCount: number; wasTruncated: boolean }> {
  const originalTokenCount = await tokenCountFn(text);

  // Fast path: nothing to do when the text already fits within the limit.
  if (originalTokenCount <= tokenLimit) {
    return {
      text,
      tokenCount: originalTokenCount,
      wasTruncated: false,
    };
  }

  /**
   * Binary search over character length for the longest prefix whose token
   * count fits the limit. Valid because token counts are monotonically
   * non-decreasing in prefix length.
   */
  let low = 0;
  let high = text.length;
  let bestText = '';
  let bestTokenCount = 0;

  logger.debug(
    `[textTokenLimiter] Text content exceeds token limit: ${originalTokenCount} > ${tokenLimit}, truncating...`,
  );

  while (low <= high) {
    const mid = Math.floor((low + high) / 2);
    const truncatedText = text.substring(0, mid);
    const tokenCount = await tokenCountFn(truncatedText);

    if (tokenCount <= tokenLimit) {
      // Remember the best fit so far (and its count, so we never re-count),
      // then try a longer prefix.
      bestText = truncatedText;
      bestTokenCount = tokenCount;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }

  logger.warn(
    `[textTokenLimiter] Text truncated from ${originalTokenCount} to ${bestTokenCount} tokens (limit: ${tokenLimit})`,
  );

  return {
    text: bestText,
    tokenCount: bestTokenCount,
    wasTruncated: true,
  };
}

View file

@ -14,7 +14,7 @@ import {
} from '../src/file-config';
describe('MIME Type Regex Patterns', () => {
const unsupportedMimeTypes = ['text/x-unknown', 'application/unknown', 'image/bmp', 'audio/mp3'];
const unsupportedMimeTypes = ['text/x-unknown', 'application/unknown', 'image/bmp'];
// Testing general supported MIME types
fullMimeTypesList.forEach((mimeType) => {

View file

@ -122,11 +122,27 @@ export const applicationMimeTypes =
export const imageMimeTypes = /^image\/(jpeg|gif|png|webp|heic|heif)$/;
export const audioMimeTypes =
/^audio\/(mp3|mpeg|mpeg3|wav|wave|x-wav|ogg|vorbis|mp4|x-m4a|flac|x-flac|webm)$/;
export const defaultOCRMimeTypes = [
imageMimeTypes,
/^application\/pdf$/,
/^application\/vnd\.openxmlformats-officedocument\.(wordprocessingml\.document|presentationml\.presentation|spreadsheetml\.sheet)$/,
/^application\/vnd\.ms-(word|powerpoint|excel)$/,
/^application\/epub\+zip$/,
];
export const defaultTextMimeTypes = [textMimeTypes];
export const defaultSTTMimeTypes = [audioMimeTypes];
export const supportedMimeTypes = [
textMimeTypes,
excelMimeTypes,
applicationMimeTypes,
imageMimeTypes,
audioMimeTypes,
/** Supported by LC Code Interpreter PAI */
/^image\/(svg|svg\+xml)$/,
];
@ -169,6 +185,7 @@ export const megabyte = 1024 * 1024;
export const mbToBytes = (mb: number): number => mb * megabyte;
const defaultSizeLimit = mbToBytes(512);
const defaultTokenLimit = 100000;
const assistantsFileConfig = {
fileLimit: 10,
fileSizeLimit: defaultSizeLimit,
@ -192,12 +209,22 @@ export const fileConfig = {
},
serverFileSizeLimit: defaultSizeLimit,
avatarSizeLimit: mbToBytes(2),
fileTokenLimit: defaultTokenLimit,
clientImageResize: {
enabled: false,
maxWidth: 1900,
maxHeight: 1900,
quality: 0.92,
},
ocr: {
supportedMimeTypes: defaultOCRMimeTypes,
},
text: {
supportedMimeTypes: defaultTextMimeTypes,
},
stt: {
supportedMimeTypes: defaultSTTMimeTypes,
},
checkType: function (fileType: string, supportedTypes: RegExp[] = supportedMimeTypes) {
return supportedTypes.some((regex) => regex.test(fileType));
},
@ -232,6 +259,7 @@ export const fileConfigSchema = z.object({
endpoints: z.record(endpointFileConfigSchema).optional(),
serverFileSizeLimit: z.number().min(0).optional(),
avatarSizeLimit: z.number().min(0).optional(),
fileTokenLimit: z.number().min(0).optional(),
imageGeneration: z
.object({
percentage: z.number().min(0).max(100).optional(),
@ -246,6 +274,16 @@ export const fileConfigSchema = z.object({
quality: z.number().min(0).max(1).optional(),
})
.optional(),
ocr: z
.object({
supportedMimeTypes: supportedMimeTypesSchema.optional(),
})
.optional(),
text: z
.object({
supportedMimeTypes: supportedMimeTypesSchema.optional(),
})
.optional(),
});
/** Helper function to safely convert string patterns to RegExp objects */
@ -261,7 +299,21 @@ export const convertStringsToRegex = (patterns: string[]): RegExp[] =>
}, []);
export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | undefined): FileConfig {
const mergedConfig = fileConfig as FileConfig;
const mergedConfig: FileConfig = {
...fileConfig,
ocr: {
...fileConfig.ocr,
supportedMimeTypes: fileConfig.ocr?.supportedMimeTypes || [],
},
text: {
...fileConfig.text,
supportedMimeTypes: fileConfig.text?.supportedMimeTypes || [],
},
stt: {
...fileConfig.stt,
supportedMimeTypes: fileConfig.stt?.supportedMimeTypes || [],
},
};
if (!dynamic) {
return mergedConfig;
}
@ -274,6 +326,10 @@ export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | unde
mergedConfig.avatarSizeLimit = mbToBytes(dynamic.avatarSizeLimit);
}
if (dynamic.fileTokenLimit !== undefined) {
mergedConfig.fileTokenLimit = dynamic.fileTokenLimit;
}
// Merge clientImageResize configuration
if (dynamic.clientImageResize !== undefined) {
mergedConfig.clientImageResize = {
@ -282,6 +338,26 @@ export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | unde
};
}
if (dynamic.ocr !== undefined) {
mergedConfig.ocr = {
...mergedConfig.ocr,
...dynamic.ocr,
};
if (dynamic.ocr.supportedMimeTypes) {
mergedConfig.ocr.supportedMimeTypes = convertStringsToRegex(dynamic.ocr.supportedMimeTypes);
}
}
if (dynamic.text !== undefined) {
mergedConfig.text = {
...mergedConfig.text,
...dynamic.text,
};
if (dynamic.text.supportedMimeTypes) {
mergedConfig.text.supportedMimeTypes = convertStringsToRegex(dynamic.text.supportedMimeTypes);
}
}
if (!dynamic.endpoints) {
return mergedConfig;
}

View file

@ -139,6 +139,18 @@ export const librechat = {
placeholderCode: true,
optionType: 'model',
} as const,
fileTokenLimit: {
key: 'fileTokenLimit',
label: 'com_ui_file_token_limit',
labelCode: true,
description: 'com_ui_file_token_limit_desc',
descriptionCode: true,
placeholder: 'com_nav_theme_system',
placeholderCode: true,
type: 'number',
component: 'input',
columnSpan: 2,
} as const,
};
const openAIParams: Record<string, SettingDefinition> = {
@ -625,6 +637,7 @@ const googleConfig: SettingsConfiguration = [
google.thinking,
google.thinkingBudget,
google.web_search,
librechat.fileTokenLimit,
];
const googleCol1: SettingsConfiguration = [
@ -643,6 +656,7 @@ const googleCol2: SettingsConfiguration = [
google.thinking,
google.thinkingBudget,
google.web_search,
librechat.fileTokenLimit,
];
const openAI: SettingsConfiguration = [
@ -663,6 +677,7 @@ const openAI: SettingsConfiguration = [
openAIParams.reasoning_summary,
openAIParams.verbosity,
openAIParams.disableStreaming,
librechat.fileTokenLimit,
];
const openAICol1: SettingsConfiguration = [
@ -687,6 +702,7 @@ const openAICol2: SettingsConfiguration = [
openAIParams.useResponsesApi,
openAIParams.web_search,
openAIParams.disableStreaming,
librechat.fileTokenLimit,
];
const anthropicConfig: SettingsConfiguration = [
@ -702,6 +718,7 @@ const anthropicConfig: SettingsConfiguration = [
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
librechat.fileTokenLimit,
];
const anthropicCol1: SettingsConfiguration = [
@ -721,6 +738,7 @@ const anthropicCol2: SettingsConfiguration = [
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
librechat.fileTokenLimit,
];
const bedrockAnthropic: SettingsConfiguration = [
@ -736,6 +754,7 @@ const bedrockAnthropic: SettingsConfiguration = [
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
librechat.fileTokenLimit,
];
const bedrockMistral: SettingsConfiguration = [
@ -747,6 +766,7 @@ const bedrockMistral: SettingsConfiguration = [
mistral.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockCohere: SettingsConfiguration = [
@ -758,6 +778,7 @@ const bedrockCohere: SettingsConfiguration = [
cohere.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockGeneral: SettingsConfiguration = [
@ -768,6 +789,7 @@ const bedrockGeneral: SettingsConfiguration = [
meta.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockAnthropicCol1: SettingsConfiguration = [
@ -787,6 +809,7 @@ const bedrockAnthropicCol2: SettingsConfiguration = [
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
librechat.fileTokenLimit,
];
const bedrockMistralCol1: SettingsConfiguration = [
@ -802,6 +825,7 @@ const bedrockMistralCol2: SettingsConfiguration = [
mistral.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockCohereCol1: SettingsConfiguration = [
@ -817,6 +841,7 @@ const bedrockCohereCol2: SettingsConfiguration = [
cohere.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockGeneralCol1: SettingsConfiguration = [
@ -831,6 +856,7 @@ const bedrockGeneralCol2: SettingsConfiguration = [
meta.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
export const paramSettings: Record<string, SettingsConfiguration | undefined> = {

View file

@ -680,6 +680,8 @@ export const tConversationSchema = z.object({
iconURL: z.string().nullable().optional(),
/* temporary chat */
expiredAt: z.string().nullable().optional(),
/* file token limits */
fileTokenLimit: coerceNumber.optional(),
/** @deprecated */
resendImages: z.boolean().optional(),
/** @deprecated */
@ -794,6 +796,8 @@ export const tQueryParamsSchema = tConversationSchema
* https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-instructions
* */
instructions: true,
/** @endpoints openAI, google, anthropic */
fileTokenLimit: true,
})
.merge(
z.object({
@ -850,6 +854,7 @@ export const googleBaseSchema = tConversationSchema.pick({
thinking: true,
thinkingBudget: true,
web_search: true,
fileTokenLimit: true,
iconURL: true,
greeting: true,
spec: true,
@ -1101,6 +1106,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
useResponsesApi: true,
web_search: true,
disableStreaming: true,
fileTokenLimit: true,
});
export const openAISchema = openAIBaseSchema
@ -1145,6 +1151,7 @@ export const anthropicBaseSchema = tConversationSchema.pick({
spec: true,
maxContextTokens: true,
web_search: true,
fileTokenLimit: true,
});
export const anthropicSchema = anthropicBaseSchema

View file

@ -47,6 +47,7 @@ export type FileConfig = {
endpoints: {
[key: string]: EndpointFileConfig;
};
fileTokenLimit?: number;
serverFileSizeLimit?: number;
avatarSizeLimit?: number;
clientImageResize?: {
@ -55,6 +56,39 @@ export type FileConfig = {
maxHeight?: number;
quality?: number;
};
ocr?: {
supportedMimeTypes?: RegExp[];
};
text?: {
supportedMimeTypes?: RegExp[];
};
stt?: {
supportedMimeTypes?: RegExp[];
};
checkType?: (fileType: string, supportedTypes: RegExp[]) => boolean;
};
export type FileConfigInput = {
endpoints?: {
[key: string]: EndpointFileConfig;
};
serverFileSizeLimit?: number;
avatarSizeLimit?: number;
clientImageResize?: {
enabled?: boolean;
maxWidth?: number;
maxHeight?: number;
quality?: number;
};
ocr?: {
supportedMimeTypes?: string[];
};
text?: {
supportedMimeTypes?: string[];
};
stt?: {
supportedMimeTypes?: string[];
};
checkType?: (fileType: string, supportedTypes: RegExp[]) => boolean;
};

View file

@ -141,6 +141,9 @@ export const conversationPreset = {
disableStreaming: {
type: Boolean,
},
fileTokenLimit: {
type: Number,
},
/** Reasoning models only */
reasoning_effort: {
type: String,

View file

@ -51,6 +51,7 @@ export interface IPreset extends Document {
useResponsesApi?: boolean;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;
// end of additional fields
agentOptions?: unknown;
}

View file

@ -50,6 +50,7 @@ export interface IConversation extends Document {
useResponsesApi?: boolean;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;
// Additional fields
files?: string[];
expiredAt?: Date;