📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)

* 🪶 feat: Add Support for Uploading Plaintext Files

feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unittest to fail and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)

* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)

* refactor: move generateShortLivedToken to packages/api

* refactor: move textParsing logic into packages/api

* refactor: reduce nesting and dry code with createTextFile

* fix: add proper source handling

* fix: mock new parseText and parseTextNative functions in jest file

* ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)

* feat: add STT support for Upload as Text

* refactor: move processAudioFile to packages/api

* refactor: move textParsing from utils to files

* fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)

* feat: add configurable fileTokenLimit default value

* fix: add stt to fileConfig merge logic

* fix: add fileTokenLimit to mergeFileConfig logic so configurable value is actually respected from yaml

* feat: add token limiting to parsed text files

* fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers

* fix: address comments

* refactor: rename textTokenLimiter.ts to text.ts

* chore: update form-data package to address CVE-2025-7783 and update package-lock

* feat: use default supported mime types for ocr on frontend file validation

* fix: should be using logger.debug not console.debug

* fix: mock existsSync in text.spec.ts

* fix: mock logger rather than every one of its function calls

* fix: reorganize imports and streamline file upload processing logic

* refactor: update createTextFile function to use destructured parameters and improve readability

* chore: update file validation to use EToolResources for improved type safety

* chore: update import path for types in audio processing module

* fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
This commit is contained in:
Danny Avila 2025-08-27 03:44:39 -04:00 committed by GitHub
parent 74bc0440f0
commit 48f6f8f2f8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 847 additions and 75 deletions

View file

@ -1,7 +1,6 @@
const axios = require('axios');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { isEnabled, generateShortLivedToken } = require('@librechat/api');
const footer = `Use the context as your learned knowledge to better answer the user.

View file

@ -2,9 +2,9 @@ const { z } = require('zod');
const axios = require('axios');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { generateShortLivedToken } = require('@librechat/api');
const { Tools, EToolResources } = require('librechat-data-provider');
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { getFiles } = require('~/models/File');
/**

View file

@ -387,7 +387,8 @@ router.post('/', async (req, res) => {
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result')
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}

View file

@ -29,6 +29,17 @@ router.post('/', async (req, res) => {
} catch (error) {
// TODO: delete remote file if it exists
logger.error('[/files/images] Error processing file:', error);
let message = 'Error processing file';
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}
try {
const filepath = path.join(
appConfig.paths.imageOutput,
@ -39,7 +50,7 @@ router.post('/', async (req, res) => {
} catch (error) {
logger.error('[/files/images] Error deleting file:', error);
}
res.status(500).json({ message: 'Error processing file' });
res.status(500).json({ message });
} finally {
try {
await fs.unlink(req.file.path);

View file

@ -500,18 +500,6 @@ const resendVerificationEmail = async (req) => {
};
}
};
/**
* Generate a short-lived JWT token
* @param {String} userId - The ID of the user
* @param {String} [expireIn='5m'] - The expiration time for the token (default is 5 minutes)
* @returns {String} - The generated JWT token
*/
const generateShortLivedToken = (userId, expireIn = '5m') => {
return jwt.sign({ id: userId }, process.env.JWT_SECRET, {
expiresIn: expireIn,
algorithm: 'HS256',
});
};
module.exports = {
logoutUser,
@ -522,5 +510,4 @@ module.exports = {
setOpenIDAuthTokens,
requestPasswordReset,
resendVerificationEmail,
generateShortLivedToken,
};

View file

@ -6,6 +6,7 @@ const buildOptions = (endpoint, parsedBody) => {
modelLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = anthropicSettings.resendFiles.default,
promptCache = anthropicSettings.promptCache.default,
thinking = anthropicSettings.thinking.default,
@ -29,6 +30,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -6,6 +6,7 @@ const buildOptions = (endpoint, parsedBody) => {
modelLabel: name,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -24,6 +25,7 @@ const buildOptions = (endpoint, parsedBody) => {
spec,
promptPrefix,
maxContextTokens,
fileTokenLimit,
model_parameters,
});

View file

@ -7,6 +7,7 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -27,6 +28,7 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -12,6 +12,7 @@ const buildOptions = (endpoint, parsedBody) => {
spec,
artifacts,
maxContextTokens,
fileTokenLimit,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
@ -24,6 +25,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -7,6 +7,7 @@ const buildOptions = (endpoint, parsedBody) => {
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -27,6 +28,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -319,4 +319,4 @@ async function speechToText(req, res) {
await sttService.processSpeechToText(req, res);
}
module.exports = { speechToText };
module.exports = { STTService, speechToText };

View file

@ -3,7 +3,7 @@ const path = require('path');
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { generateShortLivedToken } = require('@librechat/api');
const { getBufferMetadata } = require('~/server/utils');
const paths = require('~/config/paths');

View file

@ -1,10 +1,9 @@
const fs = require('fs');
const axios = require('axios');
const FormData = require('form-data');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { logAxiosError, generateShortLivedToken } = require('@librechat/api');
/**
* Deletes a file from the vector database. This function takes a file object, constructs the full path, and

View file

@ -1,13 +1,16 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { logAxiosError, processTextWithTokenLimit } = require('@librechat/api');
const {
FileSources,
VisionModes,
ImageDetail,
ContentTypes,
EModelEndpoint,
mergeFileConfig,
} = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const countTokens = require('~/server/utils/countTokens');
/**
* Converts a readable stream to a base64 encoded string.
@ -102,11 +105,28 @@ async function encodeAndFormat(req, files, endpoint, mode) {
return result;
}
const fileTokenLimit =
req.body?.fileTokenLimit ?? mergeFileConfig(req.config?.fileConfig).fileTokenLimit;
for (let file of files) {
/** @type {FileSources} */
const source = file.source ?? FileSources.local;
if (source === FileSources.text && file.text) {
result.text += `${!result.text ? 'Attached document(s):\n```md' : '\n\n---\n\n'}# "${file.filename}"\n${file.text}\n`;
let fileText = file.text;
const { text: limitedText, wasTruncated } = await processTextWithTokenLimit({
text: fileText,
tokenLimit: fileTokenLimit,
tokenCountFn: (text) => countTokens(text),
});
if (wasTruncated) {
logger.debug(
`[encodeAndFormat] Text content truncated for file: ${file.filename} due to token limits`,
);
}
result.text += `${!result.text ? 'Attached document(s):\n```md' : '\n\n---\n\n'}# "${file.filename}"\n${limitedText}\n`;
}
if (!file.height) {
@ -135,7 +155,7 @@ async function encodeAndFormat(req, files, endpoint, mode) {
base64Data = null;
continue;
} catch (error) {
// Error handling code
logger.error('Error processing image from blob storage:', error);
}
} else if (source !== FileSources.local && base64Only.has(endpoint)) {
const [_file, imageURL] = await preparePayload(req, file);

View file

@ -17,7 +17,8 @@ const {
isAssistantsEndpoint,
} = require('librechat-data-provider');
const { EnvVar } = require('@librechat/agents');
const { sanitizeFilename } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { sanitizeFilename, parseText, processAudioFile } = require('@librechat/api');
const {
convertImage,
resizeAndConvert,
@ -33,7 +34,7 @@ const { checkCapability } = require('~/server/services/Config');
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
const { getStrategyFunctions } = require('./strategies');
const { determineFileType } = require('~/server/utils');
const { logger } = require('~/config');
const { STTService } = require('./Audio/STTService');
/**
* Creates a modular file upload wrapper that ensures filename sanitization
@ -552,35 +553,27 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
}
// Note: File search processing continues to dual storage logic below
} else if (tool_resource === EToolResources.ocr) {
const isOCREnabled = await checkCapability(req, AgentCapabilities.ocr);
if (!isOCREnabled) {
throw new Error('OCR capability is not enabled for Agents');
}
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { file_id, temp_file_id = null } = metadata;
const {
text,
bytes,
// TODO: OCR images support?
images: _i,
filename,
filepath: ocrFileURL,
} = await uploadOCR({ req, appConfig, file, loadAuthValues });
/**
* @param {object} params
* @param {string} params.text
* @param {number} params.bytes
* @param {string} params.filepath
* @param {string} params.type
* @return {Promise<void>}
*/
const createTextFile = async ({ text, bytes, filepath, type = 'text/plain' }) => {
const fileInfo = removeNullishValues({
text,
bytes,
file_id,
temp_file_id,
user: req.user.id,
type: 'text/plain',
filepath: ocrFileURL,
type,
filepath: filepath ?? file.path,
source: FileSources.text,
filename: filename ?? file.originalname,
filename: file.originalname,
model: messageAttachment ? undefined : req.body.model,
context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
});
@ -597,6 +590,47 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
return res
.status(200)
.json({ message: 'Agent file uploaded and processed successfully', ...result });
};
const fileConfig = mergeFileConfig(appConfig.fileConfig);
const shouldUseOCR = fileConfig.checkType(
file.mimetype,
fileConfig.ocr?.supportedMimeTypes || [],
);
if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
throw new Error('OCR capability is not enabled for Agents');
} else if (shouldUseOCR) {
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { text, bytes, filepath: ocrFileURL } = await uploadOCR({ req, file, loadAuthValues });
return await createTextFile({ text, bytes, filepath: ocrFileURL });
}
const shouldUseSTT = fileConfig.checkType(
file.mimetype,
fileConfig.stt?.supportedMimeTypes || [],
);
if (shouldUseSTT) {
const sttService = await STTService.getInstance();
const { text, bytes } = await processAudioFile({ file, sttService });
return await createTextFile({ text, bytes });
}
const shouldUseText = fileConfig.checkType(
file.mimetype,
fileConfig.text?.supportedMimeTypes || [],
);
if (!shouldUseText) {
throw new Error(`File type ${file.mimetype} is not supported for OCR or text parsing`);
}
const { text, bytes } = await parseText({ req, file, file_id });
return await createTextFile({ text, bytes, type: file.mimetype });
}
// Dual storage pattern for RAG files: Storage + Vector DB

View file

@ -101,6 +101,11 @@ jest.mock('~/server/utils', () => ({
determineFileType: jest.fn(),
}));
jest.mock('@librechat/api', () => ({
parseText: jest.fn(),
parseTextNative: jest.fn(),
}));
// Import the actual processFiles function after all mocks are set up
const { processFiles } = require('./process');
const { updateFileUsage } = require('~/models/File');

View file

@ -135,10 +135,14 @@ const useFileHandling = (params?: UseFileHandling) => {
const file_id = body.get('file_id');
clearUploadTimer(file_id as string);
deleteFileById(file_id as string);
const errorMessage =
error?.code === 'ERR_CANCELED'
? 'com_error_files_upload_canceled'
: (error?.response?.data?.message ?? 'com_error_files_upload');
let errorMessage = 'com_error_files_upload';
if (error?.code === 'ERR_CANCELED') {
errorMessage = 'com_error_files_upload_canceled';
} else if (error?.response?.data?.message) {
errorMessage = error.response.data.message;
}
setError(errorMessage);
},
},
@ -256,6 +260,8 @@ const useFileHandling = (params?: UseFileHandling) => {
fileConfig?.endpoints?.default ??
defaultFileConfig.endpoints[endpoint] ??
defaultFileConfig.endpoints.default,
toolResource: _toolResource,
fileConfig: fileConfig,
});
} catch (error) {
console.error('file validation error', error);

View file

@ -889,6 +889,8 @@
"com_ui_field_required": "This field is required",
"com_ui_file_size": "File Size",
"com_ui_files": "Files",
"com_ui_file_token_limit": "File Token Limit",
"com_ui_file_token_limit_desc": "Set maximum token limit for file processing to control costs and resource usage",
"com_ui_filter_prompts": "Filter Prompts",
"com_ui_filter_prompts_name": "Filter prompts by name",
"com_ui_final_touch": "Final touch",

View file

@ -3,10 +3,11 @@ import {
megabyte,
QueryKeys,
excelMimeTypes,
EToolResources,
codeTypeMapping,
fileConfig as defaultFileConfig,
} from 'librechat-data-provider';
import type { TFile, EndpointFileConfig } from 'librechat-data-provider';
import type { TFile, EndpointFileConfig, FileConfig } from 'librechat-data-provider';
import type { QueryClient } from '@tanstack/react-query';
import type { ExtendedFile } from '~/common';
@ -203,11 +204,15 @@ export const validateFiles = ({
fileList,
setError,
endpointFileConfig,
toolResource,
fileConfig,
}: {
fileList: File[];
files: Map<string, ExtendedFile>;
setError: (error: string) => void;
endpointFileConfig: EndpointFileConfig;
toolResource?: string;
fileConfig: FileConfig | null;
}) => {
const { fileLimit, fileSizeLimit, totalSizeLimit, supportedMimeTypes } = endpointFileConfig;
const existingFiles = Array.from(files.values());
@ -247,7 +252,16 @@ export const validateFiles = ({
fileList[i] = newFile;
}
if (!checkType(originalFile.type, supportedMimeTypes)) {
let mimeTypesToCheck = supportedMimeTypes;
if (toolResource === EToolResources.ocr) {
mimeTypesToCheck = [
...(fileConfig?.text?.supportedMimeTypes || []),
...(fileConfig?.ocr?.supportedMimeTypes || []),
...(fileConfig?.stt?.supportedMimeTypes || []),
];
}
if (!checkType(originalFile.type, mimeTypesToCheck)) {
console.log(originalFile);
setError('Currently, unsupported file type: ' + originalFile.type);
return false;

3
package-lock.json generated
View file

@ -51813,6 +51813,7 @@
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/jsonwebtoken": "^9.0.0",
"@types/multer": "^1.4.13",
"@types/node": "^20.3.0",
"@types/react": "^18.2.18",
@ -51837,7 +51838,9 @@
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"form-data": "^4.0.4",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
"keyv": "^5.3.2",
"librechat-data-provider": "*",
"node-fetch": "2.7.0",

View file

@ -53,6 +53,7 @@
"@types/diff": "^6.0.0",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.2",
"@types/jsonwebtoken": "^9.0.0",
"@types/multer": "^1.4.13",
"@types/node": "^20.3.0",
"@types/react": "^18.2.18",
@ -80,7 +81,9 @@
"diff": "^7.0.0",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"form-data": "^4.0.4",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
"keyv": "^5.3.2",
"librechat-data-provider": "*",
"node-fetch": "2.7.0",

View file

@ -1 +1,2 @@
export * from './encryption';
export * from './jwt';

View file

@ -0,0 +1,14 @@
import jwt from 'jsonwebtoken';
/**
 * Generate a short-lived JWT for internal service-to-service authentication
 * (e.g. calls to the RAG API on behalf of a user).
 *
 * @param userId - The user ID embedded as the `id` claim
 * @param expireIn - Token lifetime in any form `jsonwebtoken` accepts (default `'5m'`)
 * @returns The signed HS256 JWT
 * @throws {Error} If the `JWT_SECRET` environment variable is not set
 */
export const generateShortLivedToken = (userId: string, expireIn: string = '5m'): string => {
  const secret = process.env.JWT_SECRET;
  if (!secret) {
    // Fail fast with a clear message instead of hiding the missing config
    // behind a non-null assertion and jsonwebtoken's generic error.
    throw new Error('JWT_SECRET environment variable is not set');
  }
  return jwt.sign({ id: userId }, secret, {
    expiresIn: expireIn,
    algorithm: 'HS256',
  });
};

View file

@ -0,0 +1,38 @@
import fs from 'fs';
import { logger } from '@librechat/data-schemas';
import type { STTService, AudioFileInfo, FileObject, AudioProcessingResult } from '~/types';
/**
 * Transcribes an uploaded audio file using the configured Speech-to-Text (STT) service.
 *
 * Reads the file from disk, resolves the STT provider and its schema, requests a
 * transcription, and returns the text along with its UTF-8 byte length.
 *
 * @param params.file - The uploaded audio file; `file.path` must be readable on disk
 * @param params.sttService - The STT service instance used for transcription
 * @returns The transcribed text and its size in bytes
 * @throws {Error} If reading the file or the STT request fails
 */
export async function processAudioFile({
  file,
  sttService,
}: {
  file: FileObject;
  sttService: STTService;
}): Promise<AudioProcessingResult> {
  try {
    const audioBuffer = await fs.promises.readFile(file.path);
    const audioFile: AudioFileInfo = {
      originalname: file.originalname,
      mimetype: file.mimetype,
      size: file.size,
    };

    const [provider, sttSchema] = await sttService.getProviderSchema();
    const text = await sttService.sttRequest(provider, sttSchema, { audioBuffer, audioFile });

    return {
      text,
      bytes: Buffer.byteLength(text, 'utf8'),
    };
  } catch (error) {
    logger.error('Error processing audio file with STT:', error);
    // Narrow instead of casting: a non-Error throw would otherwise render as "undefined".
    const message = error instanceof Error ? error.message : String(error);
    throw new Error(`Failed to process audio file: ${message}`);
  }
}

View file

@ -1,2 +1,4 @@
export * from './mistral/crud';
export * from './audio';
export * from './text';
export * from './parse';

View file

@ -0,0 +1,255 @@
import { FileSources } from 'librechat-data-provider';
import { Readable } from 'stream';
// Unit tests for parseText / parseTextNative.
// NOTE: jest.mock calls are hoisted, but the logger mock is declared before the
// module-under-test import to make the ordering explicit for readers.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

import { parseTextNative, parseText } from './text';

// Stub out filesystem access, token generation, HTTP, and multipart form handling
// so tests never touch disk or network.
jest.mock('fs', () => ({
  readFileSync: jest.fn(),
  createReadStream: jest.fn(),
}));

jest.mock('../crypto/jwt', () => ({
  generateShortLivedToken: jest.fn(),
}));

jest.mock('axios', () => ({
  get: jest.fn(),
  post: jest.fn(),
  interceptors: {
    request: { use: jest.fn(), eject: jest.fn() },
    response: { use: jest.fn(), eject: jest.fn() },
  },
}));

jest.mock('form-data', () => {
  return jest.fn().mockImplementation(() => ({
    append: jest.fn(),
    getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
  }));
});

import fs, { ReadStream } from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { generateShortLivedToken } from '../crypto/jwt';

// Typed handles to the mocked modules.
const mockedFs = fs as jest.Mocked<typeof fs>;
const mockedAxios = axios as jest.Mocked<typeof axios>;
const mockedFormData = FormData as jest.MockedClass<typeof FormData>;
const mockedGenerateShortLivedToken = generateShortLivedToken as jest.MockedFunction<
  typeof generateShortLivedToken
>;

describe('text', () => {
  // Minimal multer file fixture shared by every test.
  const mockFile: Express.Multer.File = {
    fieldname: 'file',
    originalname: 'test.txt',
    encoding: '7bit',
    mimetype: 'text/plain',
    size: 100,
    destination: '/tmp',
    filename: 'test.txt',
    path: '/tmp/test.txt',
    buffer: Buffer.from('test content'),
    stream: new Readable(),
  };

  const mockReq = {
    user: { id: 'user123' },
  };

  const mockFileId = 'file123';

  beforeEach(() => {
    jest.clearAllMocks();
    // Default to "no RAG API configured"; individual tests opt in by setting it.
    delete process.env.RAG_API_URL;
  });

  describe('parseTextNative', () => {
    it('should successfully parse a text file', () => {
      const mockText = 'Hello, world!';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = parseTextNative(mockFile);

      expect(mockedFs.readFileSync).toHaveBeenCalledWith('/tmp/test.txt', 'utf8');
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should throw an error when file cannot be read', () => {
      const mockError = new Error('File not found');
      mockedFs.readFileSync.mockImplementation(() => {
        throw mockError;
      });

      // parseTextNative wraps the underlying error in its own message.
      expect(() => parseTextNative(mockFile)).toThrow(
        'Failed to read file as text: Error: File not found',
      );
    });
  });

  describe('parseText', () => {
    beforeEach(() => {
      // Happy-path defaults: valid token, working FormData, readable stream.
      mockedGenerateShortLivedToken.mockReturnValue('mock-jwt-token');
      const mockFormDataInstance = {
        append: jest.fn(),
        getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
      };
      mockedFormData.mockImplementation(() => mockFormDataInstance as unknown as FormData);
      mockedFs.createReadStream.mockReturnValue({} as unknown as ReadStream);
    });

    it('should fall back to native parsing when RAG_API_URL is not defined', async () => {
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
      // No RAG URL means no health check should ever be attempted.
      expect(mockedAxios.get).not.toHaveBeenCalled();
    });

    it('should fall back to native parsing when health check fails', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockRejectedValue(new Error('Health check failed'));

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(mockedAxios.get).toHaveBeenCalledWith('http://rag-api.test/health', {
        timeout: 5000,
      });
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when health check returns non-OK status', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 500,
        statusText: 'Internal Server Error',
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should accept empty text as valid RAG API response', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {
          text: '',
        },
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      // Empty string is a legitimate parse result, not a failure.
      expect(result).toEqual({
        text: '',
        bytes: 0,
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when RAG API response lacks text property', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {},
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when user is undefined', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: { user: undefined },
        file: mockFile,
        file_id: mockFileId,
      });

      // Without a user ID the RAG path is skipped entirely.
      expect(mockedGenerateShortLivedToken).not.toHaveBeenCalled();
      expect(mockedAxios.get).not.toHaveBeenCalled();
      expect(mockedAxios.post).not.toHaveBeenCalled();
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });
  });
});

View file

@ -0,0 +1,113 @@
import fs from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { logger } from '@librechat/data-schemas';
import { FileSources } from 'librechat-data-provider';
import type { Request as ServerRequest } from 'express';
import { generateShortLivedToken } from '~/crypto/jwt';
/**
 * Parses a file's text content, preferring the RAG API and falling back to
 * native parsing whenever the API is unavailable or fails.
 *
 * Fallback order:
 * 1. `RAG_API_URL` unset, or no authenticated user → native parsing.
 * 2. RAG API health check unreachable or not OK → native parsing.
 * 3. `/text` upload fails or returns no `text` field → native parsing.
 *
 * @returns The parsed text, its UTF-8 byte length, and the file source
 */
export async function parseText({
  req,
  file,
  file_id,
}: {
  req: Pick<ServerRequest, 'user'> & {
    user?: { id: string };
  };
  file: Express.Multer.File;
  file_id: string;
}): Promise<{ text: string; bytes: number; source: string }> {
  const ragApiUrl = process.env.RAG_API_URL;
  if (!ragApiUrl) {
    logger.debug('[parseText] RAG_API_URL not defined, falling back to native text parsing');
    return parseTextNative(file);
  }

  const userId = req.user?.id;
  if (!userId) {
    logger.debug('[parseText] No user ID provided, falling back to native text parsing');
    return parseTextNative(file);
  }

  // Probe the RAG API before attempting the upload.
  let healthy = false;
  try {
    const health = await axios.get(`${ragApiUrl}/health`, {
      timeout: 5000,
    });
    healthy = health?.statusText === 'OK' || health?.status === 200;
    if (!healthy) {
      logger.debug('[parseText] RAG API health check failed, falling back to native parsing');
    }
  } catch (healthError) {
    logger.debug(
      '[parseText] RAG API health check failed, falling back to native parsing',
      healthError,
    );
  }
  if (!healthy) {
    return parseTextNative(file);
  }

  try {
    const token = generateShortLivedToken(userId);

    const form = new FormData();
    form.append('file_id', file_id);
    form.append('file', fs.createReadStream(file.path));

    const response = await axios.post(`${ragApiUrl}/text`, form, {
      headers: {
        Authorization: `Bearer ${token}`,
        accept: 'application/json',
        ...form.getHeaders(),
      },
      timeout: 30000,
    });

    const responseData = response.data;
    logger.debug('[parseText] Response from RAG API', responseData);

    // An absent `text` field (even an empty string is valid) means the API
    // did not actually parse the file.
    if (!('text' in responseData)) {
      throw new Error('RAG API did not return parsed text');
    }

    return {
      text: responseData.text,
      bytes: Buffer.byteLength(responseData.text, 'utf8'),
      source: FileSources.text,
    };
  } catch (error) {
    logger.warn('[parseText] RAG API text parsing failed, falling back to native parsing', error);
    return parseTextNative(file);
  }
}
/**
* Native JavaScript text parsing fallback
* Simple text file reading - complex formats handled by RAG API
* @param {Express.Multer.File} file - The uploaded file
* @returns {{text: string, bytes: number, source: string}}
*/
export function parseTextNative(file: Express.Multer.File): {
text: string;
bytes: number;
source: string;
} {
try {
const text = fs.readFileSync(file.path, 'utf8');
const bytes = Buffer.byteLength(text, 'utf8');
return {
text,
bytes,
source: FileSources.text,
};
} catch (error) {
console.error('[parseTextNative] Failed to parse file:', error);
throw new Error(`Failed to read file as text: ${error}`);
}
}

View file

@ -0,0 +1,27 @@
/**
 * Minimal contract for the Speech-to-Text service consumed by audio file
 * processing. Mirrors the methods used by `processAudioFile`.
 */
export interface STTService {
  /** Returns the singleton service instance. */
  getInstance(): Promise<STTService>;
  /** Resolves the configured provider name and its request schema. */
  getProviderSchema(): Promise<[string, object]>;
  /** Sends the audio buffer to the provider and resolves to the transcription text. */
  sttRequest(
    provider: string,
    schema: object,
    params: { audioBuffer: Buffer; audioFile: AudioFileInfo },
  ): Promise<string>;
}

/** Metadata about the audio file passed along with an STT request. */
export interface AudioFileInfo {
  originalname: string;
  mimetype: string;
  size: number;
}

/** Subset of an uploaded (multer-style) file needed for audio processing. */
export interface FileObject {
  // Path on local disk where the upload was stored.
  path: string;
  originalname: string;
  mimetype: string;
  size: number;
}

/** Result of transcribing an audio file. */
export interface AudioProcessingResult {
  // Transcribed text returned by the STT provider.
  text: string;
  // UTF-8 byte length of `text`.
  bytes: number;
}

View file

@ -4,6 +4,7 @@ export * from './balance';
export * from './endpoints';
export * from './events';
export * from './error';
export * from './files';
export * from './google';
export * from './http';
export * from './mistral';

View file

@ -11,6 +11,7 @@ export * from './llm';
export * from './math';
export * from './openid';
export * from './tempChatRetention';
export * from './text';
export { default as Tokenizer } from './tokenizer';
export * from './yaml';
export * from './http';

View file

@ -7,6 +7,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -17,6 +18,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -26,6 +28,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'You are a helpful assistant',
maxContextTokens: 4096,
fileTokenLimit: 50000,
modelLabel: 'GPT-4',
model: 'gpt-4',
temperature: 0.7,
@ -37,6 +40,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('You are a helpful assistant');
expect(result.maxContextTokens).toBe(4096);
expect(result.fileTokenLimit).toBe(50000);
expect(result.modelLabel).toBe('GPT-4');
expect(result.modelOptions).toEqual({
model: 'gpt-4',
@ -50,6 +54,7 @@ describe('extractLibreChatParams', () => {
resendFiles: true,
promptPrefix: null,
maxContextTokens: 2048,
fileTokenLimit: undefined,
modelLabel: null,
model: 'claude-3',
};
@ -59,6 +64,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeNull();
expect(result.maxContextTokens).toBe(2048);
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeNull();
expect(result.modelOptions).toEqual({
model: 'claude-3',
@ -77,6 +83,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBe('Test prefix');
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({
model: 'gpt-3.5-turbo',
@ -90,6 +97,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -99,6 +107,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'Custom prompt',
maxContextTokens: 8192,
fileTokenLimit: 25000,
modelLabel: 'Custom Model',
// Model options
model: 'gpt-4',
@ -117,6 +126,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('Custom prompt');
expect(result.maxContextTokens).toBe(8192);
expect(result.fileTokenLimit).toBe(25000);
expect(result.modelLabel).toBe('Custom Model');
// Model options should include everything else

View file

@ -8,6 +8,7 @@ type LibreChatParams = {
resendFiles: boolean;
promptPrefix?: string | null;
maxContextTokens?: number;
fileTokenLimit?: number;
modelLabel?: string | null;
};
@ -32,6 +33,7 @@ export function extractLibreChatParams(
(librechat.resendFiles.default as boolean);
const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
const maxContextTokens = (delete modelOptions.maxContextTokens, options.maxContextTokens);
const fileTokenLimit = (delete modelOptions.fileTokenLimit, options.fileTokenLimit);
const modelLabel = (delete modelOptions.modelLabel, options.modelLabel);
return {
@ -40,6 +42,7 @@ export function extractLibreChatParams(
LibreChatKeys
>,
maxContextTokens,
fileTokenLimit,
promptPrefix,
resendFiles,
modelLabel,

View file

@ -0,0 +1,65 @@
import { logger } from '@librechat/data-schemas';
/**
 * Processes text content by counting tokens and truncating if it exceeds the specified limit.
 * When over the limit, a binary search over character positions finds the longest
 * prefix whose token count still fits.
 * @param text - The text content to process
 * @param tokenLimit - The maximum number of tokens allowed
 * @param tokenCountFn - Function to count tokens (sync or async; results are awaited)
 * @returns Promise resolving to object with processed text, token count, and truncation status
 */
export async function processTextWithTokenLimit({
  text,
  tokenLimit,
  tokenCountFn,
}: {
  text: string;
  tokenLimit: number;
  tokenCountFn: (text: string) => number | Promise<number>;
}): Promise<{ text: string; tokenCount: number; wasTruncated: boolean }> {
  const originalTokenCount = await tokenCountFn(text);

  if (originalTokenCount <= tokenLimit) {
    return {
      text,
      tokenCount: originalTokenCount,
      wasTruncated: false,
    };
  }

  logger.debug(
    `[textTokenLimiter] Text content exceeds token limit: ${originalTokenCount} > ${tokenLimit}, truncating...`,
  );

  /**
   * Binary search for the longest prefix that fits within the token limit.
   * The fitting token count is tracked alongside the prefix so no extra
   * tokenCountFn call is needed after the search.
   * NOTE(review): truncation is by UTF-16 code unit, so the cut point may split
   * a surrogate pair / grapheme at the boundary — presumably acceptable here.
   */
  let low = 0;
  let high = text.length;
  let bestText = '';
  let bestTokenCount = 0;

  while (low <= high) {
    const mid = Math.floor((low + high) / 2);
    const truncatedText = text.substring(0, mid);
    const tokenCount = await tokenCountFn(truncatedText);

    if (tokenCount <= tokenLimit) {
      /** Prefix fits — remember it (and its count) and try a longer one */
      bestText = truncatedText;
      bestTokenCount = tokenCount;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }

  logger.warn(
    `[textTokenLimiter] Text truncated from ${originalTokenCount} to ${bestTokenCount} tokens (limit: ${tokenLimit})`,
  );

  return {
    text: bestText,
    tokenCount: bestTokenCount,
    wasTruncated: true,
  };
}

View file

@ -14,7 +14,7 @@ import {
} from '../src/file-config';
describe('MIME Type Regex Patterns', () => {
const unsupportedMimeTypes = ['text/x-unknown', 'application/unknown', 'image/bmp', 'audio/mp3'];
const unsupportedMimeTypes = ['text/x-unknown', 'application/unknown', 'image/bmp'];
// Testing general supported MIME types
fullMimeTypesList.forEach((mimeType) => {

View file

@ -122,11 +122,27 @@ export const applicationMimeTypes =
/** Image MIME types eligible for upload (JPEG, GIF, PNG, WebP, HEIC/HEIF) */
export const imageMimeTypes = /^image\/(jpeg|gif|png|webp|heic|heif)$/;
/** Audio MIME types accepted for speech-to-text uploads */
export const audioMimeTypes =
  /^audio\/(mp3|mpeg|mpeg3|wav|wave|x-wav|ogg|vorbis|mp4|x-m4a|flac|x-flac|webm)$/;
/** Default MIME types routed to OCR: images, PDF, Office/OpenXML documents, EPUB */
export const defaultOCRMimeTypes = [
  imageMimeTypes,
  /^application\/pdf$/,
  /^application\/vnd\.openxmlformats-officedocument\.(wordprocessingml\.document|presentationml\.presentation|spreadsheetml\.sheet)$/,
  /^application\/vnd\.ms-(word|powerpoint|excel)$/,
  /^application\/epub\+zip$/,
];
/** Default MIME types handled by plaintext parsing ("upload as text") */
export const defaultTextMimeTypes = [textMimeTypes];
/** Default MIME types handled by speech-to-text */
export const defaultSTTMimeTypes = [audioMimeTypes];
/** All MIME type patterns accepted for file upload across handlers */
export const supportedMimeTypes = [
  textMimeTypes,
  excelMimeTypes,
  applicationMimeTypes,
  imageMimeTypes,
  audioMimeTypes,
  /** Supported by LC Code Interpreter PAI */
  /^image\/(svg|svg\+xml)$/,
];
@ -169,6 +185,7 @@ export const megabyte = 1024 * 1024;
export const mbToBytes = (mb: number): number => mb * megabyte;
const defaultSizeLimit = mbToBytes(512);
const defaultTokenLimit = 100000;
const assistantsFileConfig = {
fileLimit: 10,
fileSizeLimit: defaultSizeLimit,
@ -192,12 +209,22 @@ export const fileConfig = {
},
serverFileSizeLimit: defaultSizeLimit,
avatarSizeLimit: mbToBytes(2),
fileTokenLimit: defaultTokenLimit,
clientImageResize: {
enabled: false,
maxWidth: 1900,
maxHeight: 1900,
quality: 0.92,
},
ocr: {
supportedMimeTypes: defaultOCRMimeTypes,
},
text: {
supportedMimeTypes: defaultTextMimeTypes,
},
stt: {
supportedMimeTypes: defaultSTTMimeTypes,
},
checkType: function (fileType: string, supportedTypes: RegExp[] = supportedMimeTypes) {
return supportedTypes.some((regex) => regex.test(fileType));
},
@ -232,6 +259,7 @@ export const fileConfigSchema = z.object({
endpoints: z.record(endpointFileConfigSchema).optional(),
serverFileSizeLimit: z.number().min(0).optional(),
avatarSizeLimit: z.number().min(0).optional(),
fileTokenLimit: z.number().min(0).optional(),
imageGeneration: z
.object({
percentage: z.number().min(0).max(100).optional(),
@ -246,6 +274,16 @@ export const fileConfigSchema = z.object({
quality: z.number().min(0).max(1).optional(),
})
.optional(),
ocr: z
.object({
supportedMimeTypes: supportedMimeTypesSchema.optional(),
})
.optional(),
text: z
.object({
supportedMimeTypes: supportedMimeTypesSchema.optional(),
})
.optional(),
});
/** Helper function to safely convert string patterns to RegExp objects */
@ -261,7 +299,21 @@ export const convertStringsToRegex = (patterns: string[]): RegExp[] =>
}, []);
export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | undefined): FileConfig {
const mergedConfig = fileConfig as FileConfig;
const mergedConfig: FileConfig = {
...fileConfig,
ocr: {
...fileConfig.ocr,
supportedMimeTypes: fileConfig.ocr?.supportedMimeTypes || [],
},
text: {
...fileConfig.text,
supportedMimeTypes: fileConfig.text?.supportedMimeTypes || [],
},
stt: {
...fileConfig.stt,
supportedMimeTypes: fileConfig.stt?.supportedMimeTypes || [],
},
};
if (!dynamic) {
return mergedConfig;
}
@ -274,6 +326,10 @@ export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | unde
mergedConfig.avatarSizeLimit = mbToBytes(dynamic.avatarSizeLimit);
}
if (dynamic.fileTokenLimit !== undefined) {
mergedConfig.fileTokenLimit = dynamic.fileTokenLimit;
}
// Merge clientImageResize configuration
if (dynamic.clientImageResize !== undefined) {
mergedConfig.clientImageResize = {
@ -282,6 +338,26 @@ export function mergeFileConfig(dynamic: z.infer<typeof fileConfigSchema> | unde
};
}
if (dynamic.ocr !== undefined) {
mergedConfig.ocr = {
...mergedConfig.ocr,
...dynamic.ocr,
};
if (dynamic.ocr.supportedMimeTypes) {
mergedConfig.ocr.supportedMimeTypes = convertStringsToRegex(dynamic.ocr.supportedMimeTypes);
}
}
if (dynamic.text !== undefined) {
mergedConfig.text = {
...mergedConfig.text,
...dynamic.text,
};
if (dynamic.text.supportedMimeTypes) {
mergedConfig.text.supportedMimeTypes = convertStringsToRegex(dynamic.text.supportedMimeTypes);
}
}
if (!dynamic.endpoints) {
return mergedConfig;
}

View file

@ -139,6 +139,18 @@ export const librechat = {
placeholderCode: true,
optionType: 'model',
} as const,
/** Parameter-panel setting for the max tokens extracted from an uploaded file */
fileTokenLimit: {
  key: 'fileTokenLimit',
  label: 'com_ui_file_token_limit',
  labelCode: true,
  description: 'com_ui_file_token_limit_desc',
  descriptionCode: true,
  // NOTE(review): placeholder reuses the theme localization key 'com_nav_theme_system'
  // on a numeric token-limit input — looks like a copy-paste leftover; confirm whether
  // a dedicated placeholder key is intended.
  placeholder: 'com_nav_theme_system',
  placeholderCode: true,
  type: 'number',
  component: 'input',
  columnSpan: 2,
} as const,
};
const openAIParams: Record<string, SettingDefinition> = {
@ -625,6 +637,7 @@ const googleConfig: SettingsConfiguration = [
google.thinking,
google.thinkingBudget,
google.web_search,
librechat.fileTokenLimit,
];
const googleCol1: SettingsConfiguration = [
@ -643,6 +656,7 @@ const googleCol2: SettingsConfiguration = [
google.thinking,
google.thinkingBudget,
google.web_search,
librechat.fileTokenLimit,
];
const openAI: SettingsConfiguration = [
@ -663,6 +677,7 @@ const openAI: SettingsConfiguration = [
openAIParams.reasoning_summary,
openAIParams.verbosity,
openAIParams.disableStreaming,
librechat.fileTokenLimit,
];
const openAICol1: SettingsConfiguration = [
@ -687,6 +702,7 @@ const openAICol2: SettingsConfiguration = [
openAIParams.useResponsesApi,
openAIParams.web_search,
openAIParams.disableStreaming,
librechat.fileTokenLimit,
];
const anthropicConfig: SettingsConfiguration = [
@ -702,6 +718,7 @@ const anthropicConfig: SettingsConfiguration = [
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
librechat.fileTokenLimit,
];
const anthropicCol1: SettingsConfiguration = [
@ -721,6 +738,7 @@ const anthropicCol2: SettingsConfiguration = [
anthropic.thinking,
anthropic.thinkingBudget,
anthropic.web_search,
librechat.fileTokenLimit,
];
const bedrockAnthropic: SettingsConfiguration = [
@ -736,6 +754,7 @@ const bedrockAnthropic: SettingsConfiguration = [
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
librechat.fileTokenLimit,
];
const bedrockMistral: SettingsConfiguration = [
@ -747,6 +766,7 @@ const bedrockMistral: SettingsConfiguration = [
mistral.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockCohere: SettingsConfiguration = [
@ -758,6 +778,7 @@ const bedrockCohere: SettingsConfiguration = [
cohere.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockGeneral: SettingsConfiguration = [
@ -768,6 +789,7 @@ const bedrockGeneral: SettingsConfiguration = [
meta.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockAnthropicCol1: SettingsConfiguration = [
@ -787,6 +809,7 @@ const bedrockAnthropicCol2: SettingsConfiguration = [
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
librechat.fileTokenLimit,
];
const bedrockMistralCol1: SettingsConfiguration = [
@ -802,6 +825,7 @@ const bedrockMistralCol2: SettingsConfiguration = [
mistral.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockCohereCol1: SettingsConfiguration = [
@ -817,6 +841,7 @@ const bedrockCohereCol2: SettingsConfiguration = [
cohere.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
const bedrockGeneralCol1: SettingsConfiguration = [
@ -831,6 +856,7 @@ const bedrockGeneralCol2: SettingsConfiguration = [
meta.topP,
librechat.resendFiles,
bedrock.region,
librechat.fileTokenLimit,
];
export const paramSettings: Record<string, SettingsConfiguration | undefined> = {

View file

@ -680,6 +680,8 @@ export const tConversationSchema = z.object({
iconURL: z.string().nullable().optional(),
/* temporary chat */
expiredAt: z.string().nullable().optional(),
/* file token limits */
fileTokenLimit: coerceNumber.optional(),
/** @deprecated */
resendImages: z.boolean().optional(),
/** @deprecated */
@ -794,6 +796,8 @@ export const tQueryParamsSchema = tConversationSchema
* https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-instructions
* */
instructions: true,
/** @endpoints openAI, google, anthropic */
fileTokenLimit: true,
})
.merge(
z.object({
@ -850,6 +854,7 @@ export const googleBaseSchema = tConversationSchema.pick({
thinking: true,
thinkingBudget: true,
web_search: true,
fileTokenLimit: true,
iconURL: true,
greeting: true,
spec: true,
@ -1101,6 +1106,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
useResponsesApi: true,
web_search: true,
disableStreaming: true,
fileTokenLimit: true,
});
export const openAISchema = openAIBaseSchema
@ -1145,6 +1151,7 @@ export const anthropicBaseSchema = tConversationSchema.pick({
spec: true,
maxContextTokens: true,
web_search: true,
fileTokenLimit: true,
});
export const anthropicSchema = anthropicBaseSchema

View file

@ -47,6 +47,7 @@ export type FileConfig = {
endpoints: {
[key: string]: EndpointFileConfig;
};
fileTokenLimit?: number;
serverFileSizeLimit?: number;
avatarSizeLimit?: number;
clientImageResize?: {
@ -55,6 +56,39 @@ export type FileConfig = {
maxHeight?: number;
quality?: number;
};
ocr?: {
supportedMimeTypes?: RegExp[];
};
text?: {
supportedMimeTypes?: RegExp[];
};
stt?: {
supportedMimeTypes?: RegExp[];
};
checkType?: (fileType: string, supportedTypes: RegExp[]) => boolean;
};
/**
 * Raw (pre-merge) file configuration as provided in config input.
 * Mirrors FileConfig, except supportedMimeTypes are string patterns
 * (compiled to RegExp during merge) and all fields are optional.
 */
export type FileConfigInput = {
  endpoints?: {
    [key: string]: EndpointFileConfig;
  };
  /** Max tokens extracted per uploaded file — added for consistency with FileConfig and fileConfigSchema, which both declare it */
  fileTokenLimit?: number;
  serverFileSizeLimit?: number;
  avatarSizeLimit?: number;
  clientImageResize?: {
    enabled?: boolean;
    maxWidth?: number;
    maxHeight?: number;
    quality?: number;
  };
  ocr?: {
    supportedMimeTypes?: string[];
  };
  text?: {
    supportedMimeTypes?: string[];
  };
  stt?: {
    supportedMimeTypes?: string[];
  };
  checkType?: (fileType: string, supportedTypes: RegExp[]) => boolean;
};

View file

@ -141,6 +141,9 @@ export const conversationPreset = {
disableStreaming: {
type: Boolean,
},
fileTokenLimit: {
type: Number,
},
/** Reasoning models only */
reasoning_effort: {
type: String,

View file

@ -51,6 +51,7 @@ export interface IPreset extends Document {
useResponsesApi?: boolean;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;
// end of additional fields
agentOptions?: unknown;
}

View file

@ -50,6 +50,7 @@ export interface IConversation extends Document {
useResponsesApi?: boolean;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;
// Additional fields
files?: string[];
expiredAt?: Date;