📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)

* 🪶 feat: Add Support for Uploading Plaintext Files

feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unittest to fail and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)

* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)

* refactor: move generateShortLivedToken to packages/api

* refactor: move textParsing logic into packages/api

* refactor: reduce nesting and dry code with createTextFile

* fix: add proper source handling

* fix: mock new parseText and parseTextNative functions in jest file

* ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)

* feat: add STT support for Upload as Text

* refactor: move processAudioFile to packages/api

* refactor: move textParsing from utils to files

* fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)

* feat: add configurable fileTokenLimit default value

* fix: add stt to fileConfig merge logic

* fix: add fileTokenLimit to mergeFileConfig logic so configurable value is actually respected from yaml

* feat: add token limiting to parsed text files

* fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers

* fix: address comments

* refactor: rename textTokenLimiter.ts to text.ts

* chore: update form-data package to address CVE-2025-7783 and update package-lock

* feat: use default supported mime types for ocr on frontend file validation

* fix: should be using logger.debug not console.debug

* fix: mock existsSync in text.spec.ts

* fix: mock logger rather than every one of its function calls

* fix: reorganize imports and streamline file upload processing logic

* refactor: update createTextFile function to use destructured parameters and improve readability

* chore: update file validation to use EToolResources for improved type safety

* chore: update import path for types in audio processing module

* fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
This commit is contained in:
Danny Avila 2025-08-27 03:44:39 -04:00 committed by GitHub
parent 74bc0440f0
commit 48f6f8f2f8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 847 additions and 75 deletions

View file

@ -1 +1,2 @@
export * from './encryption';
export * from './jwt';

View file

@ -0,0 +1,14 @@
import jwt from 'jsonwebtoken';
/**
 * Generates a short-lived JWT for internal service-to-service calls (e.g. the RAG API).
 *
 * @param userId - The ID of the user, embedded as the `id` claim
 * @param expireIn - Token lifetime accepted by `jsonwebtoken` (default `'5m'`)
 * @returns The signed HS256 JWT
 * @throws {Error} If the `JWT_SECRET` environment variable is not set
 */
export const generateShortLivedToken = (userId: string, expireIn: string = '5m'): string => {
  const secret = process.env.JWT_SECRET;
  if (!secret) {
    // Fail fast with a clear message instead of letting jwt.sign throw a cryptic error
    throw new Error('JWT_SECRET environment variable is not set');
  }
  return jwt.sign({ id: userId }, secret, {
    expiresIn: expireIn,
    algorithm: 'HS256',
  });
};

View file

@ -0,0 +1,38 @@
import fs from 'fs';
import { logger } from '@librechat/data-schemas';
import type { STTService, AudioFileInfo, FileObject, AudioProcessingResult } from '~/types';
/**
 * Transcribes an uploaded audio file via the configured Speech-to-Text (STT) service.
 *
 * Reads the audio from disk, resolves the active STT provider and its request schema,
 * then submits the audio for transcription.
 *
 * @param params - The parameters object
 * @param params.file - The uploaded audio file to transcribe
 * @param params.sttService - The STT service instance used to perform the request
 * @returns The transcribed text and its size in bytes (UTF-8)
 * @throws {Error} If reading the file or the STT request fails
 */
export async function processAudioFile({
  file,
  sttService,
}: {
  file: FileObject;
  sttService: STTService;
}): Promise<AudioProcessingResult> {
  try {
    const { originalname, mimetype, size } = file;
    const audioBuffer = await fs.promises.readFile(file.path);
    const [provider, sttSchema] = await sttService.getProviderSchema();
    const text = await sttService.sttRequest(provider, sttSchema, {
      audioBuffer,
      audioFile: { originalname, mimetype, size },
    });
    const bytes = Buffer.byteLength(text, 'utf8');
    return { text, bytes };
  } catch (error) {
    logger.error('Error processing audio file with STT:', error);
    throw new Error(`Failed to process audio file: ${(error as Error).message}`);
  }
}

View file

@ -1,2 +1,4 @@
export * from './mistral/crud';
export * from './audio';
export * from './text';
export * from './parse';

View file

@ -0,0 +1,255 @@
import { FileSources } from 'librechat-data-provider';
import { Readable } from 'stream';

// Mock the logger BEFORE importing the module under test so the mock is in place
// when './text' binds it (jest hoists jest.mock calls, but keeping the order explicit
// documents the intent).
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

import { parseTextNative, parseText } from './text';

// File-system access is mocked; each test controls file contents via readFileSync.
jest.mock('fs', () => ({
  readFileSync: jest.fn(),
  createReadStream: jest.fn(),
}));
jest.mock('../crypto/jwt', () => ({
  generateShortLivedToken: jest.fn(),
}));
// Mock axios (used for the RAG API health check and the /text upload), including
// the interceptors object some axios consumers touch at import time.
jest.mock('axios', () => ({
  get: jest.fn(),
  post: jest.fn(),
  interceptors: {
    request: { use: jest.fn(), eject: jest.fn() },
    response: { use: jest.fn(), eject: jest.fn() },
  },
}));
jest.mock('form-data', () => {
  return jest.fn().mockImplementation(() => ({
    append: jest.fn(),
    getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
  }));
});

import fs, { ReadStream } from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { generateShortLivedToken } from '../crypto/jwt';

// Typed views of the mocked modules for convenient per-test configuration.
const mockedFs = fs as jest.Mocked<typeof fs>;
const mockedAxios = axios as jest.Mocked<typeof axios>;
const mockedFormData = FormData as jest.MockedClass<typeof FormData>;
const mockedGenerateShortLivedToken = generateShortLivedToken as jest.MockedFunction<
  typeof generateShortLivedToken
>;

describe('text', () => {
  // Minimal multer file fixture shared by all tests.
  const mockFile: Express.Multer.File = {
    fieldname: 'file',
    originalname: 'test.txt',
    encoding: '7bit',
    mimetype: 'text/plain',
    size: 100,
    destination: '/tmp',
    filename: 'test.txt',
    path: '/tmp/test.txt',
    buffer: Buffer.from('test content'),
    stream: new Readable(),
  };

  const mockReq = {
    user: { id: 'user123' },
  };

  const mockFileId = 'file123';

  beforeEach(() => {
    jest.clearAllMocks();
    // parseText branches on RAG_API_URL; start every test without it defined.
    delete process.env.RAG_API_URL;
  });

  describe('parseTextNative', () => {
    it('should successfully parse a text file', () => {
      const mockText = 'Hello, world!';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = parseTextNative(mockFile);

      expect(mockedFs.readFileSync).toHaveBeenCalledWith('/tmp/test.txt', 'utf8');
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should throw an error when file cannot be read', () => {
      const mockError = new Error('File not found');
      mockedFs.readFileSync.mockImplementation(() => {
        throw mockError;
      });

      // The thrown message embeds the original error via template interpolation.
      expect(() => parseTextNative(mockFile)).toThrow(
        'Failed to read file as text: Error: File not found',
      );
    });
  });

  describe('parseText', () => {
    beforeEach(() => {
      mockedGenerateShortLivedToken.mockReturnValue('mock-jwt-token');
      const mockFormDataInstance = {
        append: jest.fn(),
        getHeaders: jest.fn().mockReturnValue({ 'content-type': 'multipart/form-data' }),
      };
      mockedFormData.mockImplementation(() => mockFormDataInstance as unknown as FormData);
      mockedFs.createReadStream.mockReturnValue({} as unknown as ReadStream);
    });

    it('should fall back to native parsing when RAG_API_URL is not defined', async () => {
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
      // No health check should even be attempted without a configured URL.
      expect(mockedAxios.get).not.toHaveBeenCalled();
    });

    it('should fall back to native parsing when health check fails', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockRejectedValue(new Error('Health check failed'));

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(mockedAxios.get).toHaveBeenCalledWith('http://rag-api.test/health', {
        timeout: 5000,
      });
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when health check returns non-OK status', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 500,
        statusText: 'Internal Server Error',
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should accept empty text as valid RAG API response', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {
          text: '',
        },
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      // Empty string is a legitimate parse result and must NOT trigger the fallback.
      expect(result).toEqual({
        text: '',
        bytes: 0,
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when RAG API response lacks text property', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);
      mockedAxios.get.mockResolvedValue({
        status: 200,
        statusText: 'OK',
      });
      mockedAxios.post.mockResolvedValue({
        data: {},
      });

      const result = await parseText({
        req: mockReq,
        file: mockFile,
        file_id: mockFileId,
      });

      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });

    it('should fall back to native parsing when user is undefined', async () => {
      process.env.RAG_API_URL = 'http://rag-api.test';
      const mockText = 'Native parsing result';
      mockedFs.readFileSync.mockReturnValue(mockText);

      const result = await parseText({
        req: { user: undefined },
        file: mockFile,
        file_id: mockFileId,
      });

      // Without a user ID, no token is minted and no network calls are made.
      expect(mockedGenerateShortLivedToken).not.toHaveBeenCalled();
      expect(mockedAxios.get).not.toHaveBeenCalled();
      expect(mockedAxios.post).not.toHaveBeenCalled();
      expect(result).toEqual({
        text: mockText,
        bytes: Buffer.byteLength(mockText, 'utf8'),
        source: FileSources.text,
      });
    });
  });
});

View file

@ -0,0 +1,113 @@
import fs from 'fs';
import axios from 'axios';
import FormData from 'form-data';
import { logger } from '@librechat/data-schemas';
import { FileSources } from 'librechat-data-provider';
import type { Request as ServerRequest } from 'express';
import { generateShortLivedToken } from '~/crypto/jwt';
/**
 * Attempts to parse text using the RAG API, falling back to native text parsing
 * whenever the API is not configured, the request is unauthenticated, the service
 * is unhealthy, or the upload fails.
 *
 * @param params - The parameters object
 * @param params.req - The Express request object (only `user` is consulted)
 * @param params.file - The uploaded file
 * @param params.file_id - The file ID
 * @returns The parsed text, its UTF-8 byte length, and the parsing source
 */
export async function parseText({
  req,
  file,
  file_id,
}: {
  req: Pick<ServerRequest, 'user'> & {
    user?: { id: string };
  };
  file: Express.Multer.File;
  file_id: string;
}): Promise<{ text: string; bytes: number; source: string }> {
  const ragApiUrl = process.env.RAG_API_URL;
  if (!ragApiUrl) {
    logger.debug('[parseText] RAG_API_URL not defined, falling back to native text parsing');
    return parseTextNative(file);
  }

  const userId = req.user?.id;
  if (!userId) {
    logger.debug('[parseText] No user ID provided, falling back to native text parsing');
    return parseTextNative(file);
  }

  /** Pings the RAG API; a healthy service reports status 200 or statusText 'OK'. */
  const isHealthy = async (): Promise<boolean> => {
    const healthResponse = await axios.get(`${ragApiUrl}/health`, {
      timeout: 5000,
    });
    return healthResponse?.statusText === 'OK' || healthResponse?.status === 200;
  };

  try {
    if (!(await isHealthy())) {
      logger.debug('[parseText] RAG API health check failed, falling back to native parsing');
      return parseTextNative(file);
    }
  } catch (healthError) {
    logger.debug(
      '[parseText] RAG API health check failed, falling back to native parsing',
      healthError,
    );
    return parseTextNative(file);
  }

  try {
    const formData = new FormData();
    formData.append('file_id', file_id);
    formData.append('file', fs.createReadStream(file.path));

    const response = await axios.post(`${ragApiUrl}/text`, formData, {
      headers: {
        Authorization: `Bearer ${generateShortLivedToken(userId)}`,
        accept: 'application/json',
        ...formData.getHeaders(),
      },
      timeout: 30000,
    });

    const responseData = response.data;
    logger.debug('[parseText] Response from RAG API', responseData);

    // An empty string is valid; only a missing `text` property means failure.
    if (!('text' in responseData)) {
      throw new Error('RAG API did not return parsed text');
    }

    return {
      text: responseData.text,
      bytes: Buffer.byteLength(responseData.text, 'utf8'),
      source: FileSources.text,
    };
  } catch (error) {
    logger.warn('[parseText] RAG API text parsing failed, falling back to native parsing', error);
    return parseTextNative(file);
  }
}
/**
* Native JavaScript text parsing fallback
* Simple text file reading - complex formats handled by RAG API
* @param {Express.Multer.File} file - The uploaded file
* @returns {{text: string, bytes: number, source: string}}
*/
export function parseTextNative(file: Express.Multer.File): {
text: string;
bytes: number;
source: string;
} {
try {
const text = fs.readFileSync(file.path, 'utf8');
const bytes = Buffer.byteLength(text, 'utf8');
return {
text,
bytes,
source: FileSources.text,
};
} catch (error) {
console.error('[parseTextNative] Failed to parse file:', error);
throw new Error(`Failed to read file as text: ${error}`);
}
}

View file

@ -0,0 +1,27 @@
/**
 * Contract for a Speech-to-Text (STT) service implementation.
 */
export interface STTService {
  /** Returns an STT service instance (presumably a shared singleton — confirm with implementation). */
  getInstance(): Promise<STTService>;
  /** Resolves the active provider name and its request schema as a tuple. */
  getProviderSchema(): Promise<[string, object]>;
  /** Submits audio to the given provider/schema and resolves to the transcribed text. */
  sttRequest(
    provider: string,
    schema: object,
    params: { audioBuffer: Buffer; audioFile: AudioFileInfo },
  ): Promise<string>;
}

/** Metadata describing an uploaded audio file (no file contents). */
export interface AudioFileInfo {
  originalname: string;
  mimetype: string;
  size: number;
}

/** Minimal on-disk file shape required by audio processing. */
export interface FileObject {
  path: string;
  originalname: string;
  mimetype: string;
  size: number;
}

/** Result of STT processing: transcribed text and its byte length (UTF-8). */
export interface AudioProcessingResult {
  text: string;
  bytes: number;
}

View file

@ -4,6 +4,7 @@ export * from './balance';
export * from './endpoints';
export * from './events';
export * from './error';
export * from './files';
export * from './google';
export * from './http';
export * from './mistral';

View file

@ -11,6 +11,7 @@ export * from './llm';
export * from './math';
export * from './openid';
export * from './tempChatRetention';
export * from './text';
export { default as Tokenizer } from './tokenizer';
export * from './yaml';
export * from './http';

View file

@ -7,6 +7,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -17,6 +18,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -26,6 +28,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'You are a helpful assistant',
maxContextTokens: 4096,
fileTokenLimit: 50000,
modelLabel: 'GPT-4',
model: 'gpt-4',
temperature: 0.7,
@ -37,6 +40,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('You are a helpful assistant');
expect(result.maxContextTokens).toBe(4096);
expect(result.fileTokenLimit).toBe(50000);
expect(result.modelLabel).toBe('GPT-4');
expect(result.modelOptions).toEqual({
model: 'gpt-4',
@ -50,6 +54,7 @@ describe('extractLibreChatParams', () => {
resendFiles: true,
promptPrefix: null,
maxContextTokens: 2048,
fileTokenLimit: undefined,
modelLabel: null,
model: 'claude-3',
};
@ -59,6 +64,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true);
expect(result.promptPrefix).toBeNull();
expect(result.maxContextTokens).toBe(2048);
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeNull();
expect(result.modelOptions).toEqual({
model: 'claude-3',
@ -77,6 +83,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBe('Test prefix');
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({
model: 'gpt-3.5-turbo',
@ -90,6 +97,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(true); // Should use default
expect(result.promptPrefix).toBeUndefined();
expect(result.maxContextTokens).toBeUndefined();
expect(result.fileTokenLimit).toBeUndefined();
expect(result.modelLabel).toBeUndefined();
expect(result.modelOptions).toEqual({});
});
@ -99,6 +107,7 @@ describe('extractLibreChatParams', () => {
resendFiles: false,
promptPrefix: 'Custom prompt',
maxContextTokens: 8192,
fileTokenLimit: 25000,
modelLabel: 'Custom Model',
// Model options
model: 'gpt-4',
@ -117,6 +126,7 @@ describe('extractLibreChatParams', () => {
expect(result.resendFiles).toBe(false);
expect(result.promptPrefix).toBe('Custom prompt');
expect(result.maxContextTokens).toBe(8192);
expect(result.fileTokenLimit).toBe(25000);
expect(result.modelLabel).toBe('Custom Model');
// Model options should include everything else

View file

@ -8,6 +8,7 @@ type LibreChatParams = {
resendFiles: boolean;
promptPrefix?: string | null;
maxContextTokens?: number;
fileTokenLimit?: number;
modelLabel?: string | null;
};
@ -32,6 +33,7 @@ export function extractLibreChatParams(
(librechat.resendFiles.default as boolean);
const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
const maxContextTokens = (delete modelOptions.maxContextTokens, options.maxContextTokens);
const fileTokenLimit = (delete modelOptions.fileTokenLimit, options.fileTokenLimit);
const modelLabel = (delete modelOptions.modelLabel, options.modelLabel);
return {
@ -40,6 +42,7 @@ export function extractLibreChatParams(
LibreChatKeys
>,
maxContextTokens,
fileTokenLimit,
promptPrefix,
resendFiles,
modelLabel,

View file

@ -0,0 +1,65 @@
import { logger } from '@librechat/data-schemas';
/**
 * Processes text content by counting tokens and truncating if it exceeds the specified limit.
 *
 * @param text - The text content to process
 * @param tokenLimit - The maximum number of tokens allowed
 * @param tokenCountFn - Function to count tokens; may be synchronous or return a Promise
 * @returns Promise resolving to object with processed text, token count, and truncation status
 */
export async function processTextWithTokenLimit({
  text,
  tokenLimit,
  tokenCountFn,
}: {
  text: string;
  tokenLimit: number;
  /** Widened to allow async counters; the body already awaits every call. */
  tokenCountFn: (text: string) => number | Promise<number>;
}): Promise<{ text: string; tokenCount: number; wasTruncated: boolean }> {
  const originalTokenCount = await tokenCountFn(text);

  if (originalTokenCount <= tokenLimit) {
    return {
      text,
      tokenCount: originalTokenCount,
      wasTruncated: false,
    };
  }

  /**
   * Binary search over the cut position: token counts grow monotonically with
   * prefix length, so this finds the longest prefix within the limit using
   * O(log n) count calls.
   */
  let low = 0;
  let high = text.length;
  let bestText = '';
  let bestTokenCount = 0;

  logger.debug(
    `[textTokenLimiter] Text content exceeds token limit: ${originalTokenCount} > ${tokenLimit}, truncating...`,
  );

  while (low <= high) {
    const mid = Math.floor((low + high) / 2);
    const truncatedText = text.substring(0, mid);
    const tokenCount = await tokenCountFn(truncatedText);

    if (tokenCount <= tokenLimit) {
      // Track the count alongside the text to avoid a redundant recount afterwards.
      bestText = truncatedText;
      bestTokenCount = tokenCount;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }

  logger.warn(
    `[textTokenLimiter] Text truncated from ${originalTokenCount} to ${bestTokenCount} tokens (limit: ${tokenLimit})`,
  );

  return {
    text: bestText,
    tokenCount: bestTokenCount,
    wasTruncated: true,
  };
}