📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)

* 🪶 feat: Add Support for Uploading Plaintext Files

feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unittest to fail and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)

* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)

* refactor: move generateShortLivedToken to packages/api

* refactor: move textParsing logic into packages/api

* refactor: reduce nesting and dry code with createTextFile

* fix: add proper source handling

* fix: mock new parseText and parseTextNative functions in jest file

* ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)

* feat: add STT support for Upload as Text

* refactor: move processAudioFile to packages/api

* refactor: move textParsing from utils to files

* fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)

* feat: add configurable fileTokenLimit default value

* fix: add stt to fileConfig merge logic

* fix: add fileTokenLimit to mergeFileConfig logic so configurable value is actually respected from yaml

* feat: add token limiting to parsed text files

* fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers

* fix: address comments

* refactor: rename textTokenLimiter.ts to text.ts

* chore: update form-data package to address CVE-2025-7783 and update package-lock

* feat: use default supported mime types for ocr on frontend file validation

* fix: should be using logger.debug not console.debug

* fix: mock existsSync in text.spec.ts

* fix: mock logger rather than every one of its function calls

* fix: reorganize imports and streamline file upload processing logic

* refactor: update createTextFile function to use destructured parameters and improve readability

* chore: update file validation to use EToolResources for improved type safety

* chore: update import path for types in audio processing module

* fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
This commit is contained in:
Danny Avila 2025-08-27 03:44:39 -04:00 committed by GitHub
parent 74bc0440f0
commit 48f6f8f2f8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 847 additions and 75 deletions

View file

@ -387,7 +387,8 @@ router.post('/', async (req, res) => {
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result')
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}

View file

@ -29,6 +29,17 @@ router.post('/', async (req, res) => {
} catch (error) {
// TODO: delete remote file if it exists
logger.error('[/files/images] Error processing file:', error);
let message = 'Error processing file';
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}
try {
const filepath = path.join(
appConfig.paths.imageOutput,
@ -39,7 +50,7 @@ router.post('/', async (req, res) => {
} catch (error) {
logger.error('[/files/images] Error deleting file:', error);
}
res.status(500).json({ message: 'Error processing file' });
res.status(500).json({ message });
} finally {
try {
await fs.unlink(req.file.path);

View file

@ -500,18 +500,6 @@ const resendVerificationEmail = async (req) => {
};
}
};
/**
* Generate a short-lived JWT token
* @param {String} userId - The ID of the user
* @param {String} [expireIn='5m'] - The expiration time for the token (default is 5 minutes)
* @returns {String} - The generated JWT token
*/
const generateShortLivedToken = (userId, expireIn = '5m') => {
return jwt.sign({ id: userId }, process.env.JWT_SECRET, {
expiresIn: expireIn,
algorithm: 'HS256',
});
};
module.exports = {
logoutUser,
@ -522,5 +510,4 @@ module.exports = {
setOpenIDAuthTokens,
requestPasswordReset,
resendVerificationEmail,
generateShortLivedToken,
};

View file

@ -6,6 +6,7 @@ const buildOptions = (endpoint, parsedBody) => {
modelLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = anthropicSettings.resendFiles.default,
promptCache = anthropicSettings.promptCache.default,
thinking = anthropicSettings.thinking.default,
@ -29,6 +30,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -6,6 +6,7 @@ const buildOptions = (endpoint, parsedBody) => {
modelLabel: name,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -24,6 +25,7 @@ const buildOptions = (endpoint, parsedBody) => {
spec,
promptPrefix,
maxContextTokens,
fileTokenLimit,
model_parameters,
});

View file

@ -7,6 +7,7 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -27,6 +28,7 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -12,6 +12,7 @@ const buildOptions = (endpoint, parsedBody) => {
spec,
artifacts,
maxContextTokens,
fileTokenLimit,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
@ -24,6 +25,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -7,6 +7,7 @@ const buildOptions = (endpoint, parsedBody) => {
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
@ -27,6 +28,7 @@ const buildOptions = (endpoint, parsedBody) => {
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});

View file

@ -319,4 +319,4 @@ async function speechToText(req, res) {
await sttService.processSpeechToText(req, res);
}
module.exports = { speechToText };
module.exports = { STTService, speechToText };

View file

@ -3,7 +3,7 @@ const path = require('path');
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { generateShortLivedToken } = require('@librechat/api');
const { getBufferMetadata } = require('~/server/utils');
const paths = require('~/config/paths');

View file

@ -1,10 +1,9 @@
const fs = require('fs');
const axios = require('axios');
const FormData = require('form-data');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { logAxiosError, generateShortLivedToken } = require('@librechat/api');
/**
* Deletes a file from the vector database. This function takes a file object, constructs the full path, and

View file

@ -1,13 +1,16 @@
const axios = require('axios');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { logAxiosError, processTextWithTokenLimit } = require('@librechat/api');
const {
FileSources,
VisionModes,
ImageDetail,
ContentTypes,
EModelEndpoint,
mergeFileConfig,
} = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const countTokens = require('~/server/utils/countTokens');
/**
* Converts a readable stream to a base64 encoded string.
@ -102,11 +105,28 @@ async function encodeAndFormat(req, files, endpoint, mode) {
return result;
}
const fileTokenLimit =
req.body?.fileTokenLimit ?? mergeFileConfig(req.config?.fileConfig).fileTokenLimit;
for (let file of files) {
/** @type {FileSources} */
const source = file.source ?? FileSources.local;
if (source === FileSources.text && file.text) {
result.text += `${!result.text ? 'Attached document(s):\n```md' : '\n\n---\n\n'}# "${file.filename}"\n${file.text}\n`;
let fileText = file.text;
const { text: limitedText, wasTruncated } = await processTextWithTokenLimit({
text: fileText,
tokenLimit: fileTokenLimit,
tokenCountFn: (text) => countTokens(text),
});
if (wasTruncated) {
logger.debug(
`[encodeAndFormat] Text content truncated for file: ${file.filename} due to token limits`,
);
}
result.text += `${!result.text ? 'Attached document(s):\n```md' : '\n\n---\n\n'}# "${file.filename}"\n${limitedText}\n`;
}
if (!file.height) {
@ -135,7 +155,7 @@ async function encodeAndFormat(req, files, endpoint, mode) {
base64Data = null;
continue;
} catch (error) {
// Error handling code
logger.error('Error processing image from blob storage:', error);
}
} else if (source !== FileSources.local && base64Only.has(endpoint)) {
const [_file, imageURL] = await preparePayload(req, file);

View file

@ -17,7 +17,8 @@ const {
isAssistantsEndpoint,
} = require('librechat-data-provider');
const { EnvVar } = require('@librechat/agents');
const { sanitizeFilename } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { sanitizeFilename, parseText, processAudioFile } = require('@librechat/api');
const {
convertImage,
resizeAndConvert,
@ -33,7 +34,7 @@ const { checkCapability } = require('~/server/services/Config');
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
const { getStrategyFunctions } = require('./strategies');
const { determineFileType } = require('~/server/utils');
const { logger } = require('~/config');
const { STTService } = require('./Audio/STTService');
/**
* Creates a modular file upload wrapper that ensures filename sanitization
@ -552,51 +553,84 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
}
// Note: File search processing continues to dual storage logic below
} else if (tool_resource === EToolResources.ocr) {
const isOCREnabled = await checkCapability(req, AgentCapabilities.ocr);
if (!isOCREnabled) {
throw new Error('OCR capability is not enabled for Agents');
}
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { file_id, temp_file_id = null } = metadata;
const {
text,
bytes,
// TODO: OCR images support?
images: _i,
filename,
filepath: ocrFileURL,
} = await uploadOCR({ req, appConfig, file, loadAuthValues });
const fileInfo = removeNullishValues({
text,
bytes,
file_id,
temp_file_id,
user: req.user.id,
type: 'text/plain',
filepath: ocrFileURL,
source: FileSources.text,
filename: filename ?? file.originalname,
model: messageAttachment ? undefined : req.body.model,
context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
});
if (!messageAttachment && tool_resource) {
await addAgentResourceFile({
req,
/**
* @param {object} params
* @param {string} params.text
* @param {number} params.bytes
* @param {string} params.filepath
* @param {string} params.type
* @return {Promise<void>}
*/
const createTextFile = async ({ text, bytes, filepath, type = 'text/plain' }) => {
const fileInfo = removeNullishValues({
text,
bytes,
file_id,
agent_id,
tool_resource,
temp_file_id,
user: req.user.id,
type,
filepath: filepath ?? file.path,
source: FileSources.text,
filename: file.originalname,
model: messageAttachment ? undefined : req.body.model,
context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
});
if (!messageAttachment && tool_resource) {
await addAgentResourceFile({
req,
file_id,
agent_id,
tool_resource,
});
}
const result = await createFile(fileInfo, true);
return res
.status(200)
.json({ message: 'Agent file uploaded and processed successfully', ...result });
};
const fileConfig = mergeFileConfig(appConfig.fileConfig);
const shouldUseOCR = fileConfig.checkType(
file.mimetype,
fileConfig.ocr?.supportedMimeTypes || [],
);
if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
throw new Error('OCR capability is not enabled for Agents');
} else if (shouldUseOCR) {
const { handleFileUpload: uploadOCR } = getStrategyFunctions(
appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
);
const { text, bytes, filepath: ocrFileURL } = await uploadOCR({ req, file, loadAuthValues });
return await createTextFile({ text, bytes, filepath: ocrFileURL });
}
const result = await createFile(fileInfo, true);
return res
.status(200)
.json({ message: 'Agent file uploaded and processed successfully', ...result });
const shouldUseSTT = fileConfig.checkType(
file.mimetype,
fileConfig.stt?.supportedMimeTypes || [],
);
if (shouldUseSTT) {
const sttService = await STTService.getInstance();
const { text, bytes } = await processAudioFile({ file, sttService });
return await createTextFile({ text, bytes });
}
const shouldUseText = fileConfig.checkType(
file.mimetype,
fileConfig.text?.supportedMimeTypes || [],
);
if (!shouldUseText) {
throw new Error(`File type ${file.mimetype} is not supported for OCR or text parsing`);
}
const { text, bytes } = await parseText({ req, file, file_id });
return await createTextFile({ text, bytes, type: file.mimetype });
}
// Dual storage pattern for RAG files: Storage + Vector DB

View file

@ -101,6 +101,11 @@ jest.mock('~/server/utils', () => ({
determineFileType: jest.fn(),
}));
jest.mock('@librechat/api', () => ({
parseText: jest.fn(),
parseTextNative: jest.fn(),
}));
// Import the actual processFiles function after all mocks are set up
const { processFiles } = require('./process');
const { updateFileUsage } = require('~/models/File');