mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-22 11:20:15 +01:00)
📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)
* 🪶 feat: Add Support for Uploading Plaintext Files
  * feat: delineate between OCR and text handling in fileConfig field of config file; also adds support for passing in mimetypes as just plain file extensions
  * feat: add showLabel bool to support future synthetic component DynamicDropdownInput
  * feat: add new combination dropdown-input component in params panel to support file type token limits
  * refactor: move hovercard to side to align with other hovercards
  * chore: clean up autogenerated comments
  * feat: add delineation to file upload path between text and ocr configured filetypes
  * feat: add token limit checks during file upload
  * refactor: move textParsing out of ocrEnabled logic
  * refactor: clean up types for filetype config
  * refactor: finish decoupling DynamicDropdownInput from fileTokenLimits
  * fix: move image token cost function into file to fix circular dependency causing unit test to fail, and remove unused var for linter
  * chore: remove out-of-scope code following review
  * refactor: make fileTokenLimit conform to existing styles
  * chore: remove unused localization string
  * chore: undo changes to DynamicInput and other strays
  * feat: add fileTokenLimit to all provider config panels
  * fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)
  * feat: implement RAG API integration for text parsing with fallback to native parsing
  * chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)
  * refactor: move generateShortLivedToken to packages/api
  * refactor: move textParsing logic into packages/api
  * refactor: reduce nesting and DRY up code with createTextFile
  * fix: add proper source handling
  * fix: mock new parseText and parseTextNative functions in jest file
  * ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)
  * feat: add STT support for Upload as Text
  * refactor: move processAudioFile to packages/api
  * refactor: move textParsing from utils to files
  * fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)
  * feat: add configurable fileTokenLimit default value
  * fix: add stt to fileConfig merge logic
  * fix: add fileTokenLimit to mergeFileConfig logic so the configurable value is actually respected from yaml
  * feat: add token limiting to parsed text files
  * fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers
  * fix: address comments
  * refactor: rename textTokenLimiter.ts to text.ts
  * chore: update form-data package to address CVE-2025-7783 and update package-lock
  * feat: use default supported mime types for ocr on frontend file validation
  * fix: should be using logger.debug not console.debug
  * fix: mock existsSync in text.spec.ts
  * fix: mock logger rather than every one of its function calls
  * fix: reorganize imports and streamline file upload processing logic
  * refactor: update createTextFile function to use destructured parameters and improve readability
  * chore: update file validation to use EToolResources for improved type safety
  * chore: update import path for types in audio processing module
  * fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
This commit is contained in:
parent 74bc0440f0
commit 48f6f8f2f8
41 changed files with 847 additions and 75 deletions
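Taken together, the sub-PRs wire up three pieces that appear in the diffs below: text parsing (RAG API first, native parsing as fallback), STT for audio uploads, and a configurable fileTokenLimit enforced at upload time. A rough TypeScript sketch of the parse-then-limit path follows; parseText and parseTextNative are only named in the commit message, so their stubs and signatures here are illustrative assumptions, while processTextWithTokenLimit is the real helper added in packages/api/src/utils/text.ts further down.

import { processTextWithTokenLimit } from './text';

// Sketch only: parseText/parseTextNative are referenced in the commit message,
// but their real signatures are not shown in this diff, so these stubs are
// illustrative assumptions rather than LibreChat APIs.
const parseText = async (_buf: Buffer): Promise<string> => {
  throw new Error('RAG API unreachable (stub)');
};
const parseTextNative = async (buf: Buffer): Promise<string> => buf.toString('utf8');

// Crude stand-in counter (~4 chars per token); the app uses its real tokenizer.
const countTokens = (text: string): number => Math.ceil(text.length / 4);

async function uploadAsText(buf: Buffer, fileTokenLimit: number) {
  let text: string;
  try {
    text = await parseText(buf); // prefer the RAG API for parsing
  } catch {
    text = await parseTextNative(buf); // fall back to native parsing
  }
  // Enforce the configurable per-file token limit before the text reaches the LLM.
  return processTextWithTokenLimit({ text, tokenLimit: fileTokenLimit, tokenCountFn: countTokens });
}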
packages/api/src/utils/index.ts
@@ -11,6 +11,7 @@ export * from './llm';
 export * from './math';
 export * from './openid';
 export * from './tempChatRetention';
+export * from './text';
 export { default as Tokenizer } from './tokenizer';
 export * from './yaml';
 export * from './http';
@@ -7,6 +7,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(true);
     expect(result.promptPrefix).toBeUndefined();
     expect(result.maxContextTokens).toBeUndefined();
+    expect(result.fileTokenLimit).toBeUndefined();
     expect(result.modelLabel).toBeUndefined();
     expect(result.modelOptions).toEqual({});
   });
@@ -17,6 +18,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(true);
     expect(result.promptPrefix).toBeUndefined();
     expect(result.maxContextTokens).toBeUndefined();
+    expect(result.fileTokenLimit).toBeUndefined();
     expect(result.modelLabel).toBeUndefined();
     expect(result.modelOptions).toEqual({});
   });
@@ -26,6 +28,7 @@ describe('extractLibreChatParams', () => {
       resendFiles: false,
       promptPrefix: 'You are a helpful assistant',
       maxContextTokens: 4096,
+      fileTokenLimit: 50000,
       modelLabel: 'GPT-4',
       model: 'gpt-4',
       temperature: 0.7,
@@ -37,6 +40,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(false);
     expect(result.promptPrefix).toBe('You are a helpful assistant');
     expect(result.maxContextTokens).toBe(4096);
+    expect(result.fileTokenLimit).toBe(50000);
     expect(result.modelLabel).toBe('GPT-4');
     expect(result.modelOptions).toEqual({
       model: 'gpt-4',
@@ -50,6 +54,7 @@ describe('extractLibreChatParams', () => {
       resendFiles: true,
       promptPrefix: null,
       maxContextTokens: 2048,
+      fileTokenLimit: undefined,
       modelLabel: null,
       model: 'claude-3',
     };
@@ -59,6 +64,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(true);
     expect(result.promptPrefix).toBeNull();
     expect(result.maxContextTokens).toBe(2048);
+    expect(result.fileTokenLimit).toBeUndefined();
     expect(result.modelLabel).toBeNull();
     expect(result.modelOptions).toEqual({
       model: 'claude-3',
@@ -77,6 +83,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(true); // Should use default
     expect(result.promptPrefix).toBe('Test prefix');
     expect(result.maxContextTokens).toBeUndefined();
+    expect(result.fileTokenLimit).toBeUndefined();
     expect(result.modelLabel).toBeUndefined();
     expect(result.modelOptions).toEqual({
       model: 'gpt-3.5-turbo',
@@ -90,6 +97,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(true); // Should use default
     expect(result.promptPrefix).toBeUndefined();
     expect(result.maxContextTokens).toBeUndefined();
+    expect(result.fileTokenLimit).toBeUndefined();
     expect(result.modelLabel).toBeUndefined();
     expect(result.modelOptions).toEqual({});
   });
@@ -99,6 +107,7 @@ describe('extractLibreChatParams', () => {
       resendFiles: false,
       promptPrefix: 'Custom prompt',
       maxContextTokens: 8192,
+      fileTokenLimit: 25000,
       modelLabel: 'Custom Model',
       // Model options
       model: 'gpt-4',
@@ -117,6 +126,7 @@ describe('extractLibreChatParams', () => {
     expect(result.resendFiles).toBe(false);
     expect(result.promptPrefix).toBe('Custom prompt');
     expect(result.maxContextTokens).toBe(8192);
+    expect(result.fileTokenLimit).toBe(25000);
     expect(result.modelLabel).toBe('Custom Model');

     // Model options should include everything else
@@ -8,6 +8,7 @@ type LibreChatParams = {
   resendFiles: boolean;
   promptPrefix?: string | null;
   maxContextTokens?: number;
+  fileTokenLimit?: number;
   modelLabel?: string | null;
 };

@@ -32,6 +33,7 @@ export function extractLibreChatParams(
     (librechat.resendFiles.default as boolean);
   const promptPrefix = (delete modelOptions.promptPrefix, options.promptPrefix);
   const maxContextTokens = (delete modelOptions.maxContextTokens, options.maxContextTokens);
+  const fileTokenLimit = (delete modelOptions.fileTokenLimit, options.fileTokenLimit);
   const modelLabel = (delete modelOptions.modelLabel, options.modelLabel);

   return {
@@ -40,6 +42,7 @@ export function extractLibreChatParams(
       LibreChatKeys
     >,
     maxContextTokens,
+    fileTokenLimit,
     promptPrefix,
     resendFiles,
     modelLabel,
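A note on the extraction idiom in the hunks above: an expression like (delete modelOptions.fileTokenLimit, options.fileTokenLimit) uses the JavaScript comma operator, so the LibreChat-specific key is stripped from modelOptions (and, per the commit message, never forwarded to LLM providers) while the expression still evaluates to the value read from options. A minimal sketch of the pattern, with illustrative data rather than the real LibreChat types:

// Sketch of the comma-operator extraction used above; object shapes are
// illustrative only.
const options = { model: 'gpt-4', fileTokenLimit: 25000 };
const modelOptions: Partial<typeof options> = { ...options };

// `delete` runs first, removing the key from the mutable copy; the comma
// expression then yields the value preserved on the original object.
const fileTokenLimit = (delete modelOptions.fileTokenLimit, options.fileTokenLimit);

console.log(modelOptions);   // { model: 'gpt-4' } -> safe to send to the provider
console.log(fileTokenLimit); // 25000 -> handled by LibreChat's upload pipeline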
packages/api/src/utils/text.ts (new file, 65 lines)
@@ -0,0 +1,65 @@
import { logger } from '@librechat/data-schemas';

/**
 * Processes text content by counting tokens and truncating if it exceeds the specified limit.
 * @param text - The text content to process
 * @param tokenLimit - The maximum number of tokens allowed
 * @param tokenCountFn - Function to count tokens
 * @returns Promise resolving to object with processed text, token count, and truncation status
 */
export async function processTextWithTokenLimit({
  text,
  tokenLimit,
  tokenCountFn,
}: {
  text: string;
  tokenLimit: number;
  tokenCountFn: (text: string) => number;
}): Promise<{ text: string; tokenCount: number; wasTruncated: boolean }> {
  const originalTokenCount = await tokenCountFn(text);

  if (originalTokenCount <= tokenLimit) {
    return {
      text,
      tokenCount: originalTokenCount,
      wasTruncated: false,
    };
  }

  /**
   * Doing binary search here to find the truncation point efficiently
   * (May be a better way to go about this)
   */
  let low = 0;
  let high = text.length;
  let bestText = '';

  logger.debug(
    `[textTokenLimiter] Text content exceeds token limit: ${originalTokenCount} > ${tokenLimit}, truncating...`,
  );

  while (low <= high) {
    const mid = Math.floor((low + high) / 2);
    const truncatedText = text.substring(0, mid);
    const tokenCount = await tokenCountFn(truncatedText);

    if (tokenCount <= tokenLimit) {
      bestText = truncatedText;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }

  const finalTokenCount = await tokenCountFn(bestText);

  logger.warn(
    `[textTokenLimiter] Text truncated from ${originalTokenCount} to ${finalTokenCount} tokens (limit: ${tokenLimit})`,
  );

  return {
    text: bestText,
    tokenCount: finalTokenCount,
    wasTruncated: true,
  };
}
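A usage sketch for the new helper, with a toy token counter (roughly 4 characters per token) standing in for the app's real tokenizer; only processTextWithTokenLimit itself comes from this diff. Two details worth noting: tokenCountFn is typed as returning number but each call is awaited, so an async counter would also work, and the binary search keeps the number of tokenization passes to O(log n) in the text length instead of re-tokenizing at every candidate cut point.

import { processTextWithTokenLimit } from './text';

// Toy counter for illustration only (~4 chars per token);
// real callers pass a tokenizer-backed function.
const tokenCountFn = (text: string): number => Math.ceil(text.length / 4);

const result = await processTextWithTokenLimit({
  text: 'long parsed file content...'.repeat(10000), // ~67,500 toy tokens
  tokenLimit: 50000,
  tokenCountFn,
});

console.log(result.wasTruncated, result.tokenCount); // -> true 50000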