LibreChat/packages/api/src/utils/llm.test.ts
Danny Avila 48f6f8f2f8
📎 feat: Upload as Text Support for Plaintext, STT, RAG, and Token Limits (#8868)
* 🪶 feat: Add Support for Uploading Plaintext Files

feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unittest to fail and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types

* 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)

* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented

* ✈️ refactor: Migrate Text Parsing to TS (#8892)

* refactor: move generateShortLivedToken to packages/api

* refactor: move textParsing logic into packages/api

* refactor: reduce nesting and dry code with createTextFile

* fix: add proper source handling

* fix: mock new parseText and parseTextNative functions in jest file

* ci: add test coverage for textParser

* 💬 feat: Add Audio File Support to Upload as Text (#8893)

* feat: add STT support for Upload as Text

* refactor: move processAudioFile to packages/api

* refactor: move textParsing from utils to files

* fix: remove audio/mp3 from unsupported mimetypes test since it is now supported

* ✂️ feat: Configurable File Token Limits and Truncation (#8911)

* feat: add configurable fileTokenLimit default value

* fix: add stt to fileConfig merge logic

* fix: add fileTokenLimit to mergeFileConfig logic so configurable value is actually respected from yaml

* feat: add token limiting to parsed text files

* fix: add extraction logic and update tests so fileTokenLimit isn't sent to LLM providers

* fix: address comments

* refactor: rename textTokenLimiter.ts to text.ts

* chore: update form-data package to address CVE-2025-7783 and update package-lock

* feat: use default supported mime types for ocr on frontend file validation

* fix: should be using logger.debug not console.debug

* fix: mock existsSync in text.spec.ts

* fix: mock logger rather than every one of its function calls

* fix: reorganize imports and streamline file upload processing logic

* refactor: update createTextFile function to use destructured parameters and improve readability

* chore: update file validation to use EToolResources for improved type safety

* chore: update import path for types in audio processing module

* fix: update file configuration access and replace console.debug with logger.debug for improved logging

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
2025-08-27 03:44:39 -04:00

199 lines
5.9 KiB
TypeScript

import { extractLibreChatParams } from './llm';
/**
 * Tests for extractLibreChatParams, which splits a raw options object into the
 * LibreChat-specific params (resendFiles, promptPrefix, maxContextTokens,
 * fileTokenLimit, modelLabel) and `modelOptions` — everything else, passed
 * through untouched to the LLM provider.
 */
describe('extractLibreChatParams', () => {
  /**
   * Asserts the all-defaults result shape: resendFiles=true (the only param
   * with a default), every other LibreChat param absent, and no model options.
   */
  const expectDefaults = (result: ReturnType<typeof extractLibreChatParams>) => {
    expect(result.resendFiles).toBe(true);
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.fileTokenLimit).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({});
  };

  it('should return defaults when options is undefined', () => {
    expectDefaults(extractLibreChatParams(undefined));
  });

  // NOTE(review): this case previously claimed to cover `null` but actually
  // exercised the zero-argument call; the description now matches the call.
  it('should return defaults when called with no arguments', () => {
    expectDefaults(extractLibreChatParams());
  });

  it('should handle empty options object', () => {
    expectDefaults(extractLibreChatParams({}));
  });

  it('should extract all LibreChat params and leave model options', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'You are a helpful assistant',
      maxContextTokens: 4096,
      fileTokenLimit: 50000,
      modelLabel: 'GPT-4',
      model: 'gpt-4',
      temperature: 0.7,
      max_tokens: 1000,
    };

    const result = extractLibreChatParams(options);

    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBe('You are a helpful assistant');
    expect(result.maxContextTokens).toBe(4096);
    expect(result.fileTokenLimit).toBe(50000);
    expect(result.modelLabel).toBe('GPT-4');
    // Only provider-facing options should remain after extraction.
    expect(result.modelOptions).toEqual({
      model: 'gpt-4',
      temperature: 0.7,
      max_tokens: 1000,
    });
  });

  it('should handle null values for LibreChat params', () => {
    const options = {
      resendFiles: true,
      promptPrefix: null,
      maxContextTokens: 2048,
      fileTokenLimit: undefined,
      modelLabel: null,
      model: 'claude-3',
    };

    const result = extractLibreChatParams(options);

    // Explicit nulls are preserved as-is (not coerced to undefined).
    expect(result.resendFiles).toBe(true);
    expect(result.promptPrefix).toBeNull();
    expect(result.maxContextTokens).toBe(2048);
    expect(result.fileTokenLimit).toBeUndefined();
    expect(result.modelLabel).toBeNull();
    expect(result.modelOptions).toEqual({
      model: 'claude-3',
    });
  });

  it('should use default for resendFiles when not provided', () => {
    const options = {
      promptPrefix: 'Test prefix',
      model: 'gpt-3.5-turbo',
      temperature: 0.5,
    };

    const result = extractLibreChatParams(options);

    expect(result.resendFiles).toBe(true); // Should use default
    expect(result.promptPrefix).toBe('Test prefix');
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.fileTokenLimit).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({
      model: 'gpt-3.5-turbo',
      temperature: 0.5,
    });
  });

  it('should only extract known LibreChat params', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'Custom prompt',
      maxContextTokens: 8192,
      fileTokenLimit: 25000,
      modelLabel: 'Custom Model',
      // Model options
      model: 'gpt-4',
      temperature: 0.9,
      top_p: 0.95,
      frequency_penalty: 0.5,
      presence_penalty: 0.5,
      // Unknown params should stay in modelOptions
      unknownParam: 'should remain',
      customSetting: 123,
    };

    const result = extractLibreChatParams(options);

    // LibreChat params extracted
    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBe('Custom prompt');
    expect(result.maxContextTokens).toBe(8192);
    expect(result.fileTokenLimit).toBe(25000);
    expect(result.modelLabel).toBe('Custom Model');
    // Model options should include everything else, including unknown keys.
    expect(result.modelOptions).toEqual({
      model: 'gpt-4',
      temperature: 0.9,
      top_p: 0.95,
      frequency_penalty: 0.5,
      presence_penalty: 0.5,
      unknownParam: 'should remain',
      customSetting: 123,
    });
  });

  it('should not mutate the original options object', () => {
    const options = {
      resendFiles: false,
      promptPrefix: 'Test',
      model: 'gpt-4',
      temperature: 0.7,
    };
    const originalOptions = { ...options };

    extractLibreChatParams(options);

    // Extraction must be non-destructive to the caller's object.
    expect(options).toEqual(originalOptions);
  });

  it('should handle undefined values for optional LibreChat params', () => {
    const options = {
      resendFiles: false,
      promptPrefix: undefined,
      maxContextTokens: undefined,
      modelLabel: undefined,
      model: 'claude-2',
    };

    const result = extractLibreChatParams(options);

    expect(result.resendFiles).toBe(false);
    expect(result.promptPrefix).toBeUndefined();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeUndefined();
    expect(result.modelOptions).toEqual({
      model: 'claude-2',
    });
  });

  it('should handle mixed null and undefined values', () => {
    const options = {
      promptPrefix: null,
      maxContextTokens: undefined,
      modelLabel: null,
      model: 'gpt-3.5-turbo',
      stop: ['\\n', '\\n\\n'],
    };

    const result = extractLibreChatParams(options);

    expect(result.resendFiles).toBe(true); // default
    expect(result.promptPrefix).toBeNull();
    expect(result.maxContextTokens).toBeUndefined();
    expect(result.modelLabel).toBeNull();
    expect(result.modelOptions).toEqual({
      model: 'gpt-3.5-turbo',
      stop: ['\\n', '\\n\\n'],
    });
  });
});