mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-03-11 10:32:37 +01:00
Some checks are pending
Docker Dev Branch Images Build / build (Dockerfile, lc-dev, node) (push) Waiting to run
Docker Dev Branch Images Build / build (Dockerfile.multi, lc-dev-api, api-build) (push) Waiting to run
Docker Dev Images Build / build (Dockerfile, librechat-dev, node) (push) Waiting to run
Docker Dev Images Build / build (Dockerfile.multi, librechat-dev-api, api-build) (push) Waiting to run
Sync Locize Translations & Create Translation PR / Sync Translation Keys with Locize (push) Waiting to run
Sync Locize Translations & Create Translation PR / Create Translation PR on Version Published (push) Blocked by required conditions
* chore: Update dependencies by adding ai-tokenizer and removing tiktoken - Added ai-tokenizer version 1.0.6 to package.json and package-lock.json across multiple packages. - Removed tiktoken version 1.0.15 from package.json and package-lock.json in the same locations, streamlining dependency management. * refactor: replace js-tiktoken with ai-tokenizer - Added support for 'claude' encoding in the AgentClient class to improve model compatibility. - Updated Tokenizer class to utilize 'ai-tokenizer' for both 'o200k_base' and 'claude' encodings, replacing the previous 'tiktoken' dependency. - Refactored tests to reflect changes in tokenizer behavior and ensure accurate token counting for both encoding types. - Removed deprecated references to 'tiktoken' and adjusted related tests for improved clarity and functionality. * chore: remove tiktoken mocks from DALLE3 tests - Eliminated mock implementations of 'tiktoken' from DALLE3-related test files to streamline test setup and align with recent dependency updates. - Adjusted related test structures to ensure compatibility with the new tokenizer implementation. * chore: Add distinct encoding support for Anthropic Claude models - Introduced a new method `getEncoding` in the AgentClient class to handle the specific BPE tokenizer for Claude models, ensuring compatibility with the distinct encoding requirements. - Updated documentation to clarify the encoding logic for Claude and other models. * docs: Update return type documentation for getEncoding method in AgentClient - Clarified the return type of the getEncoding method to specify that it can return an EncodingName or undefined, enhancing code readability and type safety. * refactor: Tokenizer class and error handling - Exported the EncodingName type for broader usage. - Renamed encodingMap to encodingData for clarity. - Improved error handling in getTokenCount method to ensure recovery attempts are logged and return 0 on failure. 
- Updated countTokens function documentation to specify the use of 'o200k_base' encoding. * refactor: Simplify encoding documentation and export type - Updated the getEncoding method documentation to clarify the default behavior for non-Anthropic Claude models. - Exported the EncodingName type separately from the Tokenizer module for improved clarity and usage. * test: Update text processing tests for token limits - Adjusted test cases to handle smaller text sizes, changing scenarios from ~120k tokens to ~20k tokens for both the real tokenizer and countTokens functions. - Updated token limits in tests to reflect new constraints, ensuring tests accurately assess performance and call reduction. - Enhanced console log messages for clarity regarding token counts and reductions in the updated scenarios. * refactor: Update Tokenizer imports and exports - Moved Tokenizer and countTokens exports to the tokenizer module for better organization. - Adjusted imports in memory.ts to reflect the new structure, ensuring consistent usage across the codebase. - Updated memory.test.ts to mock the Tokenizer from the correct module path, enhancing test accuracy. * refactor: Tokenizer initialization and error handling - Introduced an async `initEncoding` method to preload tokenizers, improving performance and accuracy in token counting. - Updated `getTokenCount` to handle uninitialized tokenizers more gracefully, ensuring proper recovery and logging on errors. - Removed deprecated synchronous tokenizer retrieval, streamlining the overall tokenizer management process. * test: Enhance tokenizer tests with initialization and encoding checks - Added `beforeAll` hooks to initialize tokenizers for 'o200k_base' and 'claude' encodings before running tests, ensuring proper setup. - Updated tests to validate the loading of encodings and the correctness of token counts for both 'o200k_base' and 'claude'. 
- Improved test structure to deduplicate concurrent initialization calls, enhancing performance and reliability.
59 lines
1.9 KiB
JavaScript
59 lines
1.9 KiB
JavaScript
// Unit under test: the DALLE3 image-generation tool (project-local module).
const DALLE3 = require('../DALLE3');
// ProxyAgent is only imported so the tests can assert that the OpenAI client
// was constructed with a proxy dispatcher of the expected type.
const { ProxyAgent } = require('undici');

// Stub for the file-upload callback DALLE3 expects in its constructor options;
// its behavior is irrelevant to these proxy-configuration tests.
const processFileURL = jest.fn();
/**
 * Verifies that the DALLE3 tool wires an undici ProxyAgent into its OpenAI
 * client's fetchOptions when the PROXY environment variable is set, and
 * leaves the dispatcher unset otherwise.
 */
describe('DALLE3 Proxy Configuration', () => {
  let originalEnv;

  beforeAll(() => {
    // Snapshot the environment once so every test starts from the same baseline.
    originalEnv = { ...process.env };
  });

  beforeEach(() => {
    jest.resetModules();
    // Fresh copy per test so mutations cannot leak between cases.
    process.env = { ...originalEnv };
  });

  afterEach(() => {
    // Restore a fresh copy rather than the snapshot object itself: assigning
    // `originalEnv` directly would let any later mutation of `process.env`
    // corrupt the saved baseline that `beforeEach` re-spreads for subsequent
    // tests (and for any other suite running in this environment).
    process.env = { ...originalEnv };
  });

  it('should configure ProxyAgent in fetchOptions.dispatcher when PROXY env is set', () => {
    // Set proxy environment variable before constructing the tool.
    process.env.PROXY = 'http://proxy.example.com:8080';
    process.env.DALLE_API_KEY = 'test-api-key';

    const dalleWithProxy = new DALLE3({ processFileURL });

    // The openai client must exist and carry a ProxyAgent dispatcher
    // in its fetch options.
    expect(dalleWithProxy.openai).toBeDefined();
    expect(dalleWithProxy.openai._options).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
  });

  it('should not configure ProxyAgent when PROXY env is not set', () => {
    // Ensure PROXY is not set for this case.
    delete process.env.PROXY;
    process.env.DALLE_API_KEY = 'test-api-key';

    const dalleWithoutProxy = new DALLE3({ processFileURL });

    expect(dalleWithoutProxy.openai).toBeDefined();
    expect(dalleWithoutProxy.openai._options).toBeDefined();

    // fetchOptions may be absent entirely; if present, it must not carry
    // a dispatcher.
    if (dalleWithoutProxy.openai._options.fetchOptions) {
      expect(dalleWithoutProxy.openai._options.fetchOptions.dispatcher).toBeUndefined();
    }
  });
});
|