🧪 ci: Tests for Anthropic and OpenAI LLM Configuration (#9484)

* fix: frequency and presence penalty use camelCase

* ci: OpenAI Configuration Tests

* ci: Enhance OpenAI Configuration Tests with Azure and Custom Endpoint Scenarios

* Added integration tests for OpenAI and Azure configurations simulating various initialization scenarios.
* Updated OpenAIConfigOptions to allow null values for reverseProxyUrl and proxy.
* Improved handling of reasoning parameters in tests for both OpenAI and Azure setups.
* Ensured robust error handling for missing API keys and malformed configurations.
* Added performance assertions for large parameter sets in configuration (a call sketch follows below).
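For orientation, a minimal sketch of the call shape these specs exercise. Values mirror the test fixtures below; treat it as illustration, not canonical API docs:

import { getOpenAIConfig } from './llm';

const { llmConfig, configOptions, tools } = getOpenAIConfig('test-api-key', {
  reverseProxyUrl: null, // now typed as string | null
  proxy: null, // now typed as string | null
  modelOptions: {
    model: 'gpt-4-turbo',
    temperature: 0.7,
    max_tokens: 2048, // surfaces as llmConfig.maxTokens
    frequency_penalty: 0.1, // surfaces as llmConfig.frequencyPenalty
    presence_penalty: 0.1, // surfaces as llmConfig.presencePenalty
  },
});
// llmConfig.maxTokens === 2048; configOptions is empty; tools is []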

* test: Add comprehensive integration tests for Anthropic LLM configuration

* Introduced real usage integration tests for various Anthropic endpoint configurations, including handling of proxy and reverse proxy setups.
* Implemented model-specific scenarios for Claude-3.7 and web search functionality.
* Covered graceful handling of missing user IDs and large parameter sets.
* Validated parameter logic, including default values, boundary conditions, and type handling for numeric and array parameters.
* Ensured proper exclusion of system options from model options and maintained expected behavior across different model variations (a call sketch follows below).
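For orientation, a minimal sketch of the Anthropic call shape these tests exercise. Values mirror the fixtures below; the relative import path is assumed:

import { getLLMConfig } from './llm';

const { llmConfig, tools } = getLLMConfig('sk-ant-api-key-123', {
  userId: 'test-user-id-123',
  modelOptions: {
    model: 'claude-3-5-sonnet-latest',
    maxOutputTokens: 4096, // surfaces as llmConfig.maxTokens
    web_search: true, // adds { type: 'web_search_20250305', name: 'web_search' } to tools
  },
});
// llmConfig.invocationKwargs.metadata.user_id === 'test-user-id-123'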
Danny Avila 2025-09-06 09:42:12 -04:00 committed by GitHub
parent 6f6a34d126
commit 035f85c3ba
4 changed files with 1721 additions and 8 deletions


@@ -338,4 +338,799 @@ describe('getLLMConfig', () => {
expect(result.llmConfig).toHaveProperty('stopSequences', []);
});
});
describe('Real Usage Integration Tests', () => {
describe('Initialize.js Simulation', () => {
it('should handle basic Anthropic endpoint configuration like initialize.js', () => {
// Simulate the configuration from Anthropic initialize.js
const anthropicApiKey = 'sk-ant-api-key-123';
const endpointOption = {
model_parameters: {
model: 'claude-3-5-sonnet-latest',
temperature: 0.7,
maxOutputTokens: 4096,
topP: 0.9,
topK: 40,
stop: ['\\n\\n', 'Human:', 'Assistant:'],
stream: true,
},
};
// Simulate clientOptions from initialize.js
const clientOptions = {
proxy: null,
userId: 'test-user-id-123',
reverseProxyUrl: null,
modelOptions: endpointOption.model_parameters,
streamRate: 25,
titleModel: 'claude-3-haiku',
};
const result = getLLMConfig(anthropicApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
apiKey: anthropicApiKey,
model: 'claude-3-5-sonnet-latest',
temperature: 0.7,
maxTokens: 4096,
topP: 0.9,
topK: 40,
stopSequences: ['\\n\\n', 'Human:', 'Assistant:'],
stream: true,
invocationKwargs: {
metadata: {
user_id: 'test-user-id-123',
},
},
});
expect(result.tools).toEqual([]);
});
it('should handle Anthropic with proxy configuration like initialize.js', () => {
const anthropicApiKey = 'sk-ant-proxy-key';
const clientOptions = {
proxy: 'http://corporate-proxy:8080',
userId: 'proxy-user-456',
reverseProxyUrl: null,
modelOptions: {
model: 'claude-3-opus',
temperature: 0.3,
maxOutputTokens: 2048,
},
};
const result = getLLMConfig(anthropicApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
apiKey: anthropicApiKey,
model: 'claude-3-opus',
temperature: 0.3,
maxTokens: 2048,
invocationKwargs: {
metadata: {
user_id: 'proxy-user-456',
},
},
});
expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
'ProxyAgent',
);
});
it('should handle Anthropic with reverse proxy like initialize.js', () => {
const anthropicApiKey = 'sk-ant-reverse-proxy';
const reverseProxyUrl = 'https://api.custom-anthropic.com/v1';
const clientOptions = {
proxy: null,
userId: 'reverse-proxy-user',
reverseProxyUrl: reverseProxyUrl,
modelOptions: {
model: 'claude-3-5-haiku',
temperature: 0.5,
stream: false,
},
};
const result = getLLMConfig(anthropicApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
apiKey: anthropicApiKey,
model: 'claude-3-5-haiku',
temperature: 0.5,
stream: false,
anthropicApiUrl: reverseProxyUrl,
});
expect(result.llmConfig.clientOptions).toMatchObject({
baseURL: reverseProxyUrl,
});
});
});
describe('Model-Specific Real Usage Scenarios', () => {
it('should handle Claude-3.7 with thinking enabled like production', () => {
const clientOptions = {
userId: 'thinking-user-789',
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.4,
maxOutputTokens: 8192,
topP: 0.95,
topK: 50,
thinking: true,
thinkingBudget: 3000,
promptCache: true,
},
};
const result = getLLMConfig('sk-ant-thinking-key', clientOptions);
expect(result.llmConfig).toMatchObject({
model: 'claude-3-7-sonnet',
temperature: 0.4,
maxTokens: 8192,
stream: true, // default
thinking: {
type: 'enabled',
budget_tokens: 3000,
},
});
// topP and topK should NOT be included for Claude-3.7 with thinking enabled
expect(result.llmConfig).not.toHaveProperty('topP');
expect(result.llmConfig).not.toHaveProperty('topK');
// Should have appropriate headers for Claude-3.7 with prompt cache
expect(result.llmConfig.clientOptions.defaultHeaders).toEqual({
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
});
});
it('should handle web search functionality like production', () => {
const clientOptions = {
userId: 'websearch-user-303',
modelOptions: {
model: 'claude-3-5-sonnet-latest',
temperature: 0.6,
maxOutputTokens: 4096,
web_search: true,
},
};
const result = getLLMConfig('sk-ant-websearch-key', clientOptions);
expect(result.llmConfig).toMatchObject({
model: 'claude-3-5-sonnet-latest',
temperature: 0.6,
maxTokens: 4096,
});
expect(result.tools).toEqual([
{
type: 'web_search_20250305',
name: 'web_search',
},
]);
});
});
describe('Production-like Configuration Scenarios', () => {
it('should handle complex production configuration', () => {
const clientOptions = {
proxy: 'http://prod-proxy.company.com:3128',
userId: 'prod-user-enterprise-404',
reverseProxyUrl: 'https://anthropic-gateway.company.com/v1',
modelOptions: {
model: 'claude-3-opus-20240229',
temperature: 0.2, // Conservative for production
maxOutputTokens: 4096,
topP: 0.95,
topK: 10,
stop: ['\\n\\nHuman:', '\\n\\nAssistant:', 'END_CONVERSATION'],
stream: true,
promptCache: true,
},
streamRate: 15, // Conservative stream rate
titleModel: 'claude-3-haiku-20240307',
};
const result = getLLMConfig('sk-ant-prod-enterprise-key', clientOptions);
expect(result.llmConfig).toMatchObject({
apiKey: 'sk-ant-prod-enterprise-key',
model: 'claude-3-opus-20240229',
temperature: 0.2,
maxTokens: 4096,
topP: 0.95,
topK: 10,
stopSequences: ['\\n\\nHuman:', '\\n\\nAssistant:', 'END_CONVERSATION'],
stream: true,
anthropicApiUrl: 'https://anthropic-gateway.company.com/v1',
invocationKwargs: {
metadata: {
user_id: 'prod-user-enterprise-404',
},
},
});
expect(result.llmConfig.clientOptions).toMatchObject({
baseURL: 'https://anthropic-gateway.company.com/v1',
fetchOptions: {
dispatcher: expect.any(Object),
},
});
expect(result.tools).toEqual([]);
});
it('should handle multiple system options removal from modelOptions', () => {
const modelOptions = {
model: 'claude-3-5-sonnet',
temperature: 0.7,
maxOutputTokens: 8192,
// System options that should be removed
thinking: true,
promptCache: true,
thinkingBudget: 2500,
// Regular options that should remain
topP: 0.9,
topK: 40,
};
const clientOptions = {
userId: 'system-options-user',
modelOptions,
};
getLLMConfig('sk-ant-system-key', clientOptions);
// System options should be removed from original modelOptions
expect(modelOptions).not.toHaveProperty('thinking');
expect(modelOptions).not.toHaveProperty('promptCache');
expect(modelOptions).not.toHaveProperty('thinkingBudget');
// Regular options should remain
expect(modelOptions).toHaveProperty('temperature', 0.7);
expect(modelOptions).toHaveProperty('topP', 0.9);
expect(modelOptions).toHaveProperty('topK', 40);
});
});
describe('Error Handling and Edge Cases from Real Usage', () => {
it('should handle missing userId gracefully', () => {
const clientOptions = {
modelOptions: {
model: 'claude-3-haiku',
temperature: 0.5,
},
// userId is missing
};
const result = getLLMConfig('sk-ant-no-user-key', clientOptions);
expect(result.llmConfig.invocationKwargs.metadata).toMatchObject({
user_id: undefined,
});
});
it('should handle large parameter sets without performance issues', () => {
const largeModelOptions = {
model: 'claude-3-opus',
temperature: 0.7,
maxOutputTokens: 4096,
topP: 0.9,
topK: 40,
};
// Add many additional properties to test performance
for (let i = 0; i < 100; i++) {
largeModelOptions[`custom_param_${i}`] = `value_${i}`;
}
const clientOptions = {
userId: 'performance-test-user',
modelOptions: largeModelOptions,
proxy: 'http://performance-proxy:8080',
reverseProxyUrl: 'https://performance-reverse-proxy.com',
};
const startTime = Date.now();
const result = getLLMConfig('sk-ant-performance-key', clientOptions);
const endTime = Date.now();
expect(endTime - startTime).toBeLessThan(50); // Should be very fast
expect(result.llmConfig).toMatchObject({
model: 'claude-3-opus',
temperature: 0.7,
maxTokens: 4096,
topP: 0.9,
topK: 40,
});
});
it('should handle model name variations and edge cases', () => {
const modelVariations = [
'claude-3-7-sonnet',
'claude-3.7-sonnet',
'anthropic/claude-3-opus-20240229',
'claude-sonnet-4-latest',
'claude-3-5-sonnet-latest',
];
modelVariations.forEach((model) => {
const clientOptions = {
userId: 'model-variation-user',
modelOptions: {
model,
temperature: 0.5,
topP: 0.9,
topK: 40,
thinking: true,
promptCache: true,
},
};
const result = getLLMConfig('sk-ant-variation-key', clientOptions);
expect(result.llmConfig).toHaveProperty('model', model);
expect(result.llmConfig).toHaveProperty('temperature', 0.5);
// The specific behavior (thinking, topP/topK inclusion) depends on model pattern
});
});
});
});
describe('Comprehensive Parameter Logic Tests', () => {
describe('Default Values and Fallbacks', () => {
it('should apply correct default values from anthropicSettings', () => {
const result = getLLMConfig('test-key', { modelOptions: {} });
expect(result.llmConfig).toMatchObject({
model: 'claude-3-5-sonnet-latest', // default model
stream: true, // default stream
maxTokens: 8192, // DEFAULT_MAX_OUTPUT for claude-3-5-sonnet
});
});
it('should handle maxOutputTokens reset logic for different models', () => {
const testCases = [
{ model: 'claude-3-5-sonnet', expectedMaxTokens: 8192 },
{ model: 'claude-3.5-sonnet-20241022', expectedMaxTokens: 8192 },
{ model: 'claude-3-7-sonnet', expectedMaxTokens: 8192 },
{ model: 'claude-3.7-sonnet-20250109', expectedMaxTokens: 8192 },
{ model: 'claude-3-opus', expectedMaxTokens: 4096 },
{ model: 'claude-3-haiku', expectedMaxTokens: 4096 },
{ model: 'claude-2.1', expectedMaxTokens: 4096 },
];
testCases.forEach(({ model, expectedMaxTokens }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model, maxOutputTokens: null }, // Force reset
});
expect(result.llmConfig.maxTokens).toBe(expectedMaxTokens);
});
});
it('should handle system options defaults correctly', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
// Don't specify thinking, promptCache, thinkingBudget - should use defaults
},
});
// Should have thinking enabled by default for claude-3-7
expect(result.llmConfig.thinking).toMatchObject({
type: 'enabled',
budget_tokens: 2000, // default thinkingBudget
});
// Should have prompt cache headers by default
expect(result.llmConfig.clientOptions.defaultHeaders).toBeDefined();
});
});
describe('Parameter Boundary and Validation Logic', () => {
it('should handle temperature boundary values', () => {
const testCases = [
{ temperature: 0, expected: 0 }, // min
{ temperature: 1, expected: 1 }, // max
{ temperature: 0.5, expected: 0.5 }, // middle
{ temperature: -0.1, expected: -0.1 }, // below min (should pass through)
{ temperature: 1.1, expected: 1.1 }, // above max (should pass through)
];
testCases.forEach(({ temperature, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { temperature },
});
expect(result.llmConfig.temperature).toBe(expected);
});
});
it('should handle topP boundary values', () => {
const testCases = [
{ topP: 0, expected: 0 }, // min
{ topP: 1, expected: 1 }, // max
{ topP: 0.7, expected: 0.7 }, // default
{ topP: -0.1, expected: -0.1 }, // below min
{ topP: 1.1, expected: 1.1 }, // above max
];
testCases.forEach(({ topP, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', topP },
});
expect(result.llmConfig.topP).toBe(expected);
});
});
it('should handle topK boundary values', () => {
const testCases = [
{ topK: 1, expected: 1 }, // min
{ topK: 40, expected: 40 }, // max
{ topK: 5, expected: 5 }, // default
{ topK: 0, expected: 0 }, // below min
{ topK: 50, expected: 50 }, // above max
];
testCases.forEach(({ topK, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', topK },
});
expect(result.llmConfig.topK).toBe(expected);
});
});
it('should handle maxOutputTokens boundary values', () => {
const testCases = [
{ model: 'claude-3-opus', maxOutputTokens: 1, expected: 1 }, // min
{ model: 'claude-3-opus', maxOutputTokens: 4096, expected: 4096 }, // max for legacy
{ model: 'claude-3-5-sonnet', maxOutputTokens: 1, expected: 1 }, // min
{ model: 'claude-3-5-sonnet', maxOutputTokens: 200000, expected: 200000 }, // max for new
{ model: 'claude-3-7-sonnet', maxOutputTokens: 8192, expected: 8192 }, // default
];
testCases.forEach(({ model, maxOutputTokens, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model, maxOutputTokens },
});
expect(result.llmConfig.maxTokens).toBe(expected);
});
});
it('should handle thinkingBudget boundary values', () => {
const testCases = [
{ thinkingBudget: 1024, expected: 1024 }, // min
{ thinkingBudget: 2000, expected: 2000 }, // default
{ thinkingBudget: 7000, expected: 7000 }, // within max tokens (8192)
{ thinkingBudget: 500, expected: 500 }, // below min
{ thinkingBudget: 200000, expected: 7372 }, // above max tokens, constrained to 90% of 8192
];
testCases.forEach(({ thinkingBudget, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
thinking: true,
thinkingBudget,
},
});
expect(result.llmConfig.thinking.budget_tokens).toBe(expected);
});
});
});
describe('Complex Parameter Interactions', () => {
it('should handle thinking budget vs maxTokens constraints', () => {
const testCases = [
// Budget within maxTokens - should keep original
{ maxOutputTokens: 4096, thinkingBudget: 2000, expectedBudget: 2000 },
// Budget exceeds maxTokens - should constrain to 90%
{ maxOutputTokens: 4096, thinkingBudget: 5000, expectedBudget: 3686 }, // 90% of 4096
// Budget equals maxTokens - should keep original (not constrained unless it exceeds)
{ maxOutputTokens: 2000, thinkingBudget: 2000, expectedBudget: 2000 },
// Budget slightly exceeds maxTokens - should constrain to 90%
{ maxOutputTokens: 2000, thinkingBudget: 2001, expectedBudget: 1800 }, // 90% of 2000
// Very small maxTokens
{ maxOutputTokens: 1000, thinkingBudget: 3000, expectedBudget: 900 }, // 90% of 1000
];
testCases.forEach(({ maxOutputTokens, thinkingBudget, expectedBudget }) => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
maxOutputTokens,
thinking: true,
thinkingBudget,
},
});
expect(result.llmConfig.thinking.budget_tokens).toBe(expectedBudget);
});
});
it('should handle topP/topK exclusion logic for Claude-3.7 models', () => {
const testCases = [
// Claude-3.7 with thinking = true - should exclude topP/topK
{ model: 'claude-3-7-sonnet', thinking: true, shouldInclude: false },
{ model: 'claude-3.7-sonnet', thinking: true, shouldInclude: false },
// Claude-3.7 with thinking = false - should include topP/topK
{ model: 'claude-3-7-sonnet', thinking: false, shouldInclude: true },
{ model: 'claude-3.7-sonnet', thinking: false, shouldInclude: true },
// Claude-3.7 with thinking = null - thinking defaults to true, so should exclude topP/topK
{ model: 'claude-3-7-sonnet', thinking: null, shouldInclude: false },
// Non-Claude-3.7 models - should always include topP/topK (thinking doesn't affect them)
{ model: 'claude-3-5-sonnet', thinking: true, shouldInclude: true },
{ model: 'claude-3-opus', thinking: true, shouldInclude: true },
{ model: 'claude-sonnet-4', thinking: true, shouldInclude: true },
];
testCases.forEach(({ model, thinking, shouldInclude }) => {
const result = getLLMConfig('test-key', {
modelOptions: {
model,
thinking,
topP: 0.9,
topK: 40,
},
});
if (shouldInclude) {
expect(result.llmConfig).toHaveProperty('topP', 0.9);
expect(result.llmConfig).toHaveProperty('topK', 40);
} else {
expect(result.llmConfig).not.toHaveProperty('topP');
expect(result.llmConfig).not.toHaveProperty('topK');
}
});
});
it('should handle prompt cache support logic for different models', () => {
const testCases = [
// Models that support prompt cache
{ model: 'claude-3-5-sonnet', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3.5-sonnet-20241022', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3-7-sonnet', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3.7-sonnet-20250109', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-3-opus', promptCache: true, shouldHaveHeaders: true },
{ model: 'claude-sonnet-4-20250514', promptCache: true, shouldHaveHeaders: true },
// Models that don't support prompt cache
{ model: 'claude-3-5-sonnet-latest', promptCache: true, shouldHaveHeaders: false },
{ model: 'claude-3.5-sonnet-latest', promptCache: true, shouldHaveHeaders: false },
// Prompt cache disabled
{ model: 'claude-3-5-sonnet', promptCache: false, shouldHaveHeaders: false },
];
testCases.forEach(({ model, promptCache, shouldHaveHeaders }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model, promptCache },
});
if (shouldHaveHeaders) {
expect(result.llmConfig.clientOptions.defaultHeaders).toBeDefined();
expect(result.llmConfig.clientOptions.defaultHeaders['anthropic-beta']).toContain(
'prompt-caching',
);
} else {
expect(result.llmConfig.clientOptions.defaultHeaders).toBeUndefined();
}
});
});
});
describe('Parameter Type Handling', () => {
it('should handle different data types for numeric parameters', () => {
const testCases = [
{ temperature: '0.5', expected: '0.5' }, // string
{ temperature: 0.5, expected: 0.5 }, // number
{ topP: '0.9', expected: '0.9' }, // string
{ topP: 0.9, expected: 0.9 }, // number
{ topK: '20', expected: '20' }, // string
{ topK: 20, expected: 20 }, // number
{ maxOutputTokens: '4096', expected: '4096' }, // string
{ maxOutputTokens: 4096, expected: 4096 }, // number
];
testCases.forEach((testCase) => {
const key = Object.keys(testCase)[0];
const value = testCase[key];
const expected = testCase.expected;
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', [key]: value },
});
const outputKey = key === 'maxOutputTokens' ? 'maxTokens' : key;
expect(result.llmConfig[outputKey]).toBe(expected);
});
});
it('should handle array parameters correctly', () => {
const testCases = [
{ stop: [], expected: [] }, // empty array
{ stop: ['\\n'], expected: ['\\n'] }, // single item
{ stop: ['\\n', 'Human:', 'Assistant:'], expected: ['\\n', 'Human:', 'Assistant:'] }, // multiple items
{ stop: null, expected: null }, // null
{ stop: undefined, expected: undefined }, // undefined
];
testCases.forEach(({ stop, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', stop },
});
if (expected === null || expected === undefined) {
expect(result.llmConfig).not.toHaveProperty('stopSequences');
} else {
expect(result.llmConfig.stopSequences).toEqual(expected);
}
});
});
it('should handle boolean parameters correctly', () => {
const testCases = [
{ stream: true, expected: true },
{ stream: false, expected: false },
{ stream: 'true', expected: 'true' }, // string boolean
{ stream: 'false', expected: 'false' }, // string boolean
{ stream: 1, expected: 1 }, // truthy number
{ stream: 0, expected: 0 }, // falsy number
{ thinking: true, expected: true },
{ thinking: false, expected: false },
{ promptCache: true, expected: true },
{ promptCache: false, expected: false },
{ web_search: true, expected: true },
{ web_search: false, expected: false },
];
testCases.forEach((testCase) => {
const key = Object.keys(testCase)[0];
const value = testCase[key];
const expected = testCase.expected;
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', [key]: value },
});
if (key === 'stream') {
expect(result.llmConfig.stream).toBe(expected);
} else if (key === 'web_search' && expected) {
expect(result.tools).toEqual([{ type: 'web_search_20250305', name: 'web_search' }]);
}
});
});
});
describe('Parameter Precedence and Override Logic', () => {
it('should handle modelOptions vs defaultOptions precedence', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-opus', // override default
maxOutputTokens: 2048, // override default
stream: false, // override default
temperature: 0.3, // new parameter
},
});
expect(result.llmConfig).toMatchObject({
model: 'claude-3-opus', // overridden
maxTokens: 2048, // overridden
stream: false, // overridden
temperature: 0.3, // added
});
});
it('should handle system options extraction and defaults', () => {
const modelOptions = {
model: 'claude-3-7-sonnet',
temperature: 0.5,
// Missing system options should use defaults
};
const result = getLLMConfig('test-key', {
modelOptions,
});
// System options should be removed from modelOptions
expect(modelOptions).not.toHaveProperty('thinking');
expect(modelOptions).not.toHaveProperty('promptCache');
expect(modelOptions).not.toHaveProperty('thinkingBudget');
// Should use defaults for system options
expect(result.llmConfig.thinking).toMatchObject({
type: 'enabled',
budget_tokens: 2000, // default
});
});
it('should handle partial system options with defaults', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
thinking: false, // explicit false
// promptCache and thinkingBudget should use defaults
},
});
// thinking is false, so no thinking object should be created
expect(result.llmConfig.thinking).toBeUndefined();
// promptCache default is true, so should have headers
expect(result.llmConfig.clientOptions.defaultHeaders).toBeDefined();
});
});
describe('Edge Cases and Error Conditions', () => {
it('should handle extremely large numbers', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
temperature: Number.MAX_SAFE_INTEGER,
topP: Number.MAX_VALUE,
topK: 999999,
maxOutputTokens: Number.MAX_SAFE_INTEGER,
thinkingBudget: Number.MAX_SAFE_INTEGER,
},
});
// Should pass through without crashing
expect(result.llmConfig.temperature).toBe(Number.MAX_SAFE_INTEGER);
expect(result.llmConfig.topP).toBe(Number.MAX_VALUE);
expect(result.llmConfig.topK).toBe(999999);
expect(result.llmConfig.maxTokens).toBe(Number.MAX_SAFE_INTEGER);
});
it('should handle negative numbers', () => {
const result = getLLMConfig('test-key', {
modelOptions: {
temperature: -1,
topP: -0.5,
topK: -10,
maxOutputTokens: -1000,
thinkingBudget: -500,
},
});
// Should pass through negative values (API will handle validation)
expect(result.llmConfig.temperature).toBe(-1);
expect(result.llmConfig.topP).toBe(-0.5);
expect(result.llmConfig.topK).toBe(-10);
expect(result.llmConfig.maxTokens).toBe(-1000);
});
it('should handle special numeric values', () => {
const testCases = [
{ value: NaN, shouldBeRemoved: false }, // NaN passes through removeNullishValues
{ value: Infinity, shouldBeRemoved: false },
{ value: -Infinity, shouldBeRemoved: false },
{ value: 0, shouldBeRemoved: false },
{ value: -0, shouldBeRemoved: false },
];
testCases.forEach(({ value, shouldBeRemoved }) => {
const result = getLLMConfig('test-key', {
modelOptions: {
model: 'claude-3-opus',
temperature: value,
},
});
if (shouldBeRemoved) {
expect(result.llmConfig).not.toHaveProperty('temperature');
} else {
expect(result.llmConfig.temperature).toBe(value);
}
});
});
it('should handle malformed stop sequences', () => {
const testCases = [
{ stop: 'string', expected: 'string' }, // single string instead of array
{ stop: [null, undefined, ''], expected: [null, undefined, ''] }, // mixed values
{ stop: [123, true, false], expected: [123, true, false] }, // non-string values
{ stop: {}, expected: {} }, // object instead of array
];
testCases.forEach(({ stop, expected }) => {
const result = getLLMConfig('test-key', {
modelOptions: { model: 'claude-3-opus', stop },
});
expect(result.llmConfig.stopSequences).toEqual(expected);
});
});
});
});
});


@@ -1,6 +1,7 @@
-import { ReasoningEffort, ReasoningSummary, Verbosity } from 'librechat-data-provider';
+import { Verbosity, ReasoningEffort, ReasoningSummary } from 'librechat-data-provider';
import type { RequestInit } from 'undici';
-import { getOpenAIConfig } from './llm';
+import type { OpenAIParameters, AzureOptions } from '~/types';
+import { getOpenAIConfig, knownOpenAIParams } from './llm';
describe('getOpenAIConfig', () => {
const mockApiKey = 'test-api-key';
@@ -75,7 +76,7 @@ describe('getOpenAIConfig', () => {
expect(result.llmConfig.modelKwargs).toBeUndefined();
});
-it('should handle reasoning params for useResponsesApi', () => {
+it('should handle reasoning params for `useResponsesApi`', () => {
const modelOptions = {
reasoning_effort: ReasoningEffort.high,
reasoning_summary: ReasoningSummary.detailed,
@@ -93,7 +94,7 @@ describe('getOpenAIConfig', () => {
expect((result.llmConfig as Record<string, unknown>).reasoning_summary).toBeUndefined();
});
-it('should handle reasoning params without useResponsesApi', () => {
+it('should handle reasoning params without `useResponsesApi`', () => {
const modelOptions = {
reasoning_effort: ReasoningEffort.high,
reasoning_summary: ReasoningSummary.detailed,
@@ -421,4 +422,907 @@ describe('getOpenAIConfig', () => {
},
});
});
describe('Azure Configuration', () => {
it('should handle Azure configuration with model name as deployment', () => {
const originalEnv = process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'original-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const modelOptions = { model: 'gpt-4.0-turbo' };
const result = getOpenAIConfig(mockApiKey, { azure, modelOptions });
// Should sanitize model name by removing dots
expect(result.llmConfig.model).toBe('gpt-40-turbo');
expect((result.llmConfig as Record<string, unknown>).azureOpenAIApiDeploymentName).toBe(
'gpt-40-turbo',
);
// Cleanup
if (originalEnv !== undefined) {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = originalEnv;
} else {
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
}
});
it('should use default Azure deployment name when not using model name', () => {
const originalEnv = process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'custom-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const result = getOpenAIConfig(mockApiKey, { azure });
expect((result.llmConfig as Record<string, unknown>).azureOpenAIApiDeploymentName).toBe(
'custom-deployment',
);
expect(result.llmConfig.model).toBe('custom-deployment');
// Cleanup
if (originalEnv !== undefined) {
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = originalEnv;
}
});
it('should handle Azure default model from environment', () => {
const originalEnv = process.env.AZURE_OPENAI_DEFAULT_MODEL;
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-env-default';
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const result = getOpenAIConfig(mockApiKey, { azure });
expect(result.llmConfig.model).toBe('deployment'); // deployment name takes precedence
// Cleanup
if (originalEnv !== undefined) {
process.env.AZURE_OPENAI_DEFAULT_MODEL = originalEnv;
} else {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
}
});
it('should construct Azure base URL correctly', () => {
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'test-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const result = getOpenAIConfig(mockApiKey, {
azure,
reverseProxyUrl: 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
});
// The constructAzureURL should replace placeholders with actual values
expect((result.llmConfig as Record<string, unknown>).azureOpenAIBasePath).toBe(
'https://test-instance.openai.azure.com/openai/v1',
);
});
it('should handle Azure Responses API configuration', () => {
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'test-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const modelOptions = { useResponsesApi: true };
const result = getOpenAIConfig(mockApiKey, { azure, modelOptions });
// Should construct the responses API URL
expect(result.configOptions?.baseURL).toContain('test-instance.openai.azure.com');
expect(result.configOptions?.defaultHeaders).toMatchObject({
'api-key': mockApiKey,
});
expect(result.configOptions?.defaultQuery).toMatchObject({
'api-version': 'preview',
});
expect(result.llmConfig.apiKey).toBe(mockApiKey);
expect(
(result.llmConfig as Record<string, unknown>).azureOpenAIApiDeploymentName,
).toBeUndefined();
expect(
(result.llmConfig as Record<string, unknown>).azureOpenAIApiInstanceName,
).toBeUndefined();
});
it('should handle Azure with organization from environment', () => {
const originalOrg = process.env.OPENAI_ORGANIZATION;
process.env.OPENAI_ORGANIZATION = 'test-org-123';
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'test-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const result = getOpenAIConfig(mockApiKey, { azure });
expect(result.configOptions?.organization).toBe('test-org-123');
// Cleanup
if (originalOrg !== undefined) {
process.env.OPENAI_ORGANIZATION = originalOrg;
} else {
delete process.env.OPENAI_ORGANIZATION;
}
});
});
describe('OpenRouter Configuration', () => {
it('should detect OpenRouter from endpoint parameter', () => {
const result = getOpenAIConfig(mockApiKey, {}, 'openrouter');
expect(result.llmConfig.include_reasoning).toBe(true);
expect(result.provider).toBe('openrouter');
});
it('should handle OpenRouter with reasoning params', () => {
const modelOptions = {
reasoning_effort: ReasoningEffort.high,
reasoning_summary: ReasoningSummary.detailed,
};
const result = getOpenAIConfig(mockApiKey, {
reverseProxyUrl: 'https://openrouter.ai/api/v1',
modelOptions,
});
expect(result.llmConfig.reasoning).toEqual({
effort: ReasoningEffort.high,
summary: ReasoningSummary.detailed,
});
expect(result.provider).toBe('openrouter');
});
it('should merge custom headers with OpenRouter defaults', () => {
const customHeaders = {
'X-Custom-Header': 'custom-value',
Authorization: 'Bearer custom-token',
};
const result = getOpenAIConfig(mockApiKey, {
reverseProxyUrl: 'https://openrouter.ai/api/v1',
headers: customHeaders,
});
expect(result.configOptions?.defaultHeaders).toEqual({
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
'X-Custom-Header': 'custom-value',
Authorization: 'Bearer custom-token',
});
});
});
describe('Direct Endpoint Configuration', () => {
it('should create custom fetch for direct endpoint', () => {
const result = getOpenAIConfig(mockApiKey, {
directEndpoint: true,
reverseProxyUrl: 'https://direct-api.com',
});
// Should have a custom fetch function when directEndpoint is true
expect(result.configOptions?.fetch).toBeDefined();
expect(typeof result.configOptions?.fetch).toBe('function');
});
it('should not create custom fetch when directEndpoint is false', () => {
const result = getOpenAIConfig(mockApiKey, {
directEndpoint: false,
reverseProxyUrl: 'https://proxy-api.com',
});
expect(result.configOptions?.fetch).toBeUndefined();
});
it('should not create custom fetch when baseURL is not set', () => {
const result = getOpenAIConfig(mockApiKey, {
directEndpoint: true,
});
expect(result.configOptions?.fetch).toBeUndefined();
});
});
describe('Edge Cases and Error Handling', () => {
it('should handle null and undefined values in reasoning params', () => {
const testCases = [
{ reasoning_effort: null, reasoning_summary: null, shouldHaveReasoning: false },
{ reasoning_effort: undefined, reasoning_summary: undefined, shouldHaveReasoning: false },
{ reasoning_effort: '', reasoning_summary: '', shouldHaveReasoning: false },
{
reasoning_effort: null,
reasoning_summary: ReasoningSummary.concise,
shouldHaveReasoning: true,
},
{
reasoning_effort: ReasoningEffort.low,
reasoning_summary: null,
shouldHaveReasoning: true,
},
];
testCases.forEach(({ shouldHaveReasoning, ...modelOptions }) => {
const result = getOpenAIConfig(mockApiKey, {
modelOptions: { ...modelOptions, useResponsesApi: true } as Partial<OpenAIParameters>,
});
if (shouldHaveReasoning) {
expect(result.llmConfig?.reasoning).toBeDefined();
} else {
expect(result.llmConfig?.reasoning).toBeUndefined();
}
});
});
it('should handle empty dropParams array', () => {
const modelOptions = {
temperature: 0.7,
topP: 0.9,
};
const result = getOpenAIConfig(mockApiKey, {
modelOptions,
dropParams: [],
});
expect(result.llmConfig.temperature).toBe(0.7);
expect(result.llmConfig.topP).toBe(0.9);
});
it('should handle non-array dropParams gracefully', () => {
const modelOptions = {
temperature: 0.7,
topP: 0.9,
};
const result = getOpenAIConfig(mockApiKey, {
modelOptions,
/** Invalid type */
dropParams: 'temperature' as unknown as string[],
});
// Should not crash and should keep all params
expect(result.llmConfig.temperature).toBe(0.7);
expect(result.llmConfig.topP).toBe(0.9);
});
it('should handle max_tokens conversion edge cases', () => {
const testCases = [
{ model: 'gpt-4', max_tokens: 1000 }, // Should keep maxTokens
{ model: 'gpt-5', max_tokens: null }, // Should not create modelKwargs
{ model: 'gpt-6', max_tokens: undefined }, // Should not create modelKwargs
{ model: 'gpt-7', max_tokens: 0 }, // Should handle zero
];
testCases.forEach(({ model, max_tokens }) => {
const result = getOpenAIConfig(mockApiKey, {
modelOptions: { model, max_tokens: max_tokens ?? undefined },
});
if (model === 'gpt-4') {
expect(result.llmConfig.maxTokens).toBe(1000);
expect(result.llmConfig.modelKwargs).toBeUndefined();
} else if (max_tokens != null) {
expect(result.llmConfig.maxTokens).toBeUndefined();
expect(result.llmConfig.modelKwargs?.max_completion_tokens).toBe(max_tokens);
} else {
expect(result.llmConfig.maxTokens).toBeUndefined();
expect(result.llmConfig.modelKwargs).toBeUndefined();
}
});
});
it('should handle various search model patterns', () => {
const searchModels = [
'gpt-4o-search',
'gpt-4o-mini-search',
'gpt-4o-2024-search',
'custom-gpt-4o-search-model',
];
searchModels.forEach((model) => {
const modelOptions = {
model,
temperature: 0.7,
frequency_penalty: 0.5,
presence_penalty: 0.6,
max_tokens: 1000,
custom_param: 'should-remain',
};
const result = getOpenAIConfig(mockApiKey, { modelOptions });
expect(result.llmConfig.temperature).toBeUndefined();
expect((result.llmConfig as Record<string, unknown>).frequency_penalty).toBeUndefined();
expect((result.llmConfig as Record<string, unknown>).presence_penalty).toBeUndefined();
/** `frequency_penalty` is converted to `frequencyPenalty` */
expect(result.llmConfig.frequencyPenalty).toBe(0.5);
/** `presence_penalty` is converted to `presencePenalty` */
expect(result.llmConfig.presencePenalty).toBe(0.6);
expect(result.llmConfig.maxTokens).toBe(1000); // max_tokens is allowed
expect((result.llmConfig as Record<string, unknown>).custom_param).toBe('should-remain');
});
});
it('should preserve streaming default when not specified', () => {
const result = getOpenAIConfig(mockApiKey, {});
expect(result.llmConfig.streaming).toBe(true);
});
it('should override streaming when explicitly set', () => {
const result = getOpenAIConfig(mockApiKey, { streaming: false });
expect(result.llmConfig.streaming).toBe(false);
});
});
describe('Parameter Classification', () => {
it('should correctly identify all known OpenAI parameters', () => {
const allKnownParams = Array.from(knownOpenAIParams);
const testParams: Record<string, unknown> = {};
// Create test object with all known params
allKnownParams.forEach((param) => {
testParams[param] = `test-${param}`;
});
const result = getOpenAIConfig(mockApiKey, { addParams: testParams });
// All should be in llmConfig, none in modelKwargs
expect(result.llmConfig.modelKwargs).toBeUndefined();
// Check a few key parameters are correctly placed
expect((result.llmConfig as Record<string, unknown>).model).toBe('test-model');
expect((result.llmConfig as Record<string, unknown>).temperature).toBe('test-temperature');
expect((result.llmConfig as Record<string, unknown>).maxTokens).toBe('test-maxTokens');
});
it('should handle mixed case and underscore variations', () => {
const addParams = {
maxTokens: 1000, // camelCase - known
topP: 0.9, // camelCase - known
top_p: 0.8, // snake_case - unknown, should go to modelKwargs
customParam: 'value', // unknown
};
const result = getOpenAIConfig(mockApiKey, { addParams });
expect(result.llmConfig.maxTokens).toBe(1000);
expect(result.llmConfig.topP).toBe(0.9);
expect(result.llmConfig.modelKwargs).toEqual({
top_p: 0.8,
customParam: 'value',
});
});
});
describe('Complex Integration Scenarios', () => {
it('should handle Azure + OpenRouter combination (OpenRouter still detected)', () => {
const azure = {
azureOpenAIApiInstanceName: 'test-instance',
azureOpenAIApiDeploymentName: 'test-deployment',
azureOpenAIApiVersion: '2023-05-15',
azureOpenAIApiKey: 'azure-key',
};
const result = getOpenAIConfig(mockApiKey, {
azure,
reverseProxyUrl: 'https://openrouter.ai/api/v1',
});
// Azure config should be present
expect((result.llmConfig as Record<string, unknown>).azureOpenAIApiInstanceName).toBe(
'test-instance',
);
// But OpenRouter is still detected from URL
expect(result.provider).toBe('openrouter');
expect(result.llmConfig.include_reasoning).toBe(true);
});
it('should handle all configuration options together', () => {
const complexConfig = {
modelOptions: {
model: 'gpt-4-turbo',
temperature: 0.7,
max_tokens: 2000,
verbosity: Verbosity.medium,
reasoning_effort: ReasoningEffort.high,
web_search: true,
},
reverseProxyUrl: 'https://api.custom.com',
headers: { 'X-Custom': 'value' },
defaultQuery: { version: 'v1' },
proxy: 'http://proxy.com:8080',
streaming: false,
addParams: {
customParam: 'custom-value',
temperature: 0.8, // Should override modelOptions
},
dropParams: ['frequency_penalty'],
};
const result = getOpenAIConfig(mockApiKey, complexConfig);
expect(result.llmConfig).toMatchObject({
model: 'gpt-4-turbo',
temperature: 0.8, // From addParams
streaming: false,
useResponsesApi: true, // From web_search
});
expect(result.llmConfig.maxTokens).toBe(2000);
expect(result.llmConfig.modelKwargs).toEqual({
text: { verbosity: Verbosity.medium },
customParam: 'custom-value',
});
expect(result.tools).toEqual([{ type: 'web_search_preview' }]);
expect(result.configOptions).toMatchObject({
baseURL: 'https://api.custom.com',
defaultHeaders: { 'X-Custom': 'value' },
defaultQuery: { version: 'v1' },
fetchOptions: expect.objectContaining({
dispatcher: expect.any(Object),
}),
});
});
});
describe('Real Usage Integration Tests', () => {
describe('OpenAI Initialize.js Simulation', () => {
it('should handle OpenAI endpoint configuration like initialize.js', () => {
// Simulate the configuration from OpenAI initialize.js
const modelName = 'gpt-4-turbo';
const endpointOption = {
model_parameters: {
temperature: 0.7,
max_tokens: 2048,
top_p: 0.9,
frequency_penalty: 0.1,
presence_penalty: 0.1,
},
};
// Simulate clientOptions from initialize.js
const clientOptions = {
contextStrategy: 'summarize',
proxy: null,
debug: false,
reverseProxyUrl: null,
streamRate: 30,
titleModel: 'gpt-3.5-turbo',
titleMethod: 'completion',
modelOptions: {
model: modelName,
user: 'test-user-id',
...endpointOption.model_parameters,
},
};
const result = getOpenAIConfig(mockApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
model: modelName,
temperature: 0.7,
maxTokens: 2048,
// topP is converted from top_p in modelOptions
frequencyPenalty: 0.1, // converted from frequency_penalty
presencePenalty: 0.1, // converted from presence_penalty
user: 'test-user-id',
streaming: true, // default
apiKey: mockApiKey,
});
expect(result.configOptions).toEqual({});
expect(result.tools).toEqual([]);
});
it('should handle Azure OpenAI configuration like initialize.js', () => {
// Simulate Azure configuration from mapModelToAzureConfig
const modelName = 'gpt-4-turbo';
const azureOptions = {
azureOpenAIApiKey: 'azure-key-123',
azureOpenAIApiInstanceName: 'prod-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-deployment',
azureOpenAIApiVersion: '2023-12-01-preview',
};
const baseURL = 'https://prod-instance.openai.azure.com';
const headers = {
'X-Custom-Header': 'azure-value',
Authorization: 'Bearer custom-token',
};
// Simulate clientOptions from Azure initialize.js
const clientOptions = {
contextStrategy: null,
proxy: null,
debug: false,
reverseProxyUrl: baseURL,
headers,
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
streamRate: 30,
titleMethod: 'completion',
azure: azureOptions,
addParams: {
temperature: 0.8,
max_completion_tokens: 4000,
},
dropParams: ['frequency_penalty'],
forcePrompt: false,
modelOptions: {
model: modelName,
user: 'azure-user-123',
temperature: 0.7, // Should be overridden by addParams
frequency_penalty: 0.2, // Should be dropped
},
};
const result = getOpenAIConfig(mockApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
model: 'gpt-4-turbo-deployment', // Uses deployment name
temperature: 0.8, // From addParams
user: 'azure-user-123',
streaming: true,
azureOpenAIApiKey: 'azure-key-123',
azureOpenAIApiInstanceName: 'prod-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-deployment',
azureOpenAIApiVersion: '2023-12-01-preview',
});
expect((result.llmConfig as Record<string, unknown>).frequency_penalty).toBeUndefined(); // Dropped
expect(result.llmConfig.modelKwargs).toMatchObject({
max_completion_tokens: 4000,
});
expect(result.configOptions).toMatchObject({
baseURL: baseURL,
defaultHeaders: headers,
});
});
it('should handle Azure serverless configuration', () => {
const modelName = 'gpt-4';
const azureOptions = {
azureOpenAIApiKey: 'serverless-key',
azureOpenAIApiInstanceName: 'serverless-instance',
azureOpenAIApiDeploymentName: 'gpt-4-serverless',
azureOpenAIApiVersion: '2024-02-15-preview',
};
const clientOptions = {
reverseProxyUrl: 'https://serverless.openai.azure.com/openai/v1',
headers: {
'api-key': azureOptions.azureOpenAIApiKey,
},
defaultQuery: {
'api-version': azureOptions.azureOpenAIApiVersion,
},
azure: false as const, // Serverless doesn't use azure object
modelOptions: {
model: modelName,
user: 'serverless-user',
},
};
const result = getOpenAIConfig(azureOptions.azureOpenAIApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
model: modelName,
user: 'serverless-user',
apiKey: azureOptions.azureOpenAIApiKey,
});
expect(result.configOptions).toMatchObject({
baseURL: 'https://serverless.openai.azure.com/openai/v1',
defaultHeaders: {
'api-key': azureOptions.azureOpenAIApiKey,
},
defaultQuery: {
'api-version': azureOptions.azureOpenAIApiVersion,
},
});
});
});
describe('Custom Endpoint Initialize.js Simulation', () => {
it('should handle custom endpoint configuration like initialize.js', () => {
const endpoint = 'custom-openai';
const apiKey = 'custom-api-key-456';
const baseURL = 'https://api.custom-provider.com/v1';
// Simulate endpointConfig from custom initialize.js
const endpointConfig = {
apiKey: 'user_provided',
baseURL: baseURL,
headers: {
'X-Custom-Provider': 'LibreChat',
'User-Agent': 'LibreChat/1.0',
},
addParams: {
custom_parameter: 'custom_value',
temperature: 0.9,
},
dropParams: ['presence_penalty'],
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
forcePrompt: false,
summaryModel: 'gpt-3.5-turbo',
modelDisplayLabel: 'Custom GPT-4',
titleMethod: 'completion',
contextStrategy: 'summarize',
directEndpoint: true,
titleMessageRole: 'user',
streamRate: 25,
};
const clientOptions = {
reverseProxyUrl: baseURL,
proxy: null,
headers: endpointConfig.headers,
addParams: endpointConfig.addParams,
dropParams: endpointConfig.dropParams,
customParams: {},
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,
summaryModel: endpointConfig.summaryModel,
modelDisplayLabel: endpointConfig.modelDisplayLabel,
titleMethod: endpointConfig.titleMethod,
contextStrategy: endpointConfig.contextStrategy,
directEndpoint: endpointConfig.directEndpoint,
titleMessageRole: endpointConfig.titleMessageRole,
streamRate: endpointConfig.streamRate,
modelOptions: {
model: 'gpt-4-custom',
user: 'custom-user-789',
presence_penalty: 0.3, // Should be dropped
max_tokens: 3000,
},
};
const result = getOpenAIConfig(apiKey, clientOptions, endpoint);
expect(result.llmConfig).toMatchObject({
model: 'gpt-4-custom',
user: 'custom-user-789',
temperature: 0.9, // From addParams
maxTokens: 3000,
apiKey: apiKey,
});
expect((result.llmConfig as Record<string, unknown>).presence_penalty).toBeUndefined(); // Dropped
expect(result.llmConfig.modelKwargs).toMatchObject({
custom_parameter: 'custom_value',
});
expect(result.configOptions).toMatchObject({
baseURL: baseURL,
defaultHeaders: endpointConfig.headers,
fetch: expect.any(Function), // directEndpoint creates custom fetch
});
});
it('should handle OpenRouter configuration like custom initialize.js', () => {
const endpoint = 'openrouter';
const apiKey = 'sk-or-v1-custom-key';
const baseURL = 'https://openrouter.ai/api/v1';
const clientOptions = {
reverseProxyUrl: baseURL,
headers: {
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
Authorization: `Bearer ${apiKey}`,
},
addParams: {
top_k: 50,
repetition_penalty: 1.1,
},
modelOptions: {
model: 'anthropic/claude-3-sonnet',
user: 'openrouter-user',
temperature: 0.7,
max_tokens: 4000,
reasoning_effort: ReasoningEffort.high,
reasoning_summary: ReasoningSummary.detailed,
},
};
const result = getOpenAIConfig(apiKey, clientOptions, endpoint);
expect(result.llmConfig).toMatchObject({
model: 'anthropic/claude-3-sonnet',
user: 'openrouter-user',
temperature: 0.7,
maxTokens: 4000,
include_reasoning: true, // OpenRouter specific
reasoning: {
effort: ReasoningEffort.high,
summary: ReasoningSummary.detailed,
},
apiKey: apiKey,
});
expect(result.llmConfig.modelKwargs).toMatchObject({
top_k: 50,
repetition_penalty: 1.1,
});
expect(result.configOptions?.defaultHeaders).toMatchObject({
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
Authorization: `Bearer ${apiKey}`,
});
expect(result.provider).toBe('openrouter');
});
});
describe('Production-like Azure Scenarios', () => {
it('should handle complex Azure multi-group configuration', () => {
// Simulate a production Azure setup with multiple groups
const modelName = 'gpt-4-turbo';
const azureConfig = {
azureOpenAIApiKey: 'prod-key-multi',
azureOpenAIApiInstanceName: 'prod-east-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-prod',
azureOpenAIApiVersion: '2024-02-15-preview',
};
const clientOptions = {
reverseProxyUrl: 'https://prod-east-instance.openai.azure.com',
headers: {
'X-Environment': 'production',
'X-Region': 'us-east-1',
'Content-Type': 'application/json',
},
azure: azureConfig,
addParams: {
temperature: 0.2, // Conservative for production
max_completion_tokens: 8192,
topP: 0.95, // Use camelCase for known param
frequencyPenalty: 0.0, // Use camelCase for known param
presencePenalty: 0.0, // Use camelCase for known param
seed: 12345, // For reproducibility
},
dropParams: [], // Don't drop any params in prod
modelOptions: {
model: modelName,
user: 'prod-user-session-abc123',
stream: true,
},
};
const result = getOpenAIConfig(mockApiKey, clientOptions);
expect(result.llmConfig).toMatchObject({
model: 'gpt-4-turbo-prod',
user: 'prod-user-session-abc123',
temperature: 0.2,
// Parameters from addParams are processed
seed: 12345,
stream: true,
azureOpenAIApiKey: 'prod-key-multi',
azureOpenAIApiInstanceName: 'prod-east-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-prod',
azureOpenAIApiVersion: '2024-02-15-preview',
});
// Check that camelCase conversions happened
expect(result.llmConfig.topP).toBe(0.95);
expect(result.llmConfig.frequencyPenalty).toBe(0.0);
expect(result.llmConfig.presencePenalty).toBe(0.0);
expect(result.llmConfig.modelKwargs).toMatchObject({
max_completion_tokens: 8192,
});
expect(result.configOptions?.baseURL).toBe('https://prod-east-instance.openai.azure.com');
});
it('should handle Azure with environment variable placeholders', () => {
const originalEnv = {
INSTANCE_NAME: process.env.INSTANCE_NAME,
DEPLOYMENT_NAME: process.env.DEPLOYMENT_NAME,
API_VERSION: process.env.API_VERSION,
};
// Set environment variables
process.env.INSTANCE_NAME = 'env-instance';
process.env.DEPLOYMENT_NAME = 'env-deployment';
process.env.API_VERSION = '2024-03-01-preview';
const clientOptions = {
reverseProxyUrl: 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
azure: {
azureOpenAIApiKey: 'env-key',
azureOpenAIApiInstanceName: '${INSTANCE_NAME}',
azureOpenAIApiDeploymentName: '${DEPLOYMENT_NAME}',
azureOpenAIApiVersion: '${API_VERSION}',
},
modelOptions: {
model: 'gpt-4',
user: 'env-user',
},
};
const result = getOpenAIConfig(mockApiKey, clientOptions);
// The constructAzureURL should process placeholders (actual replacement depends on implementation)
expect((result.llmConfig as Record<string, unknown>).azureOpenAIBasePath).toBeDefined();
expect(result.llmConfig.model).toBe('${DEPLOYMENT_NAME}'); // Model becomes deployment name
// Cleanup
Object.entries(originalEnv).forEach(([key, value]) => {
if (value !== undefined) {
process.env[key] = value;
} else {
delete process.env[key];
}
});
});
});
describe('Error Handling and Edge Cases from Real Usage', () => {
it('should handle missing API key scenario', () => {
expect(() => {
getOpenAIConfig('', {
modelOptions: { model: 'gpt-4' },
});
}).not.toThrow(); // The function itself doesn't validate empty keys
});
it('should handle malformed Azure configuration gracefully', () => {
const clientOptions = {
azure: {
azureOpenAIApiKey: 'valid-key',
// Missing required fields
} as Partial<AzureOptions>,
modelOptions: {
model: 'gpt-4',
},
};
const result = getOpenAIConfig(mockApiKey, clientOptions);
expect(result.llmConfig).toBeDefined();
});
it('should handle large parameter sets without performance issues', () => {
const largeAddParams: Record<string, unknown> = {};
const largeModelKwargs: Record<string, unknown> = {};
// Create 50 unknown parameters (using names not in knownOpenAIParams)
for (let i = 0; i < 50; i++) {
largeAddParams[`unknown_param_${i}`] = 0.5;
}
// Create 50 more unknown parameters
for (let i = 0; i < 50; i++) {
largeAddParams[`custom_param_${i}`] = `value_${i}`;
largeModelKwargs[`unknown_param_${i}`] = 0.5;
largeModelKwargs[`custom_param_${i}`] = `value_${i}`;
}
const startTime = Date.now();
const result = getOpenAIConfig(mockApiKey, {
addParams: largeAddParams,
modelOptions: { model: 'gpt-4' },
});
const endTime = Date.now();
expect(endTime - startTime).toBeLessThan(100); // Should be fast
expect(result.llmConfig.modelKwargs).toEqual(largeModelKwargs);
});
});
});
});


@@ -104,7 +104,14 @@ export function getOpenAIConfig(
addParams,
dropParams,
} = options;
-const { reasoning_effort, reasoning_summary, verbosity, ...modelOptions } = _modelOptions;
+const {
+  reasoning_effort,
+  reasoning_summary,
+  verbosity,
+  frequency_penalty,
+  presence_penalty,
+  ...modelOptions
+} = _modelOptions;
const llmConfig: Partial<t.ClientOptions> &
Partial<t.OpenAIParameters> &
Partial<AzureOpenAIInput> = Object.assign(
@@ -115,6 +122,13 @@ export function getOpenAIConfig(
modelOptions,
);
+if (frequency_penalty != null) {
+  llmConfig.frequencyPenalty = frequency_penalty;
+}
+if (presence_penalty != null) {
+  llmConfig.presencePenalty = presence_penalty;
+}
const modelKwargs: Record<string, unknown> = {};
let hasModelKwargs = false;
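The destructuring above pulls the snake_case penalties out of the incoming model options before they are copied onto llmConfig, then maps them to the camelCase fields. A minimal sketch of the observable behavior, using values from the specs above:

const { llmConfig } = getOpenAIConfig('test-api-key', {
  modelOptions: { model: 'gpt-4', frequency_penalty: 0.5, presence_penalty: 0.6 },
});
// llmConfig.frequencyPenalty === 0.5
// llmConfig.presencePenalty === 0.6
// the snake_case keys themselves never reach llmConfig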


@@ -14,11 +14,11 @@ export type OpenAIParameters = z.infer<typeof openAISchema>;
export interface OpenAIConfigOptions {
modelOptions?: Partial<OpenAIParameters>;
directEndpoint?: boolean;
-reverseProxyUrl?: string;
+reverseProxyUrl?: string | null;
defaultQuery?: Record<string, string | undefined>;
headers?: Record<string, string>;
-proxy?: string;
-azure?: AzureOptions;
+proxy?: string | null;
+azure?: false | AzureOptions;
streaming?: boolean;
addParams?: Record<string, unknown>;
dropParams?: string[];
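With the widened types, configurations like the serverless Azure scenario in the specs type-check without casts. A sketch under the new signature:

const options: OpenAIConfigOptions = {
  reverseProxyUrl: 'https://serverless.openai.azure.com/openai/v1',
  proxy: null, // permitted by proxy?: string | null
  azure: false, // serverless setups opt out of the azure options object
  defaultQuery: { 'api-version': '2024-02-15-preview' },
};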