Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-01-30 14:25:19 +01:00)

commit 23ac2556da
Merge branch 'main' into feature/entra-id-azure-integration

193 changed files with 3845 additions and 692 deletions
@@ -245,8 +245,8 @@ describe('getLLMConfig', () => {
       },
     });

-    // The actual anthropicSettings.maxOutputTokens.reset('claude-3-opus') returns 4096
-    expect(result.llmConfig).toHaveProperty('maxTokens', 4096);
+    // The actual anthropicSettings.maxOutputTokens.reset('claude-3-opus') returns 8192
+    expect(result.llmConfig).toHaveProperty('maxTokens', 8192);
   });

   it('should handle both proxy and reverseProxyUrl', () => {

@@ -698,9 +698,17 @@ describe('getLLMConfig', () => {
         { model: 'claude-3.5-sonnet-20241022', expectedMaxTokens: 8192 },
         { model: 'claude-3-7-sonnet', expectedMaxTokens: 8192 },
         { model: 'claude-3.7-sonnet-20250109', expectedMaxTokens: 8192 },
-        { model: 'claude-3-opus', expectedMaxTokens: 4096 },
-        { model: 'claude-3-haiku', expectedMaxTokens: 4096 },
-        { model: 'claude-2.1', expectedMaxTokens: 4096 },
+        { model: 'claude-3-opus', expectedMaxTokens: 8192 },
+        { model: 'claude-3-haiku', expectedMaxTokens: 8192 },
+        { model: 'claude-2.1', expectedMaxTokens: 8192 },
+        { model: 'claude-sonnet-4-5', expectedMaxTokens: 64000 },
+        { model: 'claude-sonnet-4-5-20250929', expectedMaxTokens: 64000 },
+        { model: 'claude-haiku-4-5', expectedMaxTokens: 64000 },
+        { model: 'claude-haiku-4-5-20251001', expectedMaxTokens: 64000 },
+        { model: 'claude-opus-4-1', expectedMaxTokens: 32000 },
+        { model: 'claude-opus-4-1-20250805', expectedMaxTokens: 32000 },
+        { model: 'claude-sonnet-4-20250514', expectedMaxTokens: 64000 },
+        { model: 'claude-opus-4-0', expectedMaxTokens: 32000 },
       ];

       testCases.forEach(({ model, expectedMaxTokens }) => {

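Note on the expectations above: read together, they pin down one default per model family. Below is a minimal TypeScript sketch of that mapping, inferred from the test cases alone (the shipped logic is anthropicSettings.maxOutputTokens.reset in librechat-data-provider; the regexes here are assumptions, not its code):

// Illustrative sketch: default max output tokens per Claude family,
// inferred from the spec expectations above. Not the shipped implementation.
function defaultMaxOutputTokens(model: string): number {
  if (/claude-opus-[4-9]/.test(model)) {
    return 32000; // claude-opus-4-1, claude-opus-5, ... per the tests
  }
  if (/claude-(sonnet|haiku)-[4-9]/.test(model)) {
    return 64000; // claude-sonnet-4-5, claude-haiku-5.5, ... per the tests
  }
  return 8192; // claude-3-opus, claude-3-haiku, claude-2.1 after this change
}

defaultMaxOutputTokens('claude-sonnet-4-5-20250929'); // 64000
defaultMaxOutputTokens('claude-opus-4-0'); // 32000
defaultMaxOutputTokens('claude-2.1'); // 8192
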
@@ -729,6 +737,222 @@ describe('getLLMConfig', () => {
     });
   });

+  describe('Claude 4.x Model maxOutputTokens Defaults', () => {
+    it('should default Claude Sonnet 4.x models to 64K tokens', () => {
+      const testCases = ['claude-sonnet-4-5', 'claude-sonnet-4-5-20250929', 'claude-sonnet-4.5'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should default Claude Haiku 4.x models to 64K tokens', () => {
+      const testCases = ['claude-haiku-4-5', 'claude-haiku-4-5-20251001', 'claude-haiku-4.5'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should default Claude Opus 4.x models to 32K tokens', () => {
+      const testCases = ['claude-opus-4-1', 'claude-opus-4-1-20250805', 'claude-opus-4.1'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(32000);
+      });
+    });
+
+    it('should default future Claude 4.x Sonnet/Haiku models to 64K (future-proofing)', () => {
+      const testCases = ['claude-sonnet-4-20250514', 'claude-sonnet-4-9', 'claude-haiku-4-8'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should default future Claude 4.x Opus models to 32K (future-proofing)', () => {
+      const testCases = ['claude-opus-4-0', 'claude-opus-4-7'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(32000);
+      });
+    });
+
+    it('should handle explicit maxOutputTokens override for Claude 4.x models', () => {
+      const result = getLLMConfig('test-key', {
+        modelOptions: {
+          model: 'claude-sonnet-4-5',
+          maxOutputTokens: 64000, // Explicitly set to 64K
+        },
+      });
+
+      expect(result.llmConfig.maxTokens).toBe(64000);
+    });
+
+    it('should handle undefined maxOutputTokens for Claude 4.x (use reset default)', () => {
+      const testCases = [
+        { model: 'claude-sonnet-4-5', expected: 64000 },
+        { model: 'claude-haiku-4-5', expected: 64000 },
+        { model: 'claude-opus-4-1', expected: 32000 },
+      ];
+
+      testCases.forEach(({ model, expected }) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: {
+            model,
+            maxOutputTokens: undefined,
+          },
+        });
+        expect(result.llmConfig.maxTokens).toBe(expected);
+      });
+    });
+
+    it('should handle Claude 4 Sonnet/Haiku with thinking enabled', () => {
+      const testCases = ['claude-sonnet-4-5', 'claude-haiku-4-5'];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: {
+            model,
+            thinking: true,
+            thinkingBudget: 10000,
+          },
+        });
+
+        expect(result.llmConfig.thinking).toMatchObject({
+          type: 'enabled',
+          budget_tokens: 10000,
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should handle Claude 4 Opus with thinking enabled', () => {
+      const result = getLLMConfig('test-key', {
+        modelOptions: {
+          model: 'claude-opus-4-1',
+          thinking: true,
+          thinkingBudget: 10000,
+        },
+      });
+
+      expect(result.llmConfig.thinking).toMatchObject({
+        type: 'enabled',
+        budget_tokens: 10000,
+      });
+      expect(result.llmConfig.maxTokens).toBe(32000);
+    });
+
+    it('should respect model-specific maxOutputTokens for Claude 4.x models', () => {
+      const testCases = [
+        { model: 'claude-sonnet-4-5', maxOutputTokens: 50000, expected: 50000 },
+        { model: 'claude-haiku-4-5', maxOutputTokens: 40000, expected: 40000 },
+        { model: 'claude-opus-4-1', maxOutputTokens: 20000, expected: 20000 },
+      ];
+
+      testCases.forEach(({ model, maxOutputTokens, expected }) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: {
+            model,
+            maxOutputTokens,
+          },
+        });
+        expect(result.llmConfig.maxTokens).toBe(expected);
+      });
+    });
+
+    it('should future-proof Claude 5.x Sonnet models with 64K default', () => {
+      const testCases = [
+        'claude-sonnet-5',
+        'claude-sonnet-5-0',
+        'claude-sonnet-5-2-20260101',
+        'claude-sonnet-5.5',
+      ];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should future-proof Claude 5.x Haiku models with 64K default', () => {
+      const testCases = [
+        'claude-haiku-5',
+        'claude-haiku-5-0',
+        'claude-haiku-5-2-20260101',
+        'claude-haiku-5.5',
+      ];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(64000);
+      });
+    });
+
+    it('should future-proof Claude 5.x Opus models with 32K default', () => {
+      const testCases = [
+        'claude-opus-5',
+        'claude-opus-5-0',
+        'claude-opus-5-2-20260101',
+        'claude-opus-5.5',
+      ];
+
+      testCases.forEach((model) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(32000);
+      });
+    });
+
+    it('should future-proof Claude 6-9.x models with correct defaults', () => {
+      const testCases = [
+        // Claude 6.x
+        { model: 'claude-sonnet-6', expected: 64000 },
+        { model: 'claude-haiku-6-0', expected: 64000 },
+        { model: 'claude-opus-6-1', expected: 32000 },
+        // Claude 7.x
+        { model: 'claude-sonnet-7-20270101', expected: 64000 },
+        { model: 'claude-haiku-7.5', expected: 64000 },
+        { model: 'claude-opus-7', expected: 32000 },
+        // Claude 8.x
+        { model: 'claude-sonnet-8', expected: 64000 },
+        { model: 'claude-haiku-8-2', expected: 64000 },
+        { model: 'claude-opus-8-latest', expected: 32000 },
+        // Claude 9.x
+        { model: 'claude-sonnet-9', expected: 64000 },
+        { model: 'claude-haiku-9', expected: 64000 },
+        { model: 'claude-opus-9', expected: 32000 },
+      ];
+
+      testCases.forEach(({ model, expected }) => {
+        const result = getLLMConfig('test-key', {
+          modelOptions: { model },
+        });
+        expect(result.llmConfig.maxTokens).toBe(expected);
+      });
+    });
+  });
+
   describe('Parameter Boundary and Validation Logic', () => {
     it('should handle temperature boundary values', () => {
       const testCases = [

@@ -784,7 +1008,7 @@ describe('getLLMConfig', () => {
     it('should handle maxOutputTokens boundary values', () => {
       const testCases = [
         { model: 'claude-3-opus', maxOutputTokens: 1, expected: 1 }, // min
-        { model: 'claude-3-opus', maxOutputTokens: 4096, expected: 4096 }, // max for legacy
+        { model: 'claude-3-opus', maxOutputTokens: 8192, expected: 8192 }, // default for claude-3
         { model: 'claude-3-5-sonnet', maxOutputTokens: 1, expected: 1 }, // min
         { model: 'claude-3-5-sonnet', maxOutputTokens: 200000, expected: 200000 }, // max for new
         { model: 'claude-3-7-sonnet', maxOutputTokens: 8192, expected: 8192 }, // default

@@ -34,7 +34,6 @@ function getLLMConfig(

   const defaultOptions = {
     model: anthropicSettings.model.default,
-    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
     stream: true,
   };

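Why remove the static default: one fixed maxOutputTokens cannot fit both the claude-3 family (8192) and claude-4.x Sonnet/Haiku (64000) or Opus (32000), so the fallback has to be resolved per model. Below is a hedged sketch of that fallback; only anthropicSettings.maxOutputTokens.reset is attested in the surrounding hunks, and the ?? wiring is an assumption:

// Sketch, not the shipped code: with the static default gone, the effective
// value falls back to a per-model reset when the caller omits maxOutputTokens.
function resolveMaxTokens(modelOptions: { model: string; maxOutputTokens?: number }): number {
  return (
    modelOptions.maxOutputTokens ??
    anthropicSettings.maxOutputTokens.reset(modelOptions.model) // hypothetical wiring
  );
}
// resolveMaxTokens({ model: 'claude-sonnet-4-5' }) -> 64000
// resolveMaxTokens({ model: 'claude-3-opus' })     -> 8192
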
@@ -30,7 +30,7 @@ describe('getOpenAIConfig - Anthropic Compatibility', () => {
       apiKey: 'sk-xxxx',
       model: 'claude-sonnet-4',
       stream: true,
-      maxTokens: 8192,
+      maxTokens: 64000,
       modelKwargs: {
         metadata: {
           user_id: 'some_user_id',

@@ -1,4 +1,9 @@
-import { Verbosity, ReasoningEffort, ReasoningSummary } from 'librechat-data-provider';
+import {
+  Verbosity,
+  EModelEndpoint,
+  ReasoningEffort,
+  ReasoningSummary,
+} from 'librechat-data-provider';
 import type { RequestInit } from 'undici';
 import type { OpenAIParameters, AzureOptions } from '~/types';
 import { getOpenAIConfig } from './config';

@@ -103,12 +108,89 @@ describe('getOpenAIConfig', () => {

     const result = getOpenAIConfig(mockApiKey, { modelOptions });

+    /** When no endpoint is specified, it's treated as non-openAI/azureOpenAI, so uses reasoning object */
     expect(result.llmConfig.reasoning).toEqual({
       effort: ReasoningEffort.high,
       summary: ReasoningSummary.detailed,
     });
     expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
   });

+  it('should use reasoning_effort for openAI endpoint without useResponsesApi', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.openAI);
+
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBe(
+      ReasoningEffort.high,
+    );
+    expect(result.llmConfig.reasoning).toBeUndefined();
+  });
+
+  it('should use reasoning_effort for azureOpenAI endpoint without useResponsesApi', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.azureOpenAI);
+
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBe(
+      ReasoningEffort.high,
+    );
+    expect(result.llmConfig.reasoning).toBeUndefined();
+  });
+
+  it('should use reasoning object for openAI endpoint with useResponsesApi=true', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+      useResponsesApi: true,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.openAI);
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
+  it('should use reasoning object for azureOpenAI endpoint with useResponsesApi=true', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+      useResponsesApi: true,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, EModelEndpoint.azureOpenAI);
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
+  it('should use reasoning object for non-openAI/azureOpenAI endpoints', () => {
+    const modelOptions = {
+      reasoning_effort: ReasoningEffort.high,
+      reasoning_summary: ReasoningSummary.detailed,
+    };
+
+    const result = getOpenAIConfig(mockApiKey, { modelOptions }, 'custom-endpoint');
+
+    expect(result.llmConfig.reasoning).toEqual({
+      effort: ReasoningEffort.high,
+      summary: ReasoningSummary.detailed,
+    });
+    expect((result.llmConfig as Record<string, unknown>).reasoning_effort).toBeUndefined();
+  });
+
   it('should handle OpenRouter configuration', () => {
     const reverseProxyUrl = 'https://openrouter.ai/api/v1';

@@ -655,6 +737,27 @@ describe('getOpenAIConfig', () => {
     ).toBeUndefined();
   });

+  it('should create correct Azure baseURL when response api is selected', () => {
+    const azure = {
+      azureOpenAIApiInstanceName: 'test-instance',
+      azureOpenAIApiDeploymentName: 'test-deployment',
+      azureOpenAIApiVersion: '2023-08-15',
+      azureOpenAIApiKey: 'azure-key',
+    };
+
+    const result = getOpenAIConfig(mockApiKey, {
+      azure,
+      modelOptions: { useResponsesApi: true },
+      reverseProxyUrl:
+        'https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}',
+    });
+
+    expect(result.configOptions?.baseURL).toBe(
+      'https://test-instance.openai.azure.com/openai/v1',
+    );
+    expect(result.configOptions?.baseURL).not.toContain('deployments');
+  });
+
   it('should handle Azure with organization from environment', () => {
     const originalOrg = process.env.OPENAI_ORGANIZATION;
     process.env.OPENAI_ORGANIZATION = 'test-org-123';

@@ -68,6 +68,7 @@ export function getOpenAIConfig(
     azure,
     apiKey,
     baseURL,
+    endpoint,
     streaming,
     addParams,
     dropParams,

@@ -112,8 +113,10 @@ export function getOpenAIConfig(
       return;
     }

+    const updatedUrl = configOptions.baseURL?.replace(/\/deployments(?:\/.*)?$/, '/v1');
+
     configOptions.baseURL = constructAzureURL({
-      baseURL: configOptions.baseURL || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
+      baseURL: updatedUrl || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
       azureOptions: azure,
     });

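The rewrite is easy to check in isolation (standalone TypeScript; the instance and deployment names are the fixtures from the spec hunk above):

// Standalone check of the '/deployments/...' -> '/v1' rewrite added above.
const base = 'https://test-instance.openai.azure.com/openai/deployments/test-deployment';
const updated = base.replace(/\/deployments(?:\/.*)?$/, '/v1');
console.log(updated); // https://test-instance.openai.azure.com/openai/v1
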
@@ -1,4 +1,4 @@
-import { removeNullishValues } from 'librechat-data-provider';
+import { EModelEndpoint, removeNullishValues } from 'librechat-data-provider';
 import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { AzureOpenAIInput } from '@langchain/openai';
 import type { OpenAI } from 'openai';

@@ -79,6 +79,7 @@ export function getOpenAILLMConfig({
   azure,
   apiKey,
   baseURL,
+  endpoint,
   streaming,
   addParams,
   dropParams,

@@ -88,6 +89,7 @@ export function getOpenAILLMConfig({
   apiKey: string;
   streaming: boolean;
   baseURL?: string | null;
+  endpoint?: EModelEndpoint | string | null;
   modelOptions: Partial<t.OpenAIParameters>;
   addParams?: Record<string, unknown>;
   dropParams?: string[];

@@ -155,7 +157,8 @@ export function getOpenAILLMConfig({

   if (
     hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
-    (llmConfig.useResponsesApi === true || useOpenRouter)
+    (llmConfig.useResponsesApi === true ||
+      (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
   ) {
     llmConfig.reasoning = removeNullishValues(
       {

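Net effect of the widened gate, matching the specs above: openAI and azureOpenAI without the Responses API keep the flat reasoning_effort field, while every other case (useResponsesApi, OpenRouter, which the removed useOpenRouter check used to single out, or any custom endpoint) gets the nested reasoning object. A condensed restatement as a sketch; it assumes EModelEndpoint.openAI === 'openAI' and EModelEndpoint.azureOpenAI === 'azureOpenAI', and applies only once hasReasoningParams is true:

// Sketch: the branch above collapsed into a predicate.
function usesReasoningObject(
  endpoint: string | null | undefined,
  useResponsesApi?: boolean,
): boolean {
  return useResponsesApi === true || (endpoint !== 'openAI' && endpoint !== 'azureOpenAI');
}

usesReasoningObject('openAI', false); // false: flat reasoning_effort
usesReasoningObject('azureOpenAI', true); // true: nested reasoning object
usesReasoningObject('custom-endpoint', false); // true: nested reasoning object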