// (stray commit-log artifact removed: 2026-01-03 17:41:48 +00:00)
|
|
|
import { AuthType, EModelEndpoint } from 'librechat-data-provider';
|
|
|
|
|
import { initializeBedrock } from './initialize';
|
|
|
|
|
import type { BaseInitializeParams, BedrockLLMConfigResult } from '~/types';
|
|
|
|
|
import { checkUserKeyExpiry } from '~/utils';
|
|
|
|
|
|
|
|
|
|
jest.mock('https-proxy-agent', () => ({
|
|
|
|
|
HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
jest.mock('@smithy/node-http-handler', () => ({
|
|
|
|
|
NodeHttpHandler: jest.fn().mockImplementation((options) => ({ ...options })),
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
jest.mock('@aws-sdk/client-bedrock-runtime', () => ({
|
|
|
|
|
BedrockRuntimeClient: jest.fn().mockImplementation((config) => ({
|
|
|
|
|
...config,
|
|
|
|
|
_isBedrockClient: true,
|
|
|
|
|
})),
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
jest.mock('~/utils', () => ({
|
|
|
|
|
checkUserKeyExpiry: jest.fn(),
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
/** Typed handle to the mocked `checkUserKeyExpiry` for assertion convenience. */
const mockedCheckUserKeyExpiry = jest.mocked(checkUserKeyExpiry);
|
|
|
|
|
|
|
|
|
|
const createMockParams = (
|
|
|
|
|
overrides: Partial<{
|
|
|
|
|
config: Record<string, unknown>;
|
|
|
|
|
body: Record<string, unknown>;
|
|
|
|
|
user: { id: string };
|
|
|
|
|
model_parameters: Record<string, unknown>;
|
|
|
|
|
env: Record<string, string | undefined>;
|
|
|
|
|
}> = {},
|
|
|
|
|
): BaseInitializeParams => {
|
|
|
|
|
const mockDb = {
|
|
|
|
|
getUserKey: jest.fn().mockResolvedValue(
|
|
|
|
|
JSON.stringify({
|
|
|
|
|
accessKeyId: 'user-access-key',
|
|
|
|
|
secretAccessKey: 'user-secret-key',
|
|
|
|
|
}),
|
|
|
|
|
),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
return {
|
|
|
|
|
req: {
|
|
|
|
|
config: overrides.config ?? {},
|
|
|
|
|
body: overrides.body ?? {},
|
|
|
|
|
user: overrides.user ?? { id: 'test-user-id' },
|
|
|
|
|
},
|
|
|
|
|
endpoint: EModelEndpoint.bedrock,
|
|
|
|
|
model_parameters: overrides.model_parameters ?? { model: 'anthropic.claude-3-sonnet' },
|
|
|
|
|
db: mockDb,
|
|
|
|
|
} as unknown as BaseInitializeParams;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
describe('initializeBedrock', () => {
|
|
|
|
|
const originalEnv = process.env;
|
|
|
|
|
|
|
|
|
|
beforeEach(() => {
|
|
|
|
|
jest.clearAllMocks();
|
|
|
|
|
process.env = { ...originalEnv };
|
|
|
|
|
process.env.BEDROCK_AWS_ACCESS_KEY_ID = 'test-access-key';
|
|
|
|
|
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = 'test-secret-key';
|
|
|
|
|
process.env.BEDROCK_AWS_DEFAULT_REGION = 'us-east-1';
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
afterEach(() => {
|
|
|
|
|
process.env = originalEnv;
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('Basic Configuration', () => {
|
|
|
|
|
it('should create a basic configuration with credentials from environment', async () => {
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('credentials');
|
|
|
|
|
expect(result.llmConfig.credentials).toEqual({
|
|
|
|
|
accessKeyId: 'test-access-key',
|
|
|
|
|
secretAccessKey: 'test-secret-key',
|
|
|
|
|
});
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('model', 'anthropic.claude-3-sonnet');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should include region from environment', async () => {
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('region', 'us-east-1');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle model_parameters', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-3-opus',
|
|
|
|
|
temperature: 0.7,
|
|
|
|
|
maxTokens: 4096,
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
const result = await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('model', 'anthropic.claude-3-opus');
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('temperature', 0.7);
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('maxTokens', 4096);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle session token when provided', async () => {
|
|
|
|
|
process.env.BEDROCK_AWS_SESSION_TOKEN = 'test-session-token';
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.credentials).toEqual({
|
|
|
|
|
accessKeyId: 'test-access-key',
|
|
|
|
|
secretAccessKey: 'test-secret-key',
|
|
|
|
|
sessionToken: 'test-session-token',
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('GuardrailConfig', () => {
|
|
|
|
|
it('should apply guardrailConfig from backend config', async () => {
|
|
|
|
|
const guardrailConfig = {
|
|
|
|
|
guardrailIdentifier: 'test-guardrail-id',
|
|
|
|
|
guardrailVersion: '1',
|
|
|
|
|
trace: 'enabled' as const,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('guardrailConfig');
|
|
|
|
|
expect(result.llmConfig.guardrailConfig).toEqual(guardrailConfig);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should NOT include guardrailConfig when not configured', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('guardrailConfig');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should apply guardrailConfig regardless of model_parameters', async () => {
|
|
|
|
|
const guardrailConfig = {
|
|
|
|
|
guardrailIdentifier: 'admin-guardrail',
|
|
|
|
|
guardrailVersion: 'DRAFT',
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-3-sonnet',
|
|
|
|
|
temperature: 0.5,
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig).toEqual(guardrailConfig);
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('temperature', 0.5);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle guardrailConfig with enabled_full trace', async () => {
|
|
|
|
|
const guardrailConfig = {
|
|
|
|
|
guardrailIdentifier: 'compliance-guardrail',
|
|
|
|
|
guardrailVersion: '2',
|
|
|
|
|
trace: 'enabled_full' as const,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig).toEqual(guardrailConfig);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.trace).toBe('enabled_full');
|
|
|
|
|
});
|
2026-02-10 14:26:50 -06:00
|
|
|
|
|
|
|
|
it('should resolve environment variables in guardrailIdentifier', async () => {
|
|
|
|
|
const guardrailId = 'gr-abc123xyz';
|
|
|
|
|
process.env.GUARDRAIL_ID = guardrailId;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: '${GUARDRAIL_ID}',
|
|
|
|
|
guardrailVersion: '1',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe(guardrailId);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe('1');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should resolve environment variables in guardrailVersion', async () => {
|
|
|
|
|
const guardrailVersion = 'DRAFT';
|
|
|
|
|
process.env.GUARDRAIL_VERSION = guardrailVersion;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: 'static-guardrail-id',
|
|
|
|
|
guardrailVersion: '${GUARDRAIL_VERSION}',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe('static-guardrail-id');
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe(guardrailVersion);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should resolve environment variables in both guardrailIdentifier and guardrailVersion', async () => {
|
|
|
|
|
const guardrailId = 'gr-production-123';
|
|
|
|
|
const guardrailVersion = '5';
|
|
|
|
|
process.env.PROD_GUARDRAIL_ID = guardrailId;
|
|
|
|
|
process.env.PROD_GUARDRAIL_VERSION = guardrailVersion;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: '${PROD_GUARDRAIL_ID}',
|
|
|
|
|
guardrailVersion: '${PROD_GUARDRAIL_VERSION}',
|
|
|
|
|
trace: 'enabled' as const,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe(guardrailId);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe(guardrailVersion);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.trace).toBe('enabled');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should use direct values when no env variable syntax is used', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: 'direct-guardrail-id',
|
|
|
|
|
guardrailVersion: '3',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe('direct-guardrail-id');
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe('3');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should fall back to original string when env variable is not set', async () => {
|
|
|
|
|
delete process.env.NONEXISTENT_GUARDRAIL_ID;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: '${NONEXISTENT_GUARDRAIL_ID}',
|
|
|
|
|
guardrailVersion: '1',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe(
|
|
|
|
|
'${NONEXISTENT_GUARDRAIL_ID}',
|
|
|
|
|
);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe('1');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle env variable with whitespace around it', async () => {
|
|
|
|
|
const guardrailId = 'gr-trimmed-123';
|
|
|
|
|
process.env.TRIMMED_GUARDRAIL_ID = guardrailId;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: ' ${TRIMMED_GUARDRAIL_ID} ',
|
|
|
|
|
guardrailVersion: '2',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe(guardrailId);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe('2');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should preserve trace field when resolving env variables', async () => {
|
|
|
|
|
const guardrailId = 'gr-with-trace';
|
|
|
|
|
process.env.GUARDRAIL_WITH_TRACE = guardrailId;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
guardrailConfig: {
|
|
|
|
|
guardrailIdentifier: '${GUARDRAIL_WITH_TRACE}',
|
|
|
|
|
guardrailVersion: '1',
|
|
|
|
|
trace: 'enabled_full' as const,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailIdentifier).toBe(guardrailId);
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.guardrailVersion).toBe('1');
|
|
|
|
|
expect(result.llmConfig.guardrailConfig?.trace).toBe('enabled_full');
|
|
|
|
|
});
|
2026-01-03 17:41:48 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('Proxy Configuration', () => {
|
|
|
|
|
it('should create BedrockRuntimeClient with proxy when PROXY is set', async () => {
|
|
|
|
|
process.env.PROXY = 'http://proxy:8080';
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('client');
|
|
|
|
|
expect(result.llmConfig.client).toHaveProperty('_isBedrockClient', true);
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('credentials');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should include reverse proxy endpoint when BEDROCK_REVERSE_PROXY is set with PROXY', async () => {
|
|
|
|
|
process.env.PROXY = 'http://proxy:8080';
|
|
|
|
|
process.env.BEDROCK_REVERSE_PROXY = 'custom-bedrock-endpoint.com';
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('client');
|
|
|
|
|
expect(result.llmConfig.client).toHaveProperty(
|
|
|
|
|
'endpoint',
|
|
|
|
|
'https://custom-bedrock-endpoint.com',
|
|
|
|
|
);
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('Reverse Proxy Configuration', () => {
|
|
|
|
|
it('should set endpointHost when BEDROCK_REVERSE_PROXY is set without PROXY', async () => {
|
|
|
|
|
process.env.BEDROCK_REVERSE_PROXY = 'reverse-proxy.example.com';
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('endpointHost', 'reverse-proxy.example.com');
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('client');
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('User-Provided Credentials', () => {
|
|
|
|
|
it('should fetch credentials from database when user-provided', async () => {
|
|
|
|
|
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = AuthType.USER_PROVIDED;
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
body: { key: '2024-12-31T23:59:59Z' },
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(params.db.getUserKey).toHaveBeenCalledWith({
|
|
|
|
|
userId: 'test-user-id',
|
|
|
|
|
name: EModelEndpoint.bedrock,
|
|
|
|
|
});
|
|
|
|
|
expect(result.llmConfig.credentials).toEqual({
|
|
|
|
|
accessKeyId: 'user-access-key',
|
|
|
|
|
secretAccessKey: 'user-secret-key',
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should check key expiry for user-provided credentials', async () => {
|
|
|
|
|
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = AuthType.USER_PROVIDED;
|
|
|
|
|
const expiresAt = '2024-12-31T23:59:59Z';
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
body: { key: expiresAt },
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(mockedCheckUserKeyExpiry).toHaveBeenCalledWith(expiresAt, EModelEndpoint.bedrock);
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('Credentials Edge Cases', () => {
|
|
|
|
|
it('should set credentials to undefined when access key and secret are empty', async () => {
|
|
|
|
|
process.env.BEDROCK_AWS_ACCESS_KEY_ID = '';
|
|
|
|
|
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = '';
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.credentials).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should set credentials to undefined when access key and secret are undefined', async () => {
|
|
|
|
|
delete process.env.BEDROCK_AWS_ACCESS_KEY_ID;
|
|
|
|
|
delete process.env.BEDROCK_AWS_SECRET_ACCESS_KEY;
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.credentials).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should throw error when user-provided credentials are not found', async () => {
|
|
|
|
|
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY = AuthType.USER_PROVIDED;
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
(params.db.getUserKey as jest.Mock).mockResolvedValue(null);
|
|
|
|
|
|
|
|
|
|
await expect(initializeBedrock(params)).rejects.toThrow(
|
|
|
|
|
'Bedrock credentials not provided. Please provide them again.',
|
|
|
|
|
);
|
|
|
|
|
});
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
describe('Return Structure', () => {
|
|
|
|
|
it('should return llmConfig and configOptions', async () => {
|
|
|
|
|
const params = createMockParams();
|
|
|
|
|
const result = await initializeBedrock(params);
|
|
|
|
|
|
|
|
|
|
expect(result).toHaveProperty('llmConfig');
|
|
|
|
|
expect(result).toHaveProperty('configOptions');
|
|
|
|
|
expect(typeof result.configOptions).toBe('object');
|
|
|
|
|
});
|
|
|
|
|
});
|
// (stray commit-log artifact removed: 2026-01-16 10:52:58 -08:00)
|
|
|
|
|
|
|
|
describe('Inference Profile Configuration', () => {
|
|
|
|
|
it('should set applicationInferenceProfile when model has matching inference profile config', async () => {
|
|
|
|
|
const inferenceProfileArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/abc123';
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': inferenceProfileArn,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', inferenceProfileArn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should NOT set applicationInferenceProfile when model has no matching config', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-sonnet-4-5-20250929-v1:0':
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/xyz789',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0', // Different model
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('applicationInferenceProfile');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should resolve environment variable in inference profile ARN', async () => {
|
|
|
|
|
const inferenceProfileArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:951834775723:application-inference-profile/yjr1elcyt29s';
|
|
|
|
|
process.env.BEDROCK_INFERENCE_PROFILE_ARN = inferenceProfileArn;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': '${BEDROCK_INFERENCE_PROFILE_ARN}',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', inferenceProfileArn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should use direct ARN when no env variable syntax is used', async () => {
|
|
|
|
|
const directArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/direct123';
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': directArn,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', directArn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should fall back to original string when env variable is not set', async () => {
|
|
|
|
|
// Ensure the env var is not set
|
|
|
|
|
delete process.env.NONEXISTENT_PROFILE_ARN;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': '${NONEXISTENT_PROFILE_ARN}',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
// Should return the original ${VAR} string when env var doesn't exist
|
|
|
|
|
expect(result.llmConfig).toHaveProperty(
|
|
|
|
|
'applicationInferenceProfile',
|
|
|
|
|
'${NONEXISTENT_PROFILE_ARN}',
|
|
|
|
|
);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should resolve multiple different env variables for different models', async () => {
|
|
|
|
|
const claude37Arn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/claude37';
|
|
|
|
|
const sonnet45Arn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/sonnet45';
|
|
|
|
|
|
|
|
|
|
process.env.CLAUDE_37_PROFILE = claude37Arn;
|
|
|
|
|
process.env.SONNET_45_PROFILE = sonnet45Arn;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': '${CLAUDE_37_PROFILE}',
|
|
|
|
|
'us.anthropic.claude-sonnet-4-5-20250929-v1:0': '${SONNET_45_PROFILE}',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', claude37Arn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle env variable with whitespace around it', async () => {
|
|
|
|
|
const inferenceProfileArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/trimmed';
|
|
|
|
|
process.env.TRIMMED_PROFILE_ARN = inferenceProfileArn;
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': ' ${TRIMMED_PROFILE_ARN} ',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', inferenceProfileArn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should NOT set applicationInferenceProfile when inferenceProfiles config is empty', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('applicationInferenceProfile');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should NOT set applicationInferenceProfile when no bedrock config exists', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).not.toHaveProperty('applicationInferenceProfile');
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle multiple inference profiles and select the correct one', async () => {
|
|
|
|
|
const sonnet45Arn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/sonnet45';
|
|
|
|
|
const claude37Arn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/claude37';
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-sonnet-4-5-20250929-v1:0': sonnet45Arn,
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': claude37Arn,
|
|
|
|
|
'global.anthropic.claude-opus-4-5-20251101-v1:0':
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/opus45',
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', claude37Arn);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should work alongside guardrailConfig', async () => {
|
|
|
|
|
const inferenceProfileArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/abc123';
|
|
|
|
|
const guardrailConfig = {
|
|
|
|
|
guardrailIdentifier: 'test-guardrail',
|
|
|
|
|
guardrailVersion: '1',
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': inferenceProfileArn,
|
|
|
|
|
},
|
|
|
|
|
guardrailConfig,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', inferenceProfileArn);
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('guardrailConfig', guardrailConfig);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should preserve the original model ID in llmConfig.model', async () => {
|
|
|
|
|
const inferenceProfileArn =
|
|
|
|
|
'arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/abc123';
|
|
|
|
|
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
config: {
|
|
|
|
|
endpoints: {
|
|
|
|
|
[EModelEndpoint.bedrock]: {
|
|
|
|
|
inferenceProfiles: {
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0': inferenceProfileArn,
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
},
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
// Model ID should remain unchanged - only applicationInferenceProfile should be set
|
|
|
|
|
expect(result.llmConfig).toHaveProperty(
|
|
|
|
|
'model',
|
|
|
|
|
'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
|
|
|
|
|
);
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('applicationInferenceProfile', inferenceProfileArn);
|
|
|
|
|
});
|
|
|
|
|
});
|
// (stray commit-log artifact removed: 2026-02-06 18:35:36 -05:00)
|
|
|
|
|
|
|
|
describe('Opus 4.6 Adaptive Thinking', () => {
|
🆔 fix: Atomic File Dedupe, Bedrock Tokens Fix, and Allowed MIME Types (#11675)
* feat: Add support for Apache Parquet MIME types
- Introduced 'application/x-parquet' to the full MIME types list and code interpreter MIME types list.
- Updated application MIME types regex to include 'x-parquet' and 'vnd.apache.parquet'.
- Added mapping for '.parquet' files to 'application/x-parquet' in code type mapping, enhancing file format support.
* feat: Implement atomic file claiming for code execution outputs
- Added a new `claimCodeFile` function to atomically claim a file_id for code execution outputs, preventing duplicates by using a compound key of filename and conversationId.
- Updated `processCodeOutput` to utilize the new claiming mechanism, ensuring that concurrent calls for the same filename converge on a single record.
- Refactored related tests to validate the new atomic claiming behavior and its impact on file usage tracking and versioning.
* fix: Update image file handling to use cache-busting filepath
- Modified the `processCodeOutput` function to generate a cache-busting filepath for updated image files, improving browser caching behavior.
- Adjusted related tests to reflect the change from versioned filenames to cache-busted filepaths, ensuring accurate validation of image updates.
* fix: Update step handler to prevent undefined content for non-tool call types
- Modified the condition in useStepHandler to ensure that undefined content is only assigned for specific content types, enhancing the robustness of content handling.
* fix: Update bedrockOutputParser to handle maxTokens for adaptive models
- Modified the bedrockOutputParser logic to ensure that maxTokens is not set for adaptive models when neither maxTokens nor maxOutputTokens are provided, improving the handling of adaptive thinking configurations.
- Updated related tests to reflect these changes, ensuring accurate validation of the output for adaptive models.
* chore: Update @librechat/agents to version 3.1.38 in package.json and package-lock.json
* fix: Enhance file claiming and error handling in code processing
- Updated the `processCodeOutput` function to use a consistent file ID for claiming files, preventing duplicates and improving concurrency handling.
- Refactored the `createFileMethods` to include error handling for failed file claims, ensuring robust behavior when claiming files for conversations.
- These changes enhance the reliability of file management in the application.
* fix: Update adaptive thinking test for Opus 4.6 model
- Modified the test for configuring adaptive thinking to reflect that no default maxTokens should be set for the Opus 4.6 model.
- Updated assertions to ensure that maxTokens is undefined, aligning with the expected behavior for adaptive models.
2026-02-07 13:26:18 -05:00
|
|
|
it('should configure adaptive thinking with no default maxTokens for Opus 4.6', async () => {
|
2026-02-06 18:35:36 -05:00
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-6-v1',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'adaptive' });
|
🆔 fix: Atomic File Dedupe, Bedrock Tokens Fix, and Allowed MIME Types (#11675)
* feat: Add support for Apache Parquet MIME types
- Introduced 'application/x-parquet' to the full MIME types list and code interpreter MIME types list.
- Updated application MIME types regex to include 'x-parquet' and 'vnd.apache.parquet'.
- Added mapping for '.parquet' files to 'application/x-parquet' in code type mapping, enhancing file format support.
* feat: Implement atomic file claiming for code execution outputs
- Added a new `claimCodeFile` function to atomically claim a file_id for code execution outputs, preventing duplicates by using a compound key of filename and conversationId.
- Updated `processCodeOutput` to utilize the new claiming mechanism, ensuring that concurrent calls for the same filename converge on a single record.
- Refactored related tests to validate the new atomic claiming behavior and its impact on file usage tracking and versioning.
* fix: Update image file handling to use cache-busting filepath
- Modified the `processCodeOutput` function to generate a cache-busting filepath for updated image files, improving browser caching behavior.
- Adjusted related tests to reflect the change from versioned filenames to cache-busted filepaths, ensuring accurate validation of image updates.
* fix: Update step handler to prevent undefined content for non-tool call types
- Modified the condition in useStepHandler to ensure that undefined content is only assigned for specific content types, enhancing the robustness of content handling.
* fix: Update bedrockOutputParser to handle maxTokens for adaptive models
- Modified the bedrockOutputParser logic to ensure that maxTokens is not set for adaptive models when neither maxTokens nor maxOutputTokens are provided, improving the handling of adaptive thinking configurations.
- Updated related tests to reflect these changes, ensuring accurate validation of the output for adaptive models.
* chore: Update @librechat/agents to version 3.1.38 in package.json and package-lock.json
* fix: Enhance file claiming and error handling in code processing
- Updated the `processCodeOutput` function to use a consistent file ID for claiming files, preventing duplicates and improving concurrency handling.
- Refactored the `createFileMethods` to include error handling for failed file claims, ensuring robust behavior when claiming files for conversations.
- These changes enhance the reliability of file management in the application.
* fix: Update adaptive thinking test for Opus 4.6 model
- Modified the test for configuring adaptive thinking to reflect that no default maxTokens should be set for the Opus 4.6 model.
- Updated assertions to ensure that maxTokens is undefined, aligning with the expected behavior for adaptive models.
2026-02-07 13:26:18 -05:00
|
|
|
expect(result.llmConfig.maxTokens).toBeUndefined();
|
2026-02-06 18:35:36 -05:00
|
|
|
expect(amrf.anthropic_beta).toEqual(
|
|
|
|
|
expect.arrayContaining(['output-128k-2025-02-19', 'context-1m-2025-08-07']),
|
|
|
|
|
);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should pass effort via output_config for Opus 4.6', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-6-v1',
|
|
|
|
|
effort: 'medium',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'adaptive' });
|
|
|
|
|
expect(amrf.output_config).toEqual({ effort: 'medium' });
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should respect user-provided maxTokens for Opus 4.6', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-6-v1',
|
|
|
|
|
maxTokens: 32000,
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.maxTokens).toBe(32000);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should handle cross-region Opus 4.6 model IDs', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'us.anthropic.claude-opus-4-6-v1',
|
|
|
|
|
effort: 'low',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig).toHaveProperty('model', 'us.anthropic.claude-opus-4-6-v1');
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'adaptive' });
|
|
|
|
|
expect(amrf.output_config).toEqual({ effort: 'low' });
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should use enabled thinking for non-adaptive models (Sonnet 4.5)', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-sonnet-4-5-20250929-v1:0',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'enabled', budget_tokens: 2000 });
|
|
|
|
|
expect(amrf.output_config).toBeUndefined();
|
|
|
|
|
expect(result.llmConfig.maxTokens).toBe(8192);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should not include output_config when effort is empty', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-6-v1',
|
|
|
|
|
effort: '',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'adaptive' });
|
|
|
|
|
expect(amrf.output_config).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should strip effort for non-adaptive models', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-1-20250805-v1:0',
|
|
|
|
|
effort: 'high',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'enabled', budget_tokens: 2000 });
|
|
|
|
|
expect(amrf.output_config).toBeUndefined();
|
|
|
|
|
expect(amrf.effort).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
});
|
2026-02-28 15:02:09 -05:00
|
|
|
|
|
|
|
|
describe('Bedrock reasoning_effort for Moonshot/ZAI models', () => {
|
|
|
|
|
it('should map reasoning_effort to reasoning_config for Moonshot Kimi K2.5', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'moonshotai.kimi-k2.5',
|
|
|
|
|
reasoning_effort: 'high',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.reasoning_config).toBe('high');
|
|
|
|
|
expect(amrf.reasoning_effort).toBeUndefined();
|
|
|
|
|
expect(amrf.thinking).toBeUndefined();
|
|
|
|
|
expect(amrf.anthropic_beta).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should map reasoning_effort to reasoning_config for ZAI GLM', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'zai.glm-4.7',
|
|
|
|
|
reasoning_effort: 'medium',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.reasoning_config).toBe('medium');
|
|
|
|
|
expect(amrf.reasoning_effort).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should not include reasoning_config when reasoning_effort is unset', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'moonshotai.kimi-k2.5',
|
|
|
|
|
reasoning_effort: '',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
|
|
|
|
|
expect(result.llmConfig.additionalModelRequestFields).toBeUndefined();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
it('should not map reasoning_effort to reasoning_config for Anthropic models', async () => {
|
|
|
|
|
const params = createMockParams({
|
|
|
|
|
model_parameters: {
|
|
|
|
|
model: 'anthropic.claude-opus-4-6-v1',
|
|
|
|
|
reasoning_effort: 'high',
|
|
|
|
|
},
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
const result = (await initializeBedrock(params)) as BedrockLLMConfigResult;
|
|
|
|
|
const amrf = result.llmConfig.additionalModelRequestFields as Record<string, unknown>;
|
|
|
|
|
|
|
|
|
|
expect(amrf.reasoning_config).toBeUndefined();
|
|
|
|
|
expect(amrf.thinking).toEqual({ type: 'adaptive' });
|
|
|
|
|
});
|
|
|
|
|
});
|
2026-01-03 17:41:48 +00:00
|
|
|
});
|