LibreChat/api/server/services/ModelService.spec.js
Dustin Healy c6ecf0095b
🎚️ feat: Anthropic Parameter Set Support via Custom Endpoints (#9415)
* refactor: modularize openai llm config logic into new getOpenAILLMConfig function (#9412)

* ✈️ refactor: Migrate Anthropic's getLLMConfig to TypeScript (#9413)

* refactor: move tokens.js over to packages/api and update imports

* refactor: port tokens.js to typescript

* refactor: move helpers.js over to packages/api and update imports

* refactor: port helpers.js to typescript

* refactor: move anthropic/llm.js over to packages/api and update imports

* refactor: port anthropic/llm.js to typescript with supporting types in types/anthropic.ts and updated tests in llm.spec.js

* refactor: move llm.spec.js over to packages/api and update import

* refactor: port llm.spec.js over to typescript

* 📝 Add Prompt Parameter Support for Anthropic Custom Endpoints (#9414)

feat: add anthropic llm config support for openai-like (custom) endpoints

* fix: missed compiler / type issues from addition of getAnthropicLLMConfig

* refactor: update tokens.ts to export constants and functions, enhance type definitions, and adjust default values

* WIP: first pass, decouple `llmConfig` from `configOptions`

* chore: update import path for OpenAI configuration from 'llm' to 'config'

* refactor: enhance type definitions for ThinkingConfig and update modelOptions in AnthropicConfigOptions

* refactor: cleanup type, introduce openai transform from alt provider

* chore: integrate removeNullishValues in Google llmConfig and update OpenAI exports

* chore: bump version of @librechat/api to 1.3.5 in package.json and package-lock.json

* refactor: update customParams type in OpenAIConfigOptions to use TConfig['customParams']

* refactor: enhance transformToOpenAIConfig to include fromEndpoint and improve config extraction

* refactor: conform userId field for anthropic/openai, cleanup anthropic typing

* ci: add backward compatibility tests for getOpenAIConfig with various endpoints and configurations

* ci: replace userId with user in clientOptions for getLLMConfig

* test: add Azure OpenAI endpoint tests for various configurations in getOpenAIConfig

* refactor: defaultHeaders retrieval for prompt caching for anthropic-based custom endpoint (litellm)

* test: add unit tests for getOpenAIConfig with various Anthropic model configurations

* test: enhance Anthropic compatibility tests with addParams and dropParams handling

* chore: update @librechat/agents dependency to version 2.4.78 in package.json and package-lock.json

* chore: update @librechat/agents dependency to version 2.4.79 in package.json and package-lock.json

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-09-08 14:35:29 -04:00
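
The commit above routes Anthropic parameter handling through the shared OpenAI config path in @librechat/api, so OpenAI-compatible custom endpoints (for example a LiteLLM proxy) can accept Anthropic-specific options. Below is a minimal sketch of that idea; the call shape, option names, and return value are assumptions for illustration only, not the verified @librechat/api interface — check packages/api for the actual getOpenAIConfig export.

// Hypothetical usage sketch; names and signature are assumed, not confirmed by this repo page.
const { getOpenAIConfig } = require('@librechat/api');

const { llmConfig, configOptions } = getOpenAIConfig('sk-proxy-key', {
  reverseProxyUrl: 'http://localhost:4000/v1', // assumed LiteLLM-style proxy URL
  modelOptions: { model: 'claude-3-5-sonnet', maxOutputTokens: 4096 },
  addParams: { thinking: { type: 'enabled', budget_tokens: 1024 } }, // merged into the request
  dropParams: ['frequency_penalty'], // stripped before the request is sent
});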


const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
const {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getGoogleModels,
  getBedrockModels,
  getAnthropicModels,
} = require('./ModelService');
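
// Preserve the real @librechat/api utilities, but wrap processModelData in a jest.fn
// so the createTokenConfig suite below can assert how it is called.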
jest.mock('@librechat/api', () => {
  const originalUtils = jest.requireActual('@librechat/api');
  return {
    ...originalUtils,
    processModelData: jest.fn((...args) => {
      return originalUtils.processModelData(...args);
    }),
  };
});

jest.mock('axios');
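
// The log-store cache below always misses (get resolves to undefined), so every test
// exercises the fetch path instead of returning a cached model list.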
jest.mock('~/cache/getLogStores', () =>
  jest.fn().mockImplementation(() => ({
    get: jest.fn().mockResolvedValue(undefined),
    set: jest.fn().mockResolvedValue(true),
  })),
);

jest.mock('@librechat/data-schemas', () => ({
  ...jest.requireActual('@librechat/data-schemas'),
  logger: {
    error: jest.fn(),
  },
}));

jest.mock('./Config/EndpointService', () => ({
  config: {
    openAIApiKey: 'mockedApiKey',
    userProvidedOpenAI: false,
  },
}));
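
// Default axios response shared by the suites below; individual suites override it
// in their own beforeEach hooks.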
axios.get.mockResolvedValue({
  data: {
    data: [{ id: 'model-1' }, { id: 'model-2' }],
  },
});

describe('fetchModels', () => {
  it('fetches models successfully from the API', async () => {
    const models = await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
    });
    expect(models).toEqual(['model-1', 'model-2']);
    expect(axios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models'),
      expect.any(Object),
    );
  });

  it('adds the user ID to the models query when option and ID are passed', async () => {
    const models = await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      userIdQuery: true,
      name: 'TestAPI',
    });
    expect(models).toEqual(['model-1', 'model-2']);
    expect(axios.get).toHaveBeenCalledWith(
      expect.stringContaining('https://api.test.com/models?user=user123'),
      expect.any(Object),
    );
  });

  afterEach(() => {
    jest.clearAllMocks();
  });
});

describe('fetchModels with createTokenConfig true', () => {
  const data = {
    data: [
      {
        id: 'model-1',
        pricing: {
          prompt: '0.002',
          completion: '0.001',
        },
        context_length: 1024,
      },
      {
        id: 'model-2',
        pricing: {
          prompt: '0.003',
          completion: '0.0015',
        },
        context_length: 2048,
      },
    ],
  };

  beforeEach(() => {
    // Reset the axios mock to return the pricing payload before each test
    axios.get.mockResolvedValue({ data });
  });

  it('creates and stores token configuration if createTokenConfig is true', async () => {
    await fetchModels({
      user: 'user123',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      createTokenConfig: true,
    });
    const { processModelData } = require('@librechat/api');
    expect(processModelData).toHaveBeenCalled();
    expect(processModelData).toHaveBeenCalledWith(data);
  });
});

describe('getOpenAIModels', () => {
  let originalEnv;

  beforeEach(() => {
    originalEnv = { ...process.env };
    axios.get.mockRejectedValue(new Error('Network error'));
  });

  afterEach(() => {
    process.env = originalEnv;
    axios.get.mockReset();
  });

  it('returns default models when no environment configurations are provided (and fetch fails)', async () => {
    const models = await getOpenAIModels({ user: 'user456' });
    expect(models).toContain('gpt-4');
  });

  it('returns `AZURE_OPENAI_MODELS` with `azure` flag (and fetch fails)', async () => {
    process.env.AZURE_OPENAI_MODELS = 'azure-model,azure-model-2';
    const models = await getOpenAIModels({ azure: true });
    expect(models).toEqual(expect.arrayContaining(['azure-model', 'azure-model-2']));
  });

  it('returns `PLUGIN_MODELS` with `plugins` flag (and fetch fails)', async () => {
    process.env.PLUGIN_MODELS = 'plugins-model,plugins-model-2';
    const models = await getOpenAIModels({ plugins: true });
    expect(models).toEqual(expect.arrayContaining(['plugins-model', 'plugins-model-2']));
  });

  it('returns `OPENAI_MODELS` with no flags (and fetch fails)', async () => {
    process.env.OPENAI_MODELS = 'openai-model,openai-model-2';
    const models = await getOpenAIModels({});
    expect(models).toEqual(expect.arrayContaining(['openai-model', 'openai-model-2']));
  });

  it('utilizes proxy configuration when PROXY is set', async () => {
    axios.get.mockResolvedValue({
      data: {
        data: [],
      },
    });
    process.env.PROXY = 'http://localhost:8888';
    await getOpenAIModels({ user: 'user456' });
    expect(axios.get).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({
        httpsAgent: expect.anything(),
      }),
    );
  });
});
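
// The suite below re-registers mocks at runtime and calls jest.resetModules() so the
// freshly required ModelService picks up a user-provided OpenAI key and the overridden
// defaultModels for this suite only.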
describe('getOpenAIModels with mocked config', () => {
  it('uses alternative behavior when userProvidedOpenAI is true', async () => {
    jest.mock('./Config/EndpointService', () => ({
      config: {
        openAIApiKey: 'mockedApiKey',
        userProvidedOpenAI: true,
      },
    }));
    jest.mock('librechat-data-provider', () => {
      const original = jest.requireActual('librechat-data-provider');
      return {
        ...original,
        defaultModels: {
          [original.EModelEndpoint.openAI]: ['some-default-model'],
        },
      };
    });
    jest.resetModules();

    const { getOpenAIModels } = require('./ModelService');
    const models = await getOpenAIModels({ user: 'user456' });
    expect(models).toContain('some-default-model');
  });
});

describe('getOpenAIModels sorting behavior', () => {
  beforeEach(() => {
    axios.get.mockResolvedValue({
      data: {
        data: [
          { id: 'gpt-3.5-turbo-instruct-0914' },
          { id: 'gpt-3.5-turbo-instruct' },
          { id: 'gpt-3.5-turbo' },
          { id: 'gpt-4-0314' },
          { id: 'gpt-4-turbo-preview' },
        ],
      },
    });
  });

  it('ensures instruct models are listed last', async () => {
    const models = await getOpenAIModels({ user: 'user456' });
    // Check if the last model is an "instruct" model
    expect(models[models.length - 1]).toMatch(/instruct/);

    // Check if the "instruct" models are placed at the end
    const instructIndexes = models
      .map((model, index) => (model.includes('instruct') ? index : -1))
      .filter((index) => index !== -1);
    const nonInstructIndexes = models
      .map((model, index) => (!model.includes('instruct') ? index : -1))
      .filter((index) => index !== -1);
    expect(Math.max(...nonInstructIndexes)).toBeLessThan(Math.min(...instructIndexes));

    const expectedOrder = [
      'gpt-3.5-turbo',
      'gpt-4-0314',
      'gpt-4-turbo-preview',
      'gpt-3.5-turbo-instruct-0914',
      'gpt-3.5-turbo-instruct',
    ];
    expect(models).toEqual(expectedOrder);
  });

  afterEach(() => {
    jest.clearAllMocks();
  });
});

describe('fetchModels with Ollama specific logic', () => {
  const mockOllamaData = {
    data: {
      models: [{ name: 'Ollama-Base' }, { name: 'Ollama-Advanced' }],
    },
  };

  beforeEach(() => {
    axios.get.mockResolvedValue(mockOllamaData);
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  it('should fetch Ollama models when name starts with "ollama"', async () => {
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'OllamaAPI',
    });
    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
      timeout: 5000,
    });
  });

  it('should handle errors gracefully when fetching Ollama models fails', async () => {
    axios.get.mockRejectedValue(new Error('Network error'));
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'OllamaAPI',
    });
    expect(models).toEqual([]);
    expect(logger.error).toHaveBeenCalled();
  });

  it('should return an empty array if no baseURL is provided', async () => {
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      name: 'OllamaAPI',
    });
    expect(models).toEqual([]);
  });

  it('should not fetch Ollama models if the name does not start with "ollama"', async () => {
    // Mock axios to return a different set of models for non-Ollama API calls
    axios.get.mockResolvedValue({
      data: {
        data: [{ id: 'model-1' }, { id: 'model-2' }],
      },
    });
    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.test.com',
      name: 'TestAPI',
    });
    expect(models).toEqual(['model-1', 'model-2']);
    expect(axios.get).toHaveBeenCalledWith(
      'https://api.test.com/models', // Ensure the correct API endpoint is called
      expect.any(Object), // Ensuring some object (headers, etc.) is passed
    );
  });
});

describe('splitAndTrim', () => {
  it('should split a string by commas and trim each value', () => {
    const input = ' model1, model2 , model3,model4 ';
    const expected = ['model1', 'model2', 'model3', 'model4'];
    expect(splitAndTrim(input)).toEqual(expected);
  });

  it('should return an empty array for empty input', () => {
    expect(splitAndTrim('')).toEqual([]);
  });

  it('should return an empty array for null input', () => {
    expect(splitAndTrim(null)).toEqual([]);
  });

  it('should return an empty array for undefined input', () => {
    expect(splitAndTrim(undefined)).toEqual([]);
  });

  it('should filter out empty values after trimming', () => {
    const input = 'model1,, ,model2,';
    const expected = ['model1', 'model2'];
    expect(splitAndTrim(input)).toEqual(expected);
  });
});

describe('getAnthropicModels', () => {
  it('returns default models when ANTHROPIC_MODELS is not set', async () => {
    delete process.env.ANTHROPIC_MODELS;
    const models = await getAnthropicModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.anthropic]);
  });

  it('returns models from ANTHROPIC_MODELS when set', async () => {
    process.env.ANTHROPIC_MODELS = 'claude-1, claude-2 ';
    const models = await getAnthropicModels();
    expect(models).toEqual(['claude-1', 'claude-2']);
  });
});

describe('getGoogleModels', () => {
  it('returns default models when GOOGLE_MODELS is not set', () => {
    delete process.env.GOOGLE_MODELS;
    const models = getGoogleModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.google]);
  });

  it('returns models from GOOGLE_MODELS when set', () => {
    process.env.GOOGLE_MODELS = 'gemini-pro, bard ';
    const models = getGoogleModels();
    expect(models).toEqual(['gemini-pro', 'bard']);
  });
});

describe('getBedrockModels', () => {
  it('returns default models when BEDROCK_AWS_MODELS is not set', () => {
    delete process.env.BEDROCK_AWS_MODELS;
    const models = getBedrockModels();
    expect(models).toEqual(defaultModels[EModelEndpoint.bedrock]);
  });

  it('returns models from BEDROCK_AWS_MODELS when set', () => {
    process.env.BEDROCK_AWS_MODELS = 'anthropic.claude-v2, ai21.j2-ultra ';
    const models = getBedrockModels();
    expect(models).toEqual(['anthropic.claude-v2', 'ai21.j2-ultra']);
  });
});