LibreChat/api/server/services/Config/loadConfigModels.spec.js
Danny Avila 04a4a2aa44
🧵 refactor: Migrate Endpoint Initialization to TypeScript (#10794)
* refactor: move endpoint initialization methods to typescript

* refactor: move agent init to packages/api

- Introduced `initialize.ts` for agent initialization, including file processing and tool loading.
- Updated `resources.ts` to allow optional appConfig parameter.
- Enhanced endpoint configuration handling in various initialization files to support model parameters.
- Added new artifacts and prompts for React component generation.
- Refactored existing code to improve type safety and maintainability.

* refactor: streamline endpoint initialization and enhance type safety

- Updated initialization functions across various endpoints to use a consistent request structure, replacing `unknown` types with `ServerResponse`.
- Simplified request handling by directly extracting keys from the request body.
- Improved type safety by ensuring user IDs are safely accessed with optional chaining.
- Removed unnecessary parameters and streamlined model options handling for better clarity and maintainability.

* refactor: move ModelService and extractBaseURL to packages/api

- Added comprehensive tests for the models fetching functionality, covering scenarios for OpenAI, Anthropic, Google, and Ollama models.
- Updated existing endpoint index to include the new models module.
- Enhanced utility functions for URL extraction and model data processing.
- Improved type safety and error handling across the models fetching logic.

* refactor: consolidate utility functions and remove unused files

- Merged `deriveBaseURL` and `extractBaseURL` into the `@librechat/api` module for better organization.
- Removed redundant utility files and their associated tests to streamline the codebase.
- Updated imports across various client files to utilize the new consolidated functions.
- Enhanced overall maintainability by reducing the number of utility modules.

* refactor: replace ModelService references with direct imports from @librechat/api and remove ModelService file

* refactor: move encrypt/decrypt methods and key db methods to data-schemas, use `getProviderConfig` from `@librechat/api`

* chore: remove unused 'res' from options in AgentClient

* refactor: file model imports and methods

- Updated imports in various controllers and services to use the unified file model from '~/models' instead of '~/models/File'.
- Consolidated file-related methods into a new file methods module in the data-schemas package.
- Added comprehensive tests for file methods including creation, retrieval, updating, and deletion.
- Enhanced the initializeAgent function to accept dependency injection for file-related methods.
- Improved error handling and logging in file methods.

* refactor: streamline database method references in agent initialization

* refactor: enhance file method tests and update type references to IMongoFile

* refactor: consolidate database method imports in agent client and initialization

* chore: remove redundant import of initializeAgent from @librechat/api

* refactor: move checkUserKeyExpiry utility to @librechat/api and update references across endpoints

* refactor: move updateUserPlugins logic to user.ts and simplify UserController

* refactor: update imports for user key management and remove UserService

* refactor: remove unused Anthropic and Bedrock endpoint files and clean up imports

* refactor: consolidate and update encryption imports across various files to use @librechat/data-schemas

* chore: update file model mock to use unified import from '~/models'

* chore: import order

* refactor: remove migrated to TS agent.js file and its associated logic from the endpoints

* chore: add reusable function to extract imports from source code in unused-packages workflow

* chore: enhance unused-packages workflow to include @librechat/api dependencies and improve dependency extraction

* chore: improve dependency extraction in unused-packages workflow with enhanced error handling and debugging output

* chore: add detailed debugging output to unused-packages workflow for better visibility into unused dependencies and exclusion lists

* chore: refine subpath handling in unused-packages workflow to correctly process scoped and non-scoped package imports

* chore: clean up unused debug output in unused-packages workflow and reorganize type imports in initialize.ts
2025-12-11 16:37:16 -05:00


const { fetchModels } = require('@librechat/api');
const loadConfigModels = require('./loadConfigModels');
const { getAppConfig } = require('./app');
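
// Partially mock @librechat/api: keep its real exports but replace fetchModels
// with a Jest stub so each test controls what the "remote" model list returns.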
jest.mock('@librechat/api', () => ({
  ...jest.requireActual('@librechat/api'),
  fetchModels: jest.fn(),
}));
jest.mock('./app');

const exampleConfig = {
  endpoints: {
    custom: [
      {
        name: 'Mistral',
        apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
        baseURL: 'https://api.mistral.ai/v1',
        models: {
          default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
          fetch: true,
        },
        dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
      },
      {
        name: 'OpenRouter',
        apiKey: '${MY_OPENROUTER_API_KEY}',
        baseURL: 'https://openrouter.ai/api/v1',
        models: {
          default: ['gpt-3.5-turbo'],
          fetch: true,
        },
        dropParams: ['stop'],
      },
      {
        name: 'groq',
        apiKey: 'user_provided',
        baseURL: 'https://api.groq.com/openai/v1/',
        models: {
          default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
          fetch: false,
        },
      },
      {
        name: 'Ollama',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mistral', 'llama2:13b'],
          fetch: false,
        },
      },
      {
        name: 'MLX',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:8080/v1/',
        models: {
          default: ['Meta-Llama-3-8B-Instruct-4bit'],
          fetch: false,
        },
      },
    ],
  },
};
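
// The `${VAR}` placeholders above are resolved against process.env before any
// fetching happens. A minimal sketch of that interpolation, assuming a plain
// `${NAME}` syntax (hypothetical helper; the real resolution lives inside
// loadConfigModels and its utilities):
function resolveEnvValue(value) {
  const match = typeof value === 'string' ? /^\$\{(\w+)\}$/.exec(value) : null;
  // Fall back to the raw value when the variable is unset or not a placeholder
  return match ? (process.env[match[1]] ?? value) : value;
}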

describe('loadConfigModels', () => {
  const mockRequest = { user: { id: 'testUserId' } };
  const originalEnv = process.env;

  beforeEach(() => {
    jest.resetAllMocks();
    jest.resetModules();
    process.env = { ...originalEnv };
    // Default mock for getAppConfig
    getAppConfig.mockResolvedValue({});
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('should return an empty object if customConfig is null', async () => {
    getAppConfig.mockResolvedValue(null);
    const result = await loadConfigModels(mockRequest);
    expect(result).toEqual({});
  });

  it('handles azure models and endpoint correctly', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        azureOpenAI: { modelNames: ['model1', 'model2'] },
      },
    });
    const result = await loadConfigModels(mockRequest);
    expect(result.azureOpenAI).toEqual(['model1', 'model2']);
  });

  it('fetches custom models based on the unique key', async () => {
    process.env.BASE_URL = 'http://example.com';
    process.env.API_KEY = 'some-api-key';
    const customEndpoints = [
      {
        baseURL: '${BASE_URL}',
        apiKey: '${API_KEY}',
        name: 'CustomModel',
        models: { fetch: true },
      },
    ];
    getAppConfig.mockResolvedValue({ endpoints: { custom: customEndpoints } });
    fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
    const result = await loadConfigModels(mockRequest);
    expect(fetchModels).toHaveBeenCalled();
    expect(result.CustomModel).toEqual(['customModel1', 'customModel2']);
  });

  it('correctly associates models to names using unique keys', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY1',
            name: 'Model1',
            models: { fetch: true },
          },
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY2',
            name: 'Model2',
            models: { fetch: true },
          },
        ],
      },
    });
    fetchModels.mockImplementation(({ apiKey }) =>
      Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
    );
    const result = await loadConfigModels(mockRequest);
    expect(result.Model1).toEqual(['model1Data']);
    expect(result.Model2).toEqual(['model2Data']);
  });

  it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
    // Mock the custom configuration to simulate the user's scenario
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'LiteLLM',
            apiKey: '${LITELLM_ALL_MODELS}',
            baseURL: '${LITELLM_HOST}',
            models: { fetch: true },
          },
          {
            name: 'OpenAI',
            apiKey: '${LITELLM_OPENAI_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
          {
            name: 'Google',
            apiKey: '${LITELLM_GOOGLE_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
        ],
      },
    });
    // Mock `fetchModels` to return different models based on the apiKey
    fetchModels.mockImplementation(({ apiKey }) => {
      switch (apiKey) {
        case '${LITELLM_ALL_MODELS}':
          return Promise.resolve(['AllModel1', 'AllModel2']);
        case '${LITELLM_OPENAI_MODELS}':
          return Promise.resolve(['OpenAIModel']);
        case '${LITELLM_GOOGLE_MODELS}':
          return Promise.resolve(['GoogleModel']);
        default:
          return Promise.resolve([]);
      }
    });
    const result = await loadConfigModels(mockRequest);
    // Assert that the models are correctly fetched and mapped based on unique keys
    expect(result.LiteLLM).toEqual(['AllModel1', 'AllModel2']);
    expect(result.OpenAI).toEqual(['OpenAIModel']);
    expect(result.Google).toEqual(['GoogleModel']);
    // Ensure that fetchModels was called with correct parameters
    expect(fetchModels).toHaveBeenCalledTimes(3);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_ALL_MODELS}' }),
    );
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_OPENAI_MODELS}' }),
    );
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_GOOGLE_MODELS}' }),
    );
  });
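
  // Schematic version of the "unique key" behavior the two tests above rely
  // on: fetch results are keyed by the baseURL/apiKey pair, so endpoints that
  // share a baseURL but use different keys each trigger their own fetch. This
  // is a sketch of the assumed shape, not the actual implementation:
  function sketchUniqueFetch(endpoint, fetchPromises) {
    const { baseURL, apiKey, name } = endpoint;
    const uniqueKey = `${baseURL}__${apiKey}`;
    // Reuse an in-flight fetch for the same pair; start one otherwise
    fetchPromises[uniqueKey] = fetchPromises[uniqueKey] ?? fetchModels({ baseURL, apiKey, name });
    return fetchPromises[uniqueKey];
  }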

  it('loads models based on custom endpoint configuration respecting fetch rules', async () => {
    process.env.MY_PRECIOUS_MISTRAL_KEY = 'actual_mistral_api_key';
    process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key';
    // Set up a custom configuration with specific API keys for Mistral and OpenRouter,
    // and "user_provided" for groq and Ollama, indicating no fetch for the latter two
    getAppConfig.mockResolvedValue(exampleConfig);
    // fetchModels should be called only for Mistral and OpenRouter
    fetchModels.mockImplementation(({ name }) => {
      switch (name) {
        case 'Mistral':
          return Promise.resolve([
            'mistral-tiny',
            'mistral-small',
            'mistral-medium',
            'mistral-large-latest',
          ]);
        case 'OpenRouter':
          return Promise.resolve(['gpt-3.5-turbo']);
        default:
          return Promise.resolve([]);
      }
    });
    const result = await loadConfigModels(mockRequest);
    // Since fetch is true and the apiKey is not "user_provided", fetching occurs for Mistral and OpenRouter
    expect(result.Mistral).toEqual([
      'mistral-tiny',
      'mistral-small',
      'mistral-medium',
      'mistral-large-latest',
    ]);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'Mistral',
        apiKey: process.env.MY_PRECIOUS_MISTRAL_KEY,
      }),
    );
    expect(result.OpenRouter).toEqual(['gpt-3.5-turbo']);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OpenRouter',
        apiKey: process.env.MY_OPENROUTER_API_KEY,
      }),
    );
    // For groq and ollama, the apiKey is "user_provided", so models are not
    // fetched and the configured defaults are returned instead
    expect(result.groq).toEqual(exampleConfig.endpoints.custom[2].models.default);
    expect(result.ollama).toEqual(exampleConfig.endpoints.custom[3].models.default);
    // Verify fetchModels was not called for groq and ollama
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'groq',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'ollama',
      }),
    );
  });
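
  // The gating exercised above, in schematic form: fetching only happens when
  // `models.fetch` is true and the apiKey resolves to a real value rather than
  // the literal 'user_provided' (hypothetical condenser, not the actual code):
  function sketchShouldFetch(endpoint) {
    return endpoint.models?.fetch === true && endpoint.apiKey !== 'user_provided';
  }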

  it('falls back to default models if fetching returns an empty array', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'EndpointWithSameFetchKey',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1'],
            },
          },
          {
            name: 'EmptyFetchModel',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1', 'defaultModel2'],
            },
          },
        ],
      },
    });
    fetchModels.mockResolvedValue([]);
    const result = await loadConfigModels(mockRequest);
    // Both endpoints share the same baseURL/apiKey pair, so only one fetch is made
    expect(fetchModels).toHaveBeenCalledTimes(1);
    expect(result.EmptyFetchModel).toEqual(['defaultModel1', 'defaultModel2']);
  });

  it('falls back to default models if fetching returns a falsy value', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'FalsyFetchModel',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1', 'defaultModel2'],
            },
          },
        ],
      },
    });
    fetchModels.mockResolvedValue(false);
    const result = await loadConfigModels(mockRequest);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'FalsyFetchModel',
        apiKey: 'API_KEY',
      }),
    );
    expect(result.FalsyFetchModel).toEqual(['defaultModel1', 'defaultModel2']);
  });
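
  // Fallback rule covered by the two tests above, in schematic form: an empty
  // or falsy fetch result yields the configured defaults (assumed shape only;
  // the real logic lives inside loadConfigModels):
  function sketchResolveModels(fetched, defaults = []) {
    return Array.isArray(fetched) && fetched.length > 0 ? fetched : defaults;
  }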

  it('normalizes Ollama endpoint name to lowercase', async () => {
    const testCases = [
      {
        name: 'Ollama',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mistral', 'llama2'],
          fetch: false,
        },
      },
      {
        name: 'OLLAMA',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mixtral', 'codellama'],
          fetch: false,
        },
      },
      {
        name: 'OLLaMA',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['phi', 'neural-chat'],
          fetch: false,
        },
      },
    ];
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: testCases,
      },
    });
    const result = await loadConfigModels(mockRequest);
    // All variations of "Ollama" should be normalized to lowercase "ollama",
    // and the last config in the array should override previous ones
    expect(result.Ollama).toBeUndefined();
    expect(result.OLLAMA).toBeUndefined();
    expect(result.OLLaMA).toBeUndefined();
    expect(result.ollama).toEqual(['phi', 'neural-chat']);
    // Verify fetchModels was not called since these are user_provided
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'Ollama',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OLLAMA',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OLLaMA',
      }),
    );
  });
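
  // The normalization the test above exercises, sketched: any case variant of
  // "Ollama" maps to the canonical lowercase key, with later configs
  // overwriting earlier ones (assumed shape; the real mapping is internal):
  function sketchNormalizeName(name) {
    return name.toLowerCase() === 'ollama' ? 'ollama' : name;
  }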
});