LibreChat/api/server/services/Config/loadConfigModels.spec.js
Danny Avila 9a210971f5
🛜 refactor: Streamline App Config Usage (#9234)
* WIP: app.locals refactoring

WIP: appConfig

fix: update memory configuration retrieval to use getAppConfig based on user role

fix: update comment for AppConfig interface to clarify purpose

🏷️ refactor: Update tests to use getAppConfig for endpoint configurations

ci: Update AppService tests to initialize app config instead of app.locals

ci: Integrate getAppConfig into remaining tests

refactor: Update multer storage destination to use promise-based getAppConfig and improve error handling in tests

refactor: Rename initializeAppConfig to setAppConfig and update related tests

ci: Mock getAppConfig in various tests to provide default configurations

refactor: Update convertMCPToolsToPlugins to use mcpManager for server configuration and adjust related tests

chore: rename `Config/getAppConfig` -> `Config/app`

fix: streamline OpenAI image tools configuration by removing direct appConfig dependency and using function parameters

chore: correct parameter documentation for imageOutputType in ToolService.js

refactor: remove `getCustomConfig` dependency in config route

refactor: update domain validation to use appConfig for allowed domains

refactor: use appConfig registration property

chore: remove app parameter from AppService invocation

refactor: update AppConfig interface to correct registration and turnstile configurations

refactor: remove getCustomConfig dependency and use getAppConfig in PluginController, multer, and MCP services

refactor: replace getCustomConfig with getAppConfig in STTService, TTSService, and related files

refactor: replace getCustomConfig with getAppConfig in Conversation and Message models, update tempChatRetention functions to use AppConfig type

refactor: update getAppConfig calls in Conversation and Message models to include user role for temporary chat expiration

ci: update related tests

refactor: update getAppConfig call in getCustomConfigSpeech to include user role

fix: update appConfig usage to access allowedDomains from actions instead of registration

refactor: enhance AppConfig to include fileStrategies and update related file strategy logic

refactor: update imports to use normalizeEndpointName from @librechat/api and remove redundant definitions

chore: remove deprecated unused RunManager

refactor: get balance config primarily from appConfig

refactor: remove customConfig dependency for appConfig and streamline loadConfigModels logic

refactor: remove getCustomConfig usage and use app config in file citations

refactor: consolidate endpoint loading logic into loadEndpoints function

refactor: update appConfig access to use endpoints structure across various services

refactor: implement custom endpoints configuration and streamline endpoint loading logic

refactor: update getAppConfig call to include user role parameter

refactor: streamline endpoint configuration and enhance appConfig usage across services

refactor: replace getMCPAuthMap with getUserMCPAuthMap and remove unused getCustomConfig file

refactor: add type annotation for loadedEndpoints in loadEndpoints function

refactor: move /services/Files/images/parse to TS API

chore: add missing FILE_CITATIONS permission to IRole interface

refactor: restructure toolkits to TS API

refactor: separate manifest logic into its own module

refactor: consolidate tool loading logic into a new tools module for startup logic

refactor: move interface config logic to TS API

refactor: migrate checkEmailConfig to TypeScript and update imports

refactor: add FunctionTool interface and availableTools to AppConfig

refactor: decouple caching and DB operations from AppService, make part of consolidated `getAppConfig`

WIP: fix tests

* fix: rebase conflicts

* refactor: remove app.locals references

* refactor: replace getBalanceConfig with getAppConfig in various strategies and middleware

* refactor: replace appConfig?.balance with getBalanceConfig in various controllers and clients

* test: add balance configuration to titleConvo method in AgentClient tests

* chore: remove unused `openai-chat-tokens` package

* chore: remove unused imports in initializeMCPs.js

* refactor: update balance configuration to use getAppConfig instead of getBalanceConfig

* refactor: integrate configMiddleware for centralized configuration handling

* refactor: optimize email domain validation by removing unnecessary async calls

* refactor: simplify multer storage configuration by removing async calls

* refactor: reorder imports for better readability in user.js

* refactor: replace getAppConfig calls with req.config for improved performance

* chore: replace getAppConfig calls with req.config in tests for centralized configuration handling

* chore: remove unused override config

* refactor: add configMiddleware to endpoint route and replace getAppConfig with req.config

* chore: remove customConfig parameter from TTSService constructor

* refactor: pass appConfig from request to processFileCitations for improved configuration handling

* refactor: remove configMiddleware from endpoint route and retrieve appConfig directly in getEndpointsConfig if not in `req.config`

* test: add mockAppConfig to processFileCitations tests for improved configuration handling

* fix: pass req.config to hasCustomUserVars and call without await after synchronous refactor

* fix: type safety in useExportConversation

* refactor: retrieve appConfig using getAppConfig in PluginController and remove configMiddleware from plugins route, to avoid always retrieving when plugins are cached

* chore: change `MongoUser` typedef to `IUser`

* fix: Add `user` and `config` fields to ServerRequest and update JSDoc type annotations from Express.Request to ServerRequest

* fix: remove unused setAppConfig mock from Server configuration tests
2025-08-26 12:10:18 -04:00


const { fetchModels } = require('~/server/services/ModelService');
const loadConfigModels = require('./loadConfigModels');
const { getAppConfig } = require('./app');

jest.mock('~/server/services/ModelService');
jest.mock('./app');
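
/**
 * Shared example config: Mistral and OpenRouter use env-var API key
 * placeholders with `fetch: true`, while groq, Ollama, and MLX are
 * `user_provided` with `fetch: false` (see the fetch-rules test below).
 */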
const exampleConfig = {
  endpoints: {
    custom: [
      {
        name: 'Mistral',
        apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
        baseURL: 'https://api.mistral.ai/v1',
        models: {
          default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
          fetch: true,
        },
        dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
      },
      {
        name: 'OpenRouter',
        apiKey: '${MY_OPENROUTER_API_KEY}',
        baseURL: 'https://openrouter.ai/api/v1',
        models: {
          default: ['gpt-3.5-turbo'],
          fetch: true,
        },
        dropParams: ['stop'],
      },
      {
        name: 'groq',
        apiKey: 'user_provided',
        baseURL: 'https://api.groq.com/openai/v1/',
        models: {
          default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
          fetch: false,
        },
      },
      {
        name: 'Ollama',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mistral', 'llama2:13b'],
          fetch: false,
        },
      },
      {
        name: 'MLX',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:8080/v1/',
        models: {
          default: ['Meta-Llama-3-8B-Instruct-4bit'],
          fetch: false,
        },
      },
    ],
  },
};

describe('loadConfigModels', () => {
  const mockRequest = { user: { id: 'testUserId' } };
  const originalEnv = process.env;

  beforeEach(() => {
    jest.resetAllMocks();
    jest.resetModules();
    process.env = { ...originalEnv };
    // Default mock for getAppConfig
    getAppConfig.mockResolvedValue({});
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('should return an empty object if customConfig is null', async () => {
    getAppConfig.mockResolvedValue(null);
    const result = await loadConfigModels(mockRequest);
    expect(result).toEqual({});
  });
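
  // azureOpenAI model names come straight from the config's modelNames list; nothing is fetched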
  it('handles azure models and endpoint correctly', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        azureOpenAI: { modelNames: ['model1', 'model2'] },
      },
    });
    const result = await loadConfigModels(mockRequest);
    expect(result.azureOpenAI).toEqual(['model1', 'model2']);
  });
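
  // The '${BASE_URL}' and '${API_KEY}' placeholders below are resolved from
  // process.env before fetchModels is called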
  it('fetches custom models based on the unique key', async () => {
    process.env.BASE_URL = 'http://example.com';
    process.env.API_KEY = 'some-api-key';
    const customEndpoints = [
      {
        baseURL: '${BASE_URL}',
        apiKey: '${API_KEY}',
        name: 'CustomModel',
        models: { fetch: true },
      },
    ];
    getAppConfig.mockResolvedValue({ endpoints: { custom: customEndpoints } });
    fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
    const result = await loadConfigModels(mockRequest);
    expect(fetchModels).toHaveBeenCalled();
    expect(result.CustomModel).toEqual(['customModel1', 'customModel2']);
  });
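
  // Endpoints sharing a baseURL are keyed by apiKey, so each endpoint name
  // receives its own fetch result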
  it('correctly associates models to names using unique keys', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY1',
            name: 'Model1',
            models: { fetch: true },
          },
          {
            baseURL: 'http://example.com',
            apiKey: 'API_KEY2',
            name: 'Model2',
            models: { fetch: true },
          },
        ],
      },
    });
    fetchModels.mockImplementation(({ apiKey }) =>
      Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
    );
    const result = await loadConfigModels(mockRequest);
    expect(result.Model1).toEqual(['model1Data']);
    expect(result.Model2).toEqual(['model2Data']);
  });

  it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
    // Mock the custom configuration to simulate the user's scenario
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'LiteLLM',
            apiKey: '${LITELLM_ALL_MODELS}',
            baseURL: '${LITELLM_HOST}',
            models: { fetch: true },
          },
          {
            name: 'OpenAI',
            apiKey: '${LITELLM_OPENAI_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
          {
            name: 'Google',
            apiKey: '${LITELLM_GOOGLE_MODELS}',
            baseURL: '${LITELLM_SECOND_HOST}',
            models: { fetch: true },
          },
        ],
      },
    });

    // Mock `fetchModels` to return different models based on the apiKey
    fetchModels.mockImplementation(({ apiKey }) => {
      switch (apiKey) {
        case '${LITELLM_ALL_MODELS}':
          return Promise.resolve(['AllModel1', 'AllModel2']);
        case '${LITELLM_OPENAI_MODELS}':
          return Promise.resolve(['OpenAIModel']);
        case '${LITELLM_GOOGLE_MODELS}':
          return Promise.resolve(['GoogleModel']);
        default:
          return Promise.resolve([]);
      }
    });

    const result = await loadConfigModels(mockRequest);

    // Assert that the models are correctly fetched and mapped based on unique keys
    expect(result.LiteLLM).toEqual(['AllModel1', 'AllModel2']);
    expect(result.OpenAI).toEqual(['OpenAIModel']);
    expect(result.Google).toEqual(['GoogleModel']);

    // Ensure that fetchModels was called with correct parameters
    expect(fetchModels).toHaveBeenCalledTimes(3);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_ALL_MODELS}' }),
    );
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_OPENAI_MODELS}' }),
    );
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({ apiKey: '${LITELLM_GOOGLE_MODELS}' }),
    );
  });

  it('loads models based on custom endpoint configuration respecting fetch rules', async () => {
    process.env.MY_PRECIOUS_MISTRAL_KEY = 'actual_mistral_api_key';
    process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key';
    // Set up custom configuration with specific API keys for Mistral and OpenRouter,
    // and "user_provided" for groq and Ollama, so the latter two are never fetched
    getAppConfig.mockResolvedValue(exampleConfig);
    // fetchModels should only be invoked for Mistral and OpenRouter
    fetchModels.mockImplementation(({ name }) => {
      switch (name) {
        case 'Mistral':
          return Promise.resolve([
            'mistral-tiny',
            'mistral-small',
            'mistral-medium',
            'mistral-large-latest',
          ]);
        case 'OpenRouter':
          return Promise.resolve(['gpt-3.5-turbo']);
        default:
          return Promise.resolve([]);
      }
    });

    const result = await loadConfigModels(mockRequest);

    // Since fetch is true and apiKey is not "user_provided", fetching occurs for Mistral and OpenRouter
    expect(result.Mistral).toEqual([
      'mistral-tiny',
      'mistral-small',
      'mistral-medium',
      'mistral-large-latest',
    ]);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'Mistral',
        apiKey: process.env.MY_PRECIOUS_MISTRAL_KEY,
      }),
    );
    expect(result.OpenRouter).toEqual(['gpt-3.5-turbo']);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OpenRouter',
        apiKey: process.env.MY_OPENROUTER_API_KEY,
      }),
    );

    // For groq and Ollama, the apiKey is "user_provided", so models are not fetched;
    // the configured default models are returned as-is (same array reference)
    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);

    // Verify fetchModels was not called for groq and Ollama
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'groq',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'ollama',
      }),
    );
  });
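
  // Both endpoints below share the same apiKey and baseURL (one fetch key),
  // so fetchModels runs once; the empty result falls back to the defaults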
  it('falls back to default models if fetching returns an empty array', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'EndpointWithSameFetchKey',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1'],
            },
          },
          {
            name: 'EmptyFetchModel',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1', 'defaultModel2'],
            },
          },
        ],
      },
    });
    fetchModels.mockResolvedValue([]);
    const result = await loadConfigModels(mockRequest);
    expect(fetchModels).toHaveBeenCalledTimes(1);
    expect(result.EmptyFetchModel).toEqual(['defaultModel1', 'defaultModel2']);
  });

  it('falls back to default models if fetching returns a falsy value', async () => {
    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: [
          {
            name: 'FalsyFetchModel',
            apiKey: 'API_KEY',
            baseURL: 'http://example.com',
            models: {
              fetch: true,
              default: ['defaultModel1', 'defaultModel2'],
            },
          },
        ],
      },
    });
    fetchModels.mockResolvedValue(false);
    const result = await loadConfigModels(mockRequest);
    expect(fetchModels).toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'FalsyFetchModel',
        apiKey: 'API_KEY',
      }),
    );
    expect(result.FalsyFetchModel).toEqual(['defaultModel1', 'defaultModel2']);
  });

  it('normalizes Ollama endpoint name to lowercase', async () => {
    const testCases = [
      {
        name: 'Ollama',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mistral', 'llama2'],
          fetch: false,
        },
      },
      {
        name: 'OLLAMA',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['mixtral', 'codellama'],
          fetch: false,
        },
      },
      {
        name: 'OLLaMA',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:11434/v1/',
        models: {
          default: ['phi', 'neural-chat'],
          fetch: false,
        },
      },
    ];

    getAppConfig.mockResolvedValue({
      endpoints: {
        custom: testCases,
      },
    });

    const result = await loadConfigModels(mockRequest);

    // All variations of "Ollama" should be normalized to lowercase "ollama",
    // and the last config in the array should override previous ones
    expect(result.Ollama).toBeUndefined();
    expect(result.OLLAMA).toBeUndefined();
    expect(result.OLLaMA).toBeUndefined();
    expect(result.ollama).toEqual(['phi', 'neural-chat']);

    // Verify fetchModels was not called since these are user_provided
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'Ollama',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OLLAMA',
      }),
    );
    expect(fetchModels).not.toHaveBeenCalledWith(
      expect.objectContaining({
        name: 'OLLaMA',
      }),
    );
  });
});