From 46f9c90223ca2dcae0f90b557e827cafcebefa59 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 18 Aug 2025 03:51:38 -0400
Subject: [PATCH] refactor: remove customConfig dependency in favor of
 appConfig and streamline loadConfigModels logic

---
 .../services/Config/loadConfigModels.js      |  20 +-
 .../services/Config/loadConfigModels.spec.js | 273 ++++++++----------
 2 files changed, 132 insertions(+), 161 deletions(-)

diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js
index 82561a5062..13f12e68c2 100644
--- a/api/server/services/Config/loadConfigModels.js
+++ b/api/server/services/Config/loadConfigModels.js
@@ -1,7 +1,6 @@
 const { isUserProvided, normalizeEndpointName } = require('@librechat/api');
 const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
 const { fetchModels } = require('~/server/services/ModelService');
-const { getCustomConfig } = require('./getCustomConfig');
 const { getAppConfig } = require('./app');
 
 /**
@@ -10,36 +9,31 @@ const { getAppConfig } = require('./app');
  * @param {Express.Request} req - The Express request object.
  */
 async function loadConfigModels(req) {
-  const customConfig = await getCustomConfig();
-
-  if (!customConfig) {
+  const appConfig = await getAppConfig({ role: req.user?.role });
+  if (!appConfig) {
     return {};
   }
-
-  const appConfig = await getAppConfig({ role: req.user?.role });
-  const { endpoints = {} } = customConfig ?? {};
   const modelsConfig = {};
-  const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
   const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
   const { modelNames } = azureConfig ?? {};
 
-  if (modelNames && azureEndpoint) {
+  if (modelNames && azureConfig) {
     modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
   }
 
-  if (modelNames && azureEndpoint && azureEndpoint.plugins) {
+  if (modelNames && azureConfig && azureConfig.plugins) {
     modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
   }
 
-  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
+  if (azureConfig?.assistants && azureConfig.assistantModels) {
     modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
   }
 
-  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
+  if (!Array.isArray(appConfig[EModelEndpoint.custom])) {
     return modelsConfig;
   }
 
-  const customEndpoints = endpoints[EModelEndpoint.custom].filter(
+  const customEndpoints = appConfig[EModelEndpoint.custom].filter(
     (endpoint) =>
       endpoint.baseURL &&
       endpoint.apiKey &&
diff --git a/api/server/services/Config/loadConfigModels.spec.js b/api/server/services/Config/loadConfigModels.spec.js
index fadefcf0b3..15fa6f545b 100644
--- a/api/server/services/Config/loadConfigModels.spec.js
+++ b/api/server/services/Config/loadConfigModels.spec.js
@@ -1,64 +1,60 @@
 const { fetchModels } = require('~/server/services/ModelService');
-const { getCustomConfig } = require('./getCustomConfig');
 const loadConfigModels = require('./loadConfigModels');
 const { getAppConfig } = require('./app');
 
 jest.mock('~/server/services/ModelService');
-jest.mock('./getCustomConfig');
 jest.mock('./app');
 
 const exampleConfig = {
-  endpoints: {
-    custom: [
-      {
-        name: 'Mistral',
-        apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
-        baseURL: 'https://api.mistral.ai/v1',
-        models: {
-          default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
-          fetch: true,
-        },
-        dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
-      },
-      {
-        name: 'OpenRouter',
-        apiKey: '${MY_OPENROUTER_API_KEY}',
-        baseURL: 'https://openrouter.ai/api/v1',
-        models: {
-          default: ['gpt-3.5-turbo'],
-          fetch: true,
-        },
-        dropParams: ['stop'],
-      },
-      {
-        name: 'groq',
-        apiKey: 'user_provided',
-        baseURL: 'https://api.groq.com/openai/v1/',
-        models: {
-          default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
-          fetch: false,
-        },
-      },
-      {
-        name: 'Ollama',
-        apiKey: 'user_provided',
-        baseURL: 'http://localhost:11434/v1/',
-        models: {
-          default: ['mistral', 'llama2:13b'],
-          fetch: false,
-        },
-      },
-      {
-        name: 'MLX',
-        apiKey: 'user_provided',
-        baseURL: 'http://localhost:8080/v1/',
-        models: {
-          default: ['Meta-Llama-3-8B-Instruct-4bit'],
-          fetch: false,
-        },
-      },
-    ],
-  },
+  custom: [
+    {
+      name: 'Mistral',
+      apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
+      baseURL: 'https://api.mistral.ai/v1',
+      models: {
+        default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
+        fetch: true,
+      },
+      dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
+    },
+    {
+      name: 'OpenRouter',
+      apiKey: '${MY_OPENROUTER_API_KEY}',
+      baseURL: 'https://openrouter.ai/api/v1',
+      models: {
+        default: ['gpt-3.5-turbo'],
+        fetch: true,
+      },
+      dropParams: ['stop'],
+    },
+    {
+      name: 'groq',
+      apiKey: 'user_provided',
+      baseURL: 'https://api.groq.com/openai/v1/',
+      models: {
+        default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
+        fetch: false,
+      },
+    },
+    {
+      name: 'Ollama',
+      apiKey: 'user_provided',
+      baseURL: 'http://localhost:11434/v1/',
+      models: {
+        default: ['mistral', 'llama2:13b'],
+        fetch: false,
+      },
+    },
+    {
+      name: 'MLX',
+      apiKey: 'user_provided',
+      baseURL: 'http://localhost:8080/v1/',
+      models: {
+        default: ['Meta-Llama-3-8B-Instruct-4bit'],
+        fetch: false,
+      },
+    },
+  ],
 };
 
 describe('loadConfigModels', () => {
@@ -80,7 +76,7 @@ describe('loadConfigModels', () => {
   });
 
-  it('should return an empty object if customConfig is null', async () => {
-    getCustomConfig.mockResolvedValue(null);
+  it('should return an empty object if appConfig is null', async () => {
+    getAppConfig.mockResolvedValue(null);
     const result = await loadConfigModels(mockRequest);
     expect(result).toEqual({});
   });
@@ -89,13 +85,6 @@ describe('loadConfigModels', () => {
     getAppConfig.mockResolvedValue({
       azureOpenAI: { modelNames: ['model1', 'model2'] },
     });
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        azureOpenAI: {
-          models: ['model1', 'model2'],
-        },
-      },
-    });
 
     const result = await loadConfigModels(mockRequest);
     expect(result.azureOpenAI).toEqual(['model1', 'model2']);
@@ -104,18 +93,16 @@ describe('loadConfigModels', () => {
   it('fetches custom models based on the unique key', async () => {
     process.env.BASE_URL = 'http://example.com';
     process.env.API_KEY = 'some-api-key';
-    const customEndpoints = {
-      custom: [
-        {
-          baseURL: '${BASE_URL}',
-          apiKey: '${API_KEY}',
-          name: 'CustomModel',
-          models: { fetch: true },
-        },
-      ],
-    };
+    const customEndpoints = [
+      {
+        baseURL: '${BASE_URL}',
+        apiKey: '${API_KEY}',
+        name: 'CustomModel',
+        models: { fetch: true },
+      },
+    ];
 
-    getCustomConfig.mockResolvedValue({ endpoints: customEndpoints });
+    getAppConfig.mockResolvedValue({ custom: customEndpoints });
     fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
 
     const result = await loadConfigModels(mockRequest);
@@ -124,23 +111,21 @@ describe('loadConfigModels', () => {
   });
 
   it('correctly associates models to names using unique keys', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            baseURL: 'http://example.com',
-            apiKey: 'API_KEY1',
-            name: 'Model1',
-            models: { fetch: true },
-          },
-          {
-            baseURL: 'http://example.com',
-            apiKey: 'API_KEY2',
-            name: 'Model2',
-            models: { fetch: true },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          baseURL: 'http://example.com',
+          apiKey: 'API_KEY1',
+          name: 'Model1',
+          models: { fetch: true },
+        },
+        {
+          baseURL: 'http://example.com',
+          apiKey: 'API_KEY2',
+          name: 'Model2',
+          models: { fetch: true },
+        },
+      ],
     });
     fetchModels.mockImplementation(({ apiKey }) =>
       Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
@@ -153,29 +138,27 @@ describe('loadConfigModels', () => {
   it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
     // Mock the custom configuration to simulate the user's scenario
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'LiteLLM',
-            apiKey: '${LITELLM_ALL_MODELS}',
-            baseURL: '${LITELLM_HOST}',
-            models: { fetch: true },
-          },
-          {
-            name: 'OpenAI',
-            apiKey: '${LITELLM_OPENAI_MODELS}',
-            baseURL: '${LITELLM_SECOND_HOST}',
-            models: { fetch: true },
-          },
-          {
-            name: 'Google',
-            apiKey: '${LITELLM_GOOGLE_MODELS}',
-            baseURL: '${LITELLM_SECOND_HOST}',
-            models: { fetch: true },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'LiteLLM',
+          apiKey: '${LITELLM_ALL_MODELS}',
+          baseURL: '${LITELLM_HOST}',
+          models: { fetch: true },
+        },
+        {
+          name: 'OpenAI',
+          apiKey: '${LITELLM_OPENAI_MODELS}',
+          baseURL: '${LITELLM_SECOND_HOST}',
+          models: { fetch: true },
+        },
+        {
+          name: 'Google',
+          apiKey: '${LITELLM_GOOGLE_MODELS}',
+          baseURL: '${LITELLM_SECOND_HOST}',
+          models: { fetch: true },
+        },
+      ],
     });
 
     // Mock `fetchModels` to return different models based on the apiKey
@@ -217,7 +200,7 @@ describe('loadConfigModels', () => {
     process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key';
     // Setup custom configuration with specific API keys for Mistral and OpenRouter
     // and "user_provided" for groq and Ollama, indicating no fetch for the latter two
-    getCustomConfig.mockResolvedValue(exampleConfig);
+    getAppConfig.mockResolvedValue(exampleConfig);
 
     // Assuming fetchModels would be called only for Mistral and OpenRouter
     fetchModels.mockImplementation(({ name }) => {
@@ -263,8 +246,8 @@ describe('loadConfigModels', () => {
     // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
     // Depending on your implementation's behavior regarding "default" models without fetching,
     // you may need to adjust the following assertions:
-    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
-    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
+    expect(result.groq).toBe(exampleConfig.custom[2].models.default);
+    expect(result.ollama).toBe(exampleConfig.custom[3].models.default);
 
     // Verifying fetchModels was not called for groq and ollama
     expect(fetchModels).not.toHaveBeenCalledWith(
@@ -280,29 +263,27 @@ describe('loadConfigModels', () => {
   });
 
   it('falls back to default models if fetching returns an empty array', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'EndpointWithSameFetchKey',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1'],
-            },
-          },
-          {
-            name: 'EmptyFetchModel',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1', 'defaultModel2'],
-            },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'EndpointWithSameFetchKey',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1'],
+          },
+        },
+        {
+          name: 'EmptyFetchModel',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1', 'defaultModel2'],
+          },
+        },
+      ],
     });
     fetchModels.mockResolvedValue([]);
 
@@ -313,20 +294,18 @@ describe('loadConfigModels', () => {
   });
 
   it('falls back to default models if fetching returns a falsy value', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'FalsyFetchModel',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1', 'defaultModel2'],
-            },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'FalsyFetchModel',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1', 'defaultModel2'],
+          },
+        },
+      ],
     });
     fetchModels.mockResolvedValue(false);
 
@@ -374,10 +353,8 @@ describe('loadConfigModels', () => {
       },
     ];
 
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: testCases,
-      },
+    getAppConfig.mockResolvedValue({
+      custom: testCases,
     });
 
     const result = await loadConfigModels(mockRequest);
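
Reviewer note (outside the patch proper): a minimal sketch of the config-shape
change this refactor encodes, using only names that appear in the diff above.
The wrapper function and the oldList/newList variables are illustrative, not
actual module code.

    const { EModelEndpoint } = require('librechat-data-provider');

    async function shapes(req) {
      // Before: two loads, with endpoint configs nested under `endpoints`.
      const customConfig = await getCustomConfig();
      const { endpoints = {} } = customConfig ?? {};
      const oldList = endpoints[EModelEndpoint.custom];

      // After: a single role-aware load; the same configs sit at the top
      // level of appConfig, so the `endpoints` layer disappears.
      const appConfig = await getAppConfig({ role: req.user?.role });
      const newList = appConfig[EModelEndpoint.custom];

      return { oldList, newList };
    }

The spec mocks mirror this flattening: `getCustomConfig.mockResolvedValue({
endpoints: { custom: [...] } })` becomes `getAppConfig.mockResolvedValue({
custom: [...] })`.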