refactor: remove customConfig dependency for appConfig and streamline loadConfigModels logic

This commit is contained in:
Danny Avila 2025-08-18 03:51:38 -04:00
parent 1d2be247cf
commit 46f9c90223
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
2 changed files with 132 additions and 161 deletions

View file

@@ -1,7 +1,6 @@
const { isUserProvided, normalizeEndpointName } = require('@librechat/api'); const { isUserProvided, normalizeEndpointName } = require('@librechat/api');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider'); const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { fetchModels } = require('~/server/services/ModelService'); const { fetchModels } = require('~/server/services/ModelService');
const { getCustomConfig } = require('./getCustomConfig');
const { getAppConfig } = require('./app'); const { getAppConfig } = require('./app');
/** /**
@@ -10,36 +9,31 @@ const { getAppConfig } = require('./app');
* @param {Express.Request} req - The Express request object. * @param {Express.Request} req - The Express request object.
*/ */
async function loadConfigModels(req) { async function loadConfigModels(req) {
const customConfig = await getCustomConfig(); const appConfig = await getAppConfig({ role: req.user?.role });
if (!appConfig) {
if (!customConfig) {
return {}; return {};
} }
const appConfig = await getAppConfig({ role: req.user?.role });
const { endpoints = {} } = customConfig ?? {};
const modelsConfig = {}; const modelsConfig = {};
const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
const azureConfig = appConfig[EModelEndpoint.azureOpenAI]; const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
const { modelNames } = azureConfig ?? {}; const { modelNames } = azureConfig ?? {};
if (modelNames && azureEndpoint) { if (modelNames && azureConfig) {
modelsConfig[EModelEndpoint.azureOpenAI] = modelNames; modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
} }
if (modelNames && azureEndpoint && azureEndpoint.plugins) { if (modelNames && azureConfig && azureConfig.plugins) {
modelsConfig[EModelEndpoint.gptPlugins] = modelNames; modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
} }
if (azureEndpoint?.assistants && azureConfig.assistantModels) { if (azureConfig?.assistants && azureConfig.assistantModels) {
modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels; modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
} }
if (!Array.isArray(endpoints[EModelEndpoint.custom])) { if (!Array.isArray(appConfig[EModelEndpoint.custom])) {
return modelsConfig; return modelsConfig;
} }
const customEndpoints = endpoints[EModelEndpoint.custom].filter( const customEndpoints = appConfig[EModelEndpoint.custom].filter(
(endpoint) => (endpoint) =>
endpoint.baseURL && endpoint.baseURL &&
endpoint.apiKey && endpoint.apiKey &&

View file

@@ -1,64 +1,60 @@
const { fetchModels } = require('~/server/services/ModelService'); const { fetchModels } = require('~/server/services/ModelService');
const { getCustomConfig } = require('./getCustomConfig');
const loadConfigModels = require('./loadConfigModels'); const loadConfigModels = require('./loadConfigModels');
const { getAppConfig } = require('./app'); const { getAppConfig } = require('./app');
jest.mock('~/server/services/ModelService'); jest.mock('~/server/services/ModelService');
jest.mock('./getCustomConfig');
jest.mock('./app'); jest.mock('./app');
const exampleConfig = { const exampleConfig = {
endpoints: { custom: [
custom: [ {
{ name: 'Mistral',
name: 'Mistral', apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
apiKey: '${MY_PRECIOUS_MISTRAL_KEY}', baseURL: 'https://api.mistral.ai/v1',
baseURL: 'https://api.mistral.ai/v1', models: {
models: { default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'], fetch: true,
fetch: true,
},
dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
}, },
{ dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
name: 'OpenRouter', },
apiKey: '${MY_OPENROUTER_API_KEY}', {
baseURL: 'https://openrouter.ai/api/v1', name: 'OpenRouter',
models: { apiKey: '${MY_OPENROUTER_API_KEY}',
default: ['gpt-3.5-turbo'], baseURL: 'https://openrouter.ai/api/v1',
fetch: true, models: {
}, default: ['gpt-3.5-turbo'],
dropParams: ['stop'], fetch: true,
}, },
{ dropParams: ['stop'],
name: 'groq', },
apiKey: 'user_provided', {
baseURL: 'https://api.groq.com/openai/v1/', name: 'groq',
models: { apiKey: 'user_provided',
default: ['llama2-70b-4096', 'mixtral-8x7b-32768'], baseURL: 'https://api.groq.com/openai/v1/',
fetch: false, models: {
}, default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
fetch: false,
}, },
{ },
name: 'Ollama', {
apiKey: 'user_provided', name: 'Ollama',
baseURL: 'http://localhost:11434/v1/', apiKey: 'user_provided',
models: { baseURL: 'http://localhost:11434/v1/',
default: ['mistral', 'llama2:13b'], models: {
fetch: false, default: ['mistral', 'llama2:13b'],
}, fetch: false,
}, },
{ },
name: 'MLX', {
apiKey: 'user_provided', name: 'MLX',
baseURL: 'http://localhost:8080/v1/', apiKey: 'user_provided',
models: { baseURL: 'http://localhost:8080/v1/',
default: ['Meta-Llama-3-8B-Instruct-4bit'], models: {
fetch: false, default: ['Meta-Llama-3-8B-Instruct-4bit'],
}, fetch: false,
}, },
], },
}, ],
}; };
describe('loadConfigModels', () => { describe('loadConfigModels', () => {
@@ -80,7 +76,7 @@ describe('loadConfigModels', () => {
}); });
it('should return an empty object if customConfig is null', async () => { it('should return an empty object if customConfig is null', async () => {
getCustomConfig.mockResolvedValue(null); getAppConfig.mockResolvedValue(null);
const result = await loadConfigModels(mockRequest); const result = await loadConfigModels(mockRequest);
expect(result).toEqual({}); expect(result).toEqual({});
}); });
@@ -89,13 +85,6 @@ describe('loadConfigModels', () => {
getAppConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
azureOpenAI: { modelNames: ['model1', 'model2'] }, azureOpenAI: { modelNames: ['model1', 'model2'] },
}); });
getCustomConfig.mockResolvedValue({
endpoints: {
azureOpenAI: {
models: ['model1', 'model2'],
},
},
});
const result = await loadConfigModels(mockRequest); const result = await loadConfigModels(mockRequest);
expect(result.azureOpenAI).toEqual(['model1', 'model2']); expect(result.azureOpenAI).toEqual(['model1', 'model2']);
@@ -104,18 +93,16 @@ describe('loadConfigModels', () => {
it('fetches custom models based on the unique key', async () => { it('fetches custom models based on the unique key', async () => {
process.env.BASE_URL = 'http://example.com'; process.env.BASE_URL = 'http://example.com';
process.env.API_KEY = 'some-api-key'; process.env.API_KEY = 'some-api-key';
const customEndpoints = { const customEndpoints = [
custom: [ {
{ baseURL: '${BASE_URL}',
baseURL: '${BASE_URL}', apiKey: '${API_KEY}',
apiKey: '${API_KEY}', name: 'CustomModel',
name: 'CustomModel', models: { fetch: true },
models: { fetch: true }, },
}, ];
],
};
getCustomConfig.mockResolvedValue({ endpoints: customEndpoints }); getAppConfig.mockResolvedValue({ custom: customEndpoints });
fetchModels.mockResolvedValue(['customModel1', 'customModel2']); fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
const result = await loadConfigModels(mockRequest); const result = await loadConfigModels(mockRequest);
@@ -124,23 +111,21 @@ describe('loadConfigModels', () => {
}); });
it('correctly associates models to names using unique keys', async () => { it('correctly associates models to names using unique keys', async () => {
getCustomConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
endpoints: { custom: [
custom: [ {
{ baseURL: 'http://example.com',
baseURL: 'http://example.com', apiKey: 'API_KEY1',
apiKey: 'API_KEY1', name: 'Model1',
name: 'Model1', models: { fetch: true },
models: { fetch: true }, },
}, {
{ baseURL: 'http://example.com',
baseURL: 'http://example.com', apiKey: 'API_KEY2',
apiKey: 'API_KEY2', name: 'Model2',
name: 'Model2', models: { fetch: true },
models: { fetch: true }, },
}, ],
],
},
}); });
fetchModels.mockImplementation(({ apiKey }) => fetchModels.mockImplementation(({ apiKey }) =>
Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']), Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
@@ -153,29 +138,27 @@ describe('loadConfigModels', () => {
it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => { it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
// Mock the custom configuration to simulate the user's scenario // Mock the custom configuration to simulate the user's scenario
getCustomConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
endpoints: { custom: [
custom: [ {
{ name: 'LiteLLM',
name: 'LiteLLM', apiKey: '${LITELLM_ALL_MODELS}',
apiKey: '${LITELLM_ALL_MODELS}', baseURL: '${LITELLM_HOST}',
baseURL: '${LITELLM_HOST}', models: { fetch: true },
models: { fetch: true }, },
}, {
{ name: 'OpenAI',
name: 'OpenAI', apiKey: '${LITELLM_OPENAI_MODELS}',
apiKey: '${LITELLM_OPENAI_MODELS}', baseURL: '${LITELLM_SECOND_HOST}',
baseURL: '${LITELLM_SECOND_HOST}', models: { fetch: true },
models: { fetch: true }, },
}, {
{ name: 'Google',
name: 'Google', apiKey: '${LITELLM_GOOGLE_MODELS}',
apiKey: '${LITELLM_GOOGLE_MODELS}', baseURL: '${LITELLM_SECOND_HOST}',
baseURL: '${LITELLM_SECOND_HOST}', models: { fetch: true },
models: { fetch: true }, },
}, ],
],
},
}); });
// Mock `fetchModels` to return different models based on the apiKey // Mock `fetchModels` to return different models based on the apiKey
@@ -217,7 +200,7 @@ describe('loadConfigModels', () => {
process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key'; process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key';
// Setup custom configuration with specific API keys for Mistral and OpenRouter // Setup custom configuration with specific API keys for Mistral and OpenRouter
// and "user_provided" for groq and Ollama, indicating no fetch for the latter two // and "user_provided" for groq and Ollama, indicating no fetch for the latter two
getCustomConfig.mockResolvedValue(exampleConfig); getAppConfig.mockResolvedValue(exampleConfig);
// Assuming fetchModels would be called only for Mistral and OpenRouter // Assuming fetchModels would be called only for Mistral and OpenRouter
fetchModels.mockImplementation(({ name }) => { fetchModels.mockImplementation(({ name }) => {
@@ -263,8 +246,8 @@ describe('loadConfigModels', () => {
// For groq and ollama, since the apiKey is "user_provided", models should not be fetched // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
// Depending on your implementation's behavior regarding "default" models without fetching, // Depending on your implementation's behavior regarding "default" models without fetching,
// you may need to adjust the following assertions: // you may need to adjust the following assertions:
expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default); expect(result.groq).toBe(exampleConfig.custom[2].models.default);
expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default); expect(result.ollama).toBe(exampleConfig.custom[3].models.default);
// Verifying fetchModels was not called for groq and ollama // Verifying fetchModels was not called for groq and ollama
expect(fetchModels).not.toHaveBeenCalledWith( expect(fetchModels).not.toHaveBeenCalledWith(
@@ -280,29 +263,27 @@ describe('loadConfigModels', () => {
}); });
it('falls back to default models if fetching returns an empty array', async () => { it('falls back to default models if fetching returns an empty array', async () => {
getCustomConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
endpoints: { custom: [
custom: [ {
{ name: 'EndpointWithSameFetchKey',
name: 'EndpointWithSameFetchKey', apiKey: 'API_KEY',
apiKey: 'API_KEY', baseURL: 'http://example.com',
baseURL: 'http://example.com', models: {
models: { fetch: true,
fetch: true, default: ['defaultModel1'],
default: ['defaultModel1'],
},
}, },
{ },
name: 'EmptyFetchModel', {
apiKey: 'API_KEY', name: 'EmptyFetchModel',
baseURL: 'http://example.com', apiKey: 'API_KEY',
models: { baseURL: 'http://example.com',
fetch: true, models: {
default: ['defaultModel1', 'defaultModel2'], fetch: true,
}, default: ['defaultModel1', 'defaultModel2'],
}, },
], },
}, ],
}); });
fetchModels.mockResolvedValue([]); fetchModels.mockResolvedValue([]);
@@ -313,20 +294,18 @@ describe('loadConfigModels', () => {
}); });
it('falls back to default models if fetching returns a falsy value', async () => { it('falls back to default models if fetching returns a falsy value', async () => {
getCustomConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
endpoints: { custom: [
custom: [ {
{ name: 'FalsyFetchModel',
name: 'FalsyFetchModel', apiKey: 'API_KEY',
apiKey: 'API_KEY', baseURL: 'http://example.com',
baseURL: 'http://example.com', models: {
models: { fetch: true,
fetch: true, default: ['defaultModel1', 'defaultModel2'],
default: ['defaultModel1', 'defaultModel2'],
},
}, },
], },
}, ],
}); });
fetchModels.mockResolvedValue(false); fetchModels.mockResolvedValue(false);
@@ -374,10 +353,8 @@ describe('loadConfigModels', () => {
}, },
]; ];
getCustomConfig.mockResolvedValue({ getAppConfig.mockResolvedValue({
endpoints: { custom: testCases,
custom: testCases,
},
}); });
const result = await loadConfigModels(mockRequest); const result = await loadConfigModels(mockRequest);