refactor: remove customConfig dependency for appConfig and streamline loadConfigModels logic

Danny Avila 2025-08-18 03:51:38 -04:00
parent 1d2be247cf
commit 46f9c90223
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
2 changed files with 132 additions and 161 deletions
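For orientation, here is a minimal sketch of the direction the refactor appears to take, inferred only from the updated tests below (which mock `getAppConfig` from `./app`, drop the `getCustomConfig` mocks, and no longer wrap custom endpoints in `endpoints`). The helper names, the `getAppConfig({ role })` call shape, and the `user` argument to `fetchModels` are assumptions for illustration, not the actual implementation:

```js
// Hypothetical sketch only — inferred from the tests in this commit.
// Env-var interpolation of '${...}' values, endpoint key normalization
// (e.g. result.ollama), and fetch de-duplication by unique baseURL/apiKey
// are omitted here.
const { fetchModels } = require('~/server/services/ModelService');
const { getAppConfig } = require('./app');

async function loadConfigModels(req) {
  // Assumed call shape; the tests only require that getAppConfig resolves to
  // the app-level config (previously read via getCustomConfig).
  const appConfig = await getAppConfig({ role: req.user?.role });
  if (!appConfig) {
    return {};
  }

  const modelsConfig = {};

  // Azure model names come straight from the app config.
  if (appConfig.azureOpenAI?.modelNames) {
    modelsConfig.azureOpenAI = appConfig.azureOpenAI.modelNames;
  }

  // Custom endpoints now live at appConfig.custom (no `endpoints` wrapper).
  for (const endpoint of appConfig.custom ?? []) {
    const { name, baseURL, apiKey, models } = endpoint;
    if (models?.fetch && apiKey !== 'user_provided') {
      const fetched = await fetchModels({ name, baseURL, apiKey, user: req.user?.id });
      // Fall back to the configured defaults when fetching yields nothing.
      modelsConfig[name] = Array.isArray(fetched) && fetched.length ? fetched : (models.default ?? []);
    } else {
      modelsConfig[name] = models?.default ?? [];
    }
  }

  return modelsConfig;
}

module.exports = loadConfigModels;
```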


@@ -1,64 +1,60 @@
 const { fetchModels } = require('~/server/services/ModelService');
-const { getCustomConfig } = require('./getCustomConfig');
 const loadConfigModels = require('./loadConfigModels');
 const { getAppConfig } = require('./app');
 
 jest.mock('~/server/services/ModelService');
-jest.mock('./getCustomConfig');
 jest.mock('./app');
 
 const exampleConfig = {
-  endpoints: {
-    custom: [
-      {
-        name: 'Mistral',
-        apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
-        baseURL: 'https://api.mistral.ai/v1',
-        models: {
-          default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
-          fetch: true,
-        },
-        dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
-      },
-      {
-        name: 'OpenRouter',
-        apiKey: '${MY_OPENROUTER_API_KEY}',
-        baseURL: 'https://openrouter.ai/api/v1',
-        models: {
-          default: ['gpt-3.5-turbo'],
-          fetch: true,
-        },
-        dropParams: ['stop'],
-      },
-      {
-        name: 'groq',
-        apiKey: 'user_provided',
-        baseURL: 'https://api.groq.com/openai/v1/',
-        models: {
-          default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
-          fetch: false,
-        },
-      },
-      {
-        name: 'Ollama',
-        apiKey: 'user_provided',
-        baseURL: 'http://localhost:11434/v1/',
-        models: {
-          default: ['mistral', 'llama2:13b'],
-          fetch: false,
-        },
-      },
-      {
-        name: 'MLX',
-        apiKey: 'user_provided',
-        baseURL: 'http://localhost:8080/v1/',
-        models: {
-          default: ['Meta-Llama-3-8B-Instruct-4bit'],
-          fetch: false,
-        },
-      },
-    ],
-  },
+  custom: [
+    {
+      name: 'Mistral',
+      apiKey: '${MY_PRECIOUS_MISTRAL_KEY}',
+      baseURL: 'https://api.mistral.ai/v1',
+      models: {
+        default: ['mistral-tiny', 'mistral-small', 'mistral-medium', 'mistral-large-latest'],
+        fetch: true,
+      },
+      dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty'],
+    },
+    {
+      name: 'OpenRouter',
+      apiKey: '${MY_OPENROUTER_API_KEY}',
+      baseURL: 'https://openrouter.ai/api/v1',
+      models: {
+        default: ['gpt-3.5-turbo'],
+        fetch: true,
+      },
+      dropParams: ['stop'],
+    },
+    {
+      name: 'groq',
+      apiKey: 'user_provided',
+      baseURL: 'https://api.groq.com/openai/v1/',
+      models: {
+        default: ['llama2-70b-4096', 'mixtral-8x7b-32768'],
+        fetch: false,
+      },
+    },
+    {
+      name: 'Ollama',
+      apiKey: 'user_provided',
+      baseURL: 'http://localhost:11434/v1/',
+      models: {
+        default: ['mistral', 'llama2:13b'],
+        fetch: false,
+      },
+    },
+    {
+      name: 'MLX',
+      apiKey: 'user_provided',
+      baseURL: 'http://localhost:8080/v1/',
+      models: {
+        default: ['Meta-Llama-3-8B-Instruct-4bit'],
+        fetch: false,
+      },
+    },
+  ],
 };
 
 describe('loadConfigModels', () => {
@@ -80,7 +76,7 @@ describe('loadConfigModels', () => {
   });
 
   it('should return an empty object if customConfig is null', async () => {
-    getCustomConfig.mockResolvedValue(null);
+    getAppConfig.mockResolvedValue(null);
     const result = await loadConfigModels(mockRequest);
     expect(result).toEqual({});
   });
@@ -89,13 +85,6 @@ describe('loadConfigModels', () => {
     getAppConfig.mockResolvedValue({
       azureOpenAI: { modelNames: ['model1', 'model2'] },
     });
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        azureOpenAI: {
-          models: ['model1', 'model2'],
-        },
-      },
-    });
     const result = await loadConfigModels(mockRequest);
     expect(result.azureOpenAI).toEqual(['model1', 'model2']);
@@ -104,18 +93,16 @@ describe('loadConfigModels', () => {
   it('fetches custom models based on the unique key', async () => {
     process.env.BASE_URL = 'http://example.com';
     process.env.API_KEY = 'some-api-key';
-    const customEndpoints = {
-      custom: [
-        {
-          baseURL: '${BASE_URL}',
-          apiKey: '${API_KEY}',
-          name: 'CustomModel',
-          models: { fetch: true },
-        },
-      ],
-    };
+    const customEndpoints = [
+      {
+        baseURL: '${BASE_URL}',
+        apiKey: '${API_KEY}',
+        name: 'CustomModel',
+        models: { fetch: true },
+      },
+    ];
 
-    getCustomConfig.mockResolvedValue({ endpoints: customEndpoints });
+    getAppConfig.mockResolvedValue({ custom: customEndpoints });
     fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
 
     const result = await loadConfigModels(mockRequest);
@@ -124,23 +111,21 @@ describe('loadConfigModels', () => {
   });
 
   it('correctly associates models to names using unique keys', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            baseURL: 'http://example.com',
-            apiKey: 'API_KEY1',
-            name: 'Model1',
-            models: { fetch: true },
-          },
-          {
-            baseURL: 'http://example.com',
-            apiKey: 'API_KEY2',
-            name: 'Model2',
-            models: { fetch: true },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          baseURL: 'http://example.com',
+          apiKey: 'API_KEY1',
+          name: 'Model1',
+          models: { fetch: true },
+        },
+        {
+          baseURL: 'http://example.com',
+          apiKey: 'API_KEY2',
+          name: 'Model2',
+          models: { fetch: true },
+        },
+      ],
     });
     fetchModels.mockImplementation(({ apiKey }) =>
       Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
@@ -153,29 +138,27 @@ describe('loadConfigModels', () => {
   it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
     // Mock the custom configuration to simulate the user's scenario
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'LiteLLM',
-            apiKey: '${LITELLM_ALL_MODELS}',
-            baseURL: '${LITELLM_HOST}',
-            models: { fetch: true },
-          },
-          {
-            name: 'OpenAI',
-            apiKey: '${LITELLM_OPENAI_MODELS}',
-            baseURL: '${LITELLM_SECOND_HOST}',
-            models: { fetch: true },
-          },
-          {
-            name: 'Google',
-            apiKey: '${LITELLM_GOOGLE_MODELS}',
-            baseURL: '${LITELLM_SECOND_HOST}',
-            models: { fetch: true },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'LiteLLM',
+          apiKey: '${LITELLM_ALL_MODELS}',
+          baseURL: '${LITELLM_HOST}',
+          models: { fetch: true },
+        },
+        {
+          name: 'OpenAI',
+          apiKey: '${LITELLM_OPENAI_MODELS}',
+          baseURL: '${LITELLM_SECOND_HOST}',
+          models: { fetch: true },
+        },
+        {
+          name: 'Google',
+          apiKey: '${LITELLM_GOOGLE_MODELS}',
+          baseURL: '${LITELLM_SECOND_HOST}',
+          models: { fetch: true },
+        },
+      ],
     });
 
     // Mock `fetchModels` to return different models based on the apiKey
@@ -217,7 +200,7 @@ describe('loadConfigModels', () => {
     process.env.MY_OPENROUTER_API_KEY = 'actual_openrouter_api_key';
     // Setup custom configuration with specific API keys for Mistral and OpenRouter
     // and "user_provided" for groq and Ollama, indicating no fetch for the latter two
-    getCustomConfig.mockResolvedValue(exampleConfig);
+    getAppConfig.mockResolvedValue(exampleConfig);
 
     // Assuming fetchModels would be called only for Mistral and OpenRouter
     fetchModels.mockImplementation(({ name }) => {
@@ -263,8 +246,8 @@ describe('loadConfigModels', () => {
     // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
     // Depending on your implementation's behavior regarding "default" models without fetching,
     // you may need to adjust the following assertions:
-    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
-    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
+    expect(result.groq).toBe(exampleConfig.custom[2].models.default);
+    expect(result.ollama).toBe(exampleConfig.custom[3].models.default);
 
     // Verifying fetchModels was not called for groq and ollama
     expect(fetchModels).not.toHaveBeenCalledWith(
@@ -280,29 +263,27 @@ describe('loadConfigModels', () => {
   });
 
   it('falls back to default models if fetching returns an empty array', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'EndpointWithSameFetchKey',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1'],
-            },
-          },
-          {
-            name: 'EmptyFetchModel',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1', 'defaultModel2'],
-            },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'EndpointWithSameFetchKey',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1'],
+          },
+        },
+        {
+          name: 'EmptyFetchModel',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1', 'defaultModel2'],
+          },
+        },
+      ],
     });
     fetchModels.mockResolvedValue([]);
@@ -313,20 +294,18 @@ describe('loadConfigModels', () => {
   });
 
   it('falls back to default models if fetching returns a falsy value', async () => {
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: [
-          {
-            name: 'FalsyFetchModel',
-            apiKey: 'API_KEY',
-            baseURL: 'http://example.com',
-            models: {
-              fetch: true,
-              default: ['defaultModel1', 'defaultModel2'],
-            },
-          },
-        ],
-      },
+    getAppConfig.mockResolvedValue({
+      custom: [
+        {
+          name: 'FalsyFetchModel',
+          apiKey: 'API_KEY',
+          baseURL: 'http://example.com',
+          models: {
+            fetch: true,
+            default: ['defaultModel1', 'defaultModel2'],
+          },
+        },
+      ],
     });
     fetchModels.mockResolvedValue(false);
@@ -374,10 +353,8 @@ describe('loadConfigModels', () => {
       },
     ];
 
-    getCustomConfig.mockResolvedValue({
-      endpoints: {
-        custom: testCases,
-      },
+    getAppConfig.mockResolvedValue({
+      custom: testCases,
     });
 
     const result = await loadConfigModels(mockRequest);