refactor: update appConfig access to use endpoints structure across various services

Danny Avila, 2025-08-18 15:20:58 -04:00
parent 89fb9c7e1c
commit 240e3bd59e
GPG key ID: BF31EEB2C5CA0956 (no known key found for this signature in database)
36 changed files with 591 additions and 510 deletions
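In short, endpoint-level configuration moves from top-level keys on appConfig (e.g. appConfig[EModelEndpoint.azureOpenAI]) to a nested endpoints object (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]), and the catch-all `all` entry moves with it. A minimal TypeScript sketch of the new access pattern, using illustrative stand-in types rather than the project's own AppConfig/TEndpoint:

```ts
// Stand-in types for illustration only.
interface TEndpointSketch {
  streamRate?: number;
  titleModel?: string;
}

interface AppConfigSketch {
  endpoints?: {
    all?: TEndpointSketch;
    [endpoint: string]: TEndpointSketch | undefined;
  };
}

function readStreamRate(appConfig: AppConfigSketch, endpoint: string): number | undefined {
  // Old access (before this commit):  appConfig[endpoint]
  // New access (after this commit):   appConfig.endpoints?.[endpoint]
  const endpointConfig = appConfig.endpoints?.[endpoint];
  // The changed initializers let an `all` entry override the per-endpoint value;
  // nullish coalescing approximates that behavior here.
  return appConfig.endpoints?.all?.streamRate ?? endpointConfig?.streamRate;
}
```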

View file

@@ -718,8 +718,7 @@ class OpenAIClient extends BaseClient {
       max_tokens: 16,
     };
 
-    /** @type {TAzureConfig | undefined} */
-    const azureConfig = appConfig?.[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
 
     const resetTitleOptions = !!(
       (this.azure && azureConfig) ||
@@ -1154,8 +1153,7 @@ ${convo}
       opts.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
     }
 
-    /** @type {TAzureConfig | undefined} */
-    const azureConfig = appConfig?.[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
 
     if (
       (this.azure && this.isVisionModel && azureConfig) ||

View file

@@ -464,7 +464,9 @@ class AgentClient extends BaseClient {
       /** @type {Agent} */
       let prelimAgent;
-      const allowedProviders = new Set(appConfig?.[EModelEndpoint.agents]?.allowedProviders);
+      const allowedProviders = new Set(
+        appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders,
+      );
 
       try {
         if (memoryConfig.agent?.id != null && memoryConfig.agent.id !== this.options.agent.id) {
           prelimAgent = await loadAgent({
@@ -770,8 +772,8 @@ class AgentClient extends BaseClient {
     }
 
     const appConfig = await getAppConfig({ role: this.options.req.user?.role });
-    /** @type {TCustomConfig['endpoints']['agents']} */
-    const agentsEConfig = appConfig[EModelEndpoint.agents];
+    /** @type {AppConfig['endpoints']['agents']} */
+    const agentsEConfig = appConfig.endpoints?.[EModelEndpoint.agents];
 
     config = {
       configurable: {
@@ -1104,7 +1106,9 @@ class AgentClient extends BaseClient {
       /** @type {TEndpoint | undefined} */
       const endpointConfig =
-        appConfig.all ?? appConfig[endpoint] ?? titleProviderConfig.customEndpointConfig;
+        appConfig.endpoints?.all ??
+        appConfig.endpoints?.[endpoint] ??
+        titleProviderConfig.customEndpointConfig;
 
       if (!endpointConfig) {
         logger.warn(
           '[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',

View file

@@ -46,6 +46,7 @@ describe('AgentClient - titleConvo', () => {
     // Mock getAppConfig to return endpoint configurations
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.openAI]: {
           // Match the agent endpoint
           titleModel: 'gpt-3.5-turbo',
@@ -53,6 +54,7 @@ describe('AgentClient - titleConvo', () => {
           titleMethod: 'structured',
           titlePromptTemplate: 'Template: {{content}}',
         },
+      },
     });
 
     // Mock request and response
@@ -148,7 +150,7 @@ describe('AgentClient - titleConvo', () => {
   it('should handle missing endpoint config gracefully', async () => {
     // Remove endpoint config
-    getAppConfig.mockResolvedValue({});
+    getAppConfig.mockResolvedValue({ endpoints: {} });
 
     const text = 'Test conversation text';
     const abortController = new AbortController();
@@ -167,12 +169,14 @@ describe('AgentClient - titleConvo', () => {
   it('should use agent model when titleModel is not provided', async () => {
     // Remove titleModel from config
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.openAI]: {
           titlePrompt: 'Custom title prompt',
           titleMethod: 'structured',
           titlePromptTemplate: 'Template: {{content}}',
           // titleModel is omitted
         },
+      },
     });
 
     const text = 'Test conversation text';
@@ -186,12 +190,14 @@ describe('AgentClient - titleConvo', () => {
   it('should not use titleModel when it equals CURRENT_MODEL constant', async () => {
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.openAI]: {
           titleModel: Constants.CURRENT_MODEL,
           titlePrompt: 'Custom title prompt',
           titleMethod: 'structured',
           titlePromptTemplate: 'Template: {{content}}',
         },
+      },
     });
 
     const text = 'Test conversation text';
@@ -265,6 +271,7 @@ describe('AgentClient - titleConvo', () => {
     // Add titleEndpoint to the config
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.openAI]: {
           titleModel: 'gpt-3.5-turbo',
           titleEndpoint: EModelEndpoint.anthropic,
@@ -272,6 +279,7 @@ describe('AgentClient - titleConvo', () => {
           titlePrompt: 'Custom title prompt',
           titlePromptTemplate: 'Custom template',
         },
+      },
     });
 
     const text = 'Test conversation text';
@@ -300,12 +308,14 @@ describe('AgentClient - titleConvo', () => {
   it('should use all config when endpoint config is missing', async () => {
     // Set 'all' config without endpoint-specific config
     getAppConfig.mockResolvedValue({
+      endpoints: {
         all: {
           titleModel: 'gpt-4o-mini',
           titlePrompt: 'All config title prompt',
           titleMethod: 'completion',
           titlePromptTemplate: 'All config template: {{content}}',
         },
+      },
     });
 
     const text = 'Test conversation text';
@@ -330,6 +340,7 @@ describe('AgentClient - titleConvo', () => {
   it('should prioritize all config over endpoint config for title settings', async () => {
     // Set both endpoint and 'all' config
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.openAI]: {
           titleModel: 'gpt-3.5-turbo',
           titlePrompt: 'Endpoint title prompt',
@@ -342,6 +353,7 @@ describe('AgentClient - titleConvo', () => {
           titleMethod: 'completion',
           titlePromptTemplate: 'All config template',
         },
+      },
     });
 
     const text = 'Test conversation text';
@@ -370,6 +382,7 @@ describe('AgentClient - titleConvo', () => {
     // Set comprehensive 'all' config with all new title options
     getAppConfig.mockResolvedValue({
+      endpoints: {
         all: {
           titleConvo: true,
           titleModel: 'claude-3-haiku-20240307',
@@ -378,6 +391,7 @@ describe('AgentClient - titleConvo', () => {
           titlePromptTemplate: 'Conversation summary: {{content}}',
           titleEndpoint: EModelEndpoint.anthropic, // Should switch provider to Anthropic
         },
+      },
     });
 
     const text = 'Test conversation about AI and machine learning';
@@ -425,12 +439,14 @@ describe('AgentClient - titleConvo', () => {
     // Set 'all' config with specific titleMethod
     getAppConfig.mockResolvedValue({
+      endpoints: {
         all: {
           titleModel: 'gpt-4o-mini',
           titleMethod: method,
           titlePrompt: `Testing ${method} method`,
           titlePromptTemplate: `Template for ${method}: {{content}}`,
         },
+      },
     });
 
     const text = `Test conversation for ${method} method`;
@@ -476,6 +492,7 @@ describe('AgentClient - titleConvo', () => {
     mockAgent.endpoint = EModelEndpoint.azureOpenAI;
     mockAgent.provider = EModelEndpoint.azureOpenAI;
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.azureOpenAI]: {
           titleConvo: true,
           titleModel: 'grok-3',
@@ -502,6 +519,7 @@
             },
           },
         },
+      },
     });
     mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
     mockReq.body.model = 'grok-3';
@@ -526,6 +544,7 @@ describe('AgentClient - titleConvo', () => {
     mockAgent.endpoint = EModelEndpoint.azureOpenAI;
     mockAgent.provider = EModelEndpoint.azureOpenAI;
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.azureOpenAI]: {
           titleConvo: true,
           titleModel: 'gpt-4o',
@@ -551,6 +570,7 @@
             },
           },
         },
+      },
     });
     mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
     mockReq.body.model = 'gpt-4o';
@@ -576,6 +596,7 @@ describe('AgentClient - titleConvo', () => {
     mockAgent.provider = EModelEndpoint.azureOpenAI;
     mockAgent.model_parameters.model = 'gpt-4o-latest';
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.azureOpenAI]: {
           titleConvo: true,
           titleModel: Constants.CURRENT_MODEL,
@@ -602,6 +623,7 @@
             },
           },
         },
+      },
     });
     mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
     mockReq.body.model = 'gpt-4o-latest';
@@ -625,6 +647,7 @@ describe('AgentClient - titleConvo', () => {
     mockAgent.endpoint = EModelEndpoint.azureOpenAI;
     mockAgent.provider = EModelEndpoint.azureOpenAI;
     getAppConfig.mockResolvedValue({
+      endpoints: {
         [EModelEndpoint.azureOpenAI]: {
           titleConvo: true,
           titleModel: 'o1-mini',
@@ -678,6 +701,7 @@
             },
           },
         },
+      },
     });
     mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
     mockReq.body.model = 'o1-mini';
@@ -709,6 +733,7 @@ describe('AgentClient - titleConvo', () => {
     // Set 'all' config as fallback with a serverless Azure config
     getAppConfig.mockResolvedValue({
+      endpoints: {
         all: {
           titleConvo: true,
           titleModel: 'gpt-4',
@@ -735,6 +760,7 @@
             },
           },
         },
+      },
     });
 
     const text = 'Test Azure with all config fallback';

View file

@@ -376,9 +376,9 @@ const chatV2 = async (req, res) => {
   };
 
   /** @type {undefined | TAssistantEndpoint} */
-  const config = appConfig[endpoint] ?? {};
+  const config = appConfig.endpoints?.[endpoint] ?? {};
   /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   const streamRunManager = new StreamRunManager({
     req,

View file

@@ -231,20 +231,20 @@ const fetchAssistants = async ({ req, res, overrideEndpoint }) => {
   if (endpoint === EModelEndpoint.assistants) {
     ({ body } = await listAllAssistants({ req, res, version, query }));
   } else if (endpoint === EModelEndpoint.azureAssistants) {
-    const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+    const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
    body = await listAssistantsForAzure({ req, res, version, azureConfig, query });
  }
 
   if (req.user.role === SystemRoles.ADMIN) {
     return body;
-  } else if (!appConfig[endpoint]) {
+  } else if (!appConfig.endpoints?.[endpoint]) {
     return body;
   }
 
   body.data = filterAssistants({
     userId: req.user.id,
     assistants: body.data,
-    assistantsConfig: appConfig[endpoint],
+    assistantsConfig: appConfig.endpoints?.[endpoint],
   });
   return body;
 };

View file

@@ -260,7 +260,7 @@ const getAssistantDocuments = async (req, res) => {
   try {
     const appConfig = await getAppConfig({ role: req.user?.role });
     const endpoint = req.query;
-    const assistantsConfig = appConfig[endpoint];
+    const assistantsConfig = appConfig.endpoints?.[endpoint];
     const documents = await getAssistants(
       {},
       {

View file

@@ -15,7 +15,7 @@ const validateAssistant = async (req, res, next) => {
   const appConfig = await getAppConfig({ role: req.user?.role });
   /** @type {Partial<TAssistantEndpoint>} */
-  const assistantsConfig = appConfig?.[endpoint];
+  const assistantsConfig = appConfig.endpoints?.[endpoint];
   if (!assistantsConfig) {
     return next();
   }

View file

@@ -23,7 +23,7 @@ const validateAuthor = async ({ req, openai, overrideEndpoint, overrideAssistant
   const appConfig = await getAppConfig({ role: req.user?.role });
   /** @type {Partial<TAssistantEndpoint>} */
-  const assistantsConfig = appConfig?.[endpoint];
+  const assistantsConfig = appConfig.endpoints?.[endpoint];
   if (!assistantsConfig) {
     return;
   }

View file

@@ -130,7 +130,7 @@ router.post('/:assistant_id', async (req, res) => {
     }
 
     /* Map Azure OpenAI model to the assistant as defined by config */
-    if (appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+    if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
       updatedAssistant = {
         ...updatedAssistant,
         model: req.body.model,

View file

@@ -111,7 +111,9 @@ const AppService = async () => {
   if (!Object.keys(config).length) {
     const appConfig = {
       ...defaultConfig,
+      endpoints: {
         [EModelEndpoint.agents]: agentsDefaults,
+      },
     };
     await setAppConfig(appConfig);
     return;
@@ -126,7 +128,7 @@ const AppService = async () => {
     fileConfig: config?.fileConfig,
     secureImageLinks: config?.secureImageLinks,
     modelSpecs: processModelSpecs(config?.endpoints, config.modelSpecs, interfaceConfig),
-    ...loadedEndpoints,
+    endpoints: loadedEndpoints,
   };
 
   await setAppConfig(appConfig);

View file

@@ -172,6 +172,7 @@ describe('AppService', () => {
         searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}',
       }),
       memory: undefined,
+      endpoints: expect.objectContaining({
         agents: expect.objectContaining({
           disableBuilder: false,
           capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
@@ -180,6 +181,7 @@ describe('AppService', () => {
           minRelevanceScore: 0.45,
         }),
       }),
+      }),
     );
   });
@@ -328,6 +330,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.assistants]: expect.objectContaining({
           disableBuilder: true,
           pollIntervalMs: 5000,
@@ -336,6 +339,7 @@ describe('AppService', () => {
           privateAssistants: false,
         }),
       }),
+        }),
     );
   });
@@ -358,6 +362,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.agents]: expect.objectContaining({
           disableBuilder: true,
           recursionLimit: 10,
@@ -369,6 +374,7 @@ describe('AppService', () => {
           ]),
         }),
       }),
+        }),
     );
   });
@@ -379,11 +385,13 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.agents]: expect.objectContaining({
           disableBuilder: false,
           capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
         }),
       }),
+        }),
     );
   });
@@ -402,6 +410,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.agents]: expect.objectContaining({
           disableBuilder: false,
           capabilities: expect.arrayContaining([...defaultAgentCapabilities]),
@@ -410,6 +419,7 @@ describe('AppService', () => {
           titleConvo: true,
         }),
       }),
+        }),
     );
   });
@@ -432,6 +442,7 @@ describe('AppService', () => {
     await AppService();
 
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.azureAssistants]: expect.objectContaining({
           capabilities: expect.arrayContaining([
             expect.any(String),
@@ -440,6 +451,7 @@ describe('AppService', () => {
           ]),
         }),
       }),
+        }),
     );
   });
@@ -462,12 +474,14 @@ describe('AppService', () => {
     const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(azureGroups);
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.azureOpenAI]: expect.objectContaining({
           modelNames,
           modelGroupMap,
           groupMap,
         }),
       }),
+        }),
     );
   });
@@ -619,6 +633,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         // Check OpenAI endpoint configuration
         [EModelEndpoint.openAI]: expect.objectContaining({
           titleConvo: true,
@@ -642,6 +657,7 @@ describe('AppService', () => {
           titlePromptTemplate: 'Azure conversation: {{context}}',
         }),
       }),
+        }),
     );
   });
@@ -667,6 +683,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.agents]: expect.objectContaining({
           disableBuilder: false,
           titleConvo: true,
@@ -681,6 +698,7 @@ describe('AppService', () => {
           ]),
         }),
       }),
+        }),
     );
   });
@@ -700,17 +718,19 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         [EModelEndpoint.openAI]: expect.objectContaining({
           titleConvo: true,
         }),
       }),
+        }),
     );
 
     // Verify that optional fields are not set when not provided
     const initCall = setAppConfig.mock.calls[0][0];
-    expect(initCall[EModelEndpoint.openAI].titlePrompt).toBeUndefined();
-    expect(initCall[EModelEndpoint.openAI].titlePromptTemplate).toBeUndefined();
-    expect(initCall[EModelEndpoint.openAI].titleMethod).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titlePrompt).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titlePromptTemplate).toBeUndefined();
+    expect(initCall.endpoints[EModelEndpoint.openAI].titleMethod).toBeUndefined();
   });
 
   it('should correctly configure titleEndpoint when specified', async () => {
@@ -735,6 +755,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         // Check OpenAI endpoint has titleEndpoint
         [EModelEndpoint.openAI]: expect.objectContaining({
           titleConvo: true,
@@ -748,6 +769,7 @@ describe('AppService', () => {
           titleMethod: 'structured',
         }),
       }),
+        }),
     );
   });
@@ -777,6 +799,7 @@ describe('AppService', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
         // Check that 'all' endpoint config is loaded
+        endpoints: expect.objectContaining({
         all: expect.objectContaining({
           titleConvo: true,
           titleModel: 'gpt-4o-mini',
@@ -792,6 +815,7 @@ describe('AppService', () => {
           titleModel: 'gpt-3.5-turbo',
         }),
       }),
+        }),
     );
   });
 });
@@ -883,6 +907,7 @@ describe('AppService updating app config and issuing warnings', () => {
     expect(setAppConfig).toHaveBeenCalledWith(
       expect.objectContaining({
+        endpoints: expect.objectContaining({
         assistants: expect.objectContaining({
           disableBuilder: true,
           pollIntervalMs: 5000,
@@ -890,11 +915,12 @@ describe('AppService updating app config and issuing warnings', () => {
           supportedIds: ['id1', 'id2'],
         }),
       }),
+        }),
     );
 
     // Verify excludedIds is undefined when not provided
     const initCall = setAppConfig.mock.calls[0][0];
-    expect(initCall.assistants.excludedIds).toBeUndefined();
+    expect(initCall.endpoints.assistants.excludedIds).toBeUndefined();
   });
 
   it('should log a warning when both supportedIds and excludedIds are provided', async () => {

View file

@@ -398,8 +398,8 @@ async function runAssistant({
   });
 
   const { endpoint = EModelEndpoint.azureAssistants } = openai.req.body;
-  /** @type {TCustomConfig.endpoints.assistants} */
-  const assistantsEndpointConfig = appConfig?.[endpoint] ?? {};
+  /** @type {AppConfig['endpoints']['assistants']} */
+  const assistantsEndpointConfig = appConfig.endpoints?.[endpoint] ?? {};
   const { pollIntervalMs, timeoutMs } = assistantsEndpointConfig;
 
   const run = await waitForRun({

View file

@@ -36,7 +36,7 @@ const getCustomEndpointConfig = async (endpoint) => {
     throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
   }
 
-  const customEndpoints = appConfig[EModelEndpoint.custom] ?? [];
+  const customEndpoints = appConfig.endpoints?.[EModelEndpoint.custom] ?? [];
   return customEndpoints.find(
     (endpointConfig) => normalizeEndpointName(endpointConfig.name) === endpoint,
   );

View file

@@ -28,9 +28,12 @@ async function getEndpointsConfig(req) {
   /** @type {TEndpointsConfig} */
   const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
-  if (mergedConfig[EModelEndpoint.assistants] && appConfig?.[EModelEndpoint.assistants]) {
+  if (
+    mergedConfig[EModelEndpoint.assistants] &&
+    appConfig?.endpoints?.[EModelEndpoint.assistants]
+  ) {
     const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
-      appConfig[EModelEndpoint.assistants];
+      appConfig.endpoints[EModelEndpoint.assistants];
 
     mergedConfig[EModelEndpoint.assistants] = {
       ...mergedConfig[EModelEndpoint.assistants],
@@ -40,9 +43,9 @@ async function getEndpointsConfig(req) {
       capabilities,
     };
   }
 
-  if (mergedConfig[EModelEndpoint.agents] && appConfig?.[EModelEndpoint.agents]) {
+  if (mergedConfig[EModelEndpoint.agents] && appConfig?.endpoints?.[EModelEndpoint.agents]) {
     const { disableBuilder, capabilities, allowedProviders, ..._rest } =
-      appConfig[EModelEndpoint.agents];
+      appConfig.endpoints[EModelEndpoint.agents];
 
     mergedConfig[EModelEndpoint.agents] = {
       ...mergedConfig[EModelEndpoint.agents],
@@ -52,9 +55,12 @@ async function getEndpointsConfig(req) {
     };
   }
 
-  if (mergedConfig[EModelEndpoint.azureAssistants] && appConfig?.[EModelEndpoint.azureAssistants]) {
+  if (
+    mergedConfig[EModelEndpoint.azureAssistants] &&
+    appConfig?.endpoints?.[EModelEndpoint.azureAssistants]
+  ) {
     const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
-      appConfig[EModelEndpoint.azureAssistants];
+      appConfig.endpoints[EModelEndpoint.azureAssistants];
 
     mergedConfig[EModelEndpoint.azureAssistants] = {
       ...mergedConfig[EModelEndpoint.azureAssistants],
@@ -65,8 +71,8 @@ async function getEndpointsConfig(req) {
     };
   }
 
-  if (mergedConfig[EModelEndpoint.bedrock] && appConfig?.[EModelEndpoint.bedrock]) {
-    const { availableRegions } = appConfig[EModelEndpoint.bedrock];
+  if (mergedConfig[EModelEndpoint.bedrock] && appConfig?.endpoints?.[EModelEndpoint.bedrock]) {
+    const { availableRegions } = appConfig.endpoints[EModelEndpoint.bedrock];
     mergedConfig[EModelEndpoint.bedrock] = {
       ...mergedConfig[EModelEndpoint.bedrock],
       availableRegions,

View file

@@ -1,6 +1,7 @@
 const appConfig = require('./app');
 const { config } = require('./EndpointService');
 const getCachedTools = require('./getCachedTools');
+const getCustomConfig = require('./getCustomConfig');
 const loadCustomConfig = require('./loadCustomConfig');
 const loadConfigModels = require('./loadConfigModels');
 const loadDefaultModels = require('./loadDefaultModels');
@@ -17,5 +18,6 @@ module.exports = {
   loadAsyncEndpoints,
   ...appConfig,
   ...getCachedTools,
+  ...getCustomConfig,
   ...getEndpointsConfig,
 };

View file

@@ -36,7 +36,7 @@ async function loadAsyncEndpoints(req) {
   const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;
 
-  const useAzure = appConfig[EModelEndpoint.azureOpenAI]?.plugins;
+  const useAzure = appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins;
   const gptPlugins =
     useAzure || openAIApiKey || azureOpenAIApiKey
       ? {

View file

@@ -15,8 +15,8 @@ async function loadConfigEndpoints(req) {
   const endpointsConfig = {};
 
-  if (Array.isArray(appConfig[EModelEndpoint.custom])) {
-    const customEndpoints = appConfig[EModelEndpoint.custom].filter(
+  if (Array.isArray(appConfig.endpoints?.[EModelEndpoint.custom])) {
+    const customEndpoints = appConfig.endpoints[EModelEndpoint.custom].filter(
       (endpoint) =>
         endpoint.baseURL &&
         endpoint.apiKey &&
@@ -51,14 +51,14 @@ async function loadConfigEndpoints(req) {
     }
   }
 
-  if (appConfig[EModelEndpoint.azureOpenAI]) {
+  if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]) {
     /** @type {Omit<TConfig, 'order'>} */
     endpointsConfig[EModelEndpoint.azureOpenAI] = {
       userProvide: false,
     };
   }
 
-  if (appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+  if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
     /** @type {Omit<TConfig, 'order'>} */
     endpointsConfig[EModelEndpoint.azureAssistants] = {
       userProvide: false,

View file

@@ -14,7 +14,7 @@ async function loadConfigModels(req) {
     return {};
   }
   const modelsConfig = {};
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
   const { modelNames } = azureConfig ?? {};
 
   if (modelNames && azureConfig) {
@@ -29,11 +29,11 @@ async function loadConfigModels(req) {
     modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
   }
 
-  if (!Array.isArray(appConfig[EModelEndpoint.custom])) {
+  if (!Array.isArray(appConfig.endpoints?.[EModelEndpoint.custom])) {
     return modelsConfig;
   }
 
-  const customEndpoints = appConfig[EModelEndpoint.custom].filter(
+  const customEndpoints = appConfig.endpoints[EModelEndpoint.custom].filter(
     (endpoint) =>
       endpoint.baseURL &&
       endpoint.apiKey &&

View file

@@ -6,6 +6,7 @@ jest.mock('~/server/services/ModelService');
 jest.mock('./app');
 
 const exampleConfig = {
+  endpoints: {
     custom: [
       {
         name: 'Mistral',
@@ -55,6 +56,7 @@
         },
       },
     ],
+  },
 };
 
 describe('loadConfigModels', () => {
@@ -83,7 +85,9 @@ describe('loadConfigModels', () => {
   it('handles azure models and endpoint correctly', async () => {
     getAppConfig.mockResolvedValue({
+      endpoints: {
         azureOpenAI: { modelNames: ['model1', 'model2'] },
+      },
     });
 
     const result = await loadConfigModels(mockRequest);
@@ -102,7 +106,7 @@ describe('loadConfigModels', () => {
       },
     ];
-    getAppConfig.mockResolvedValue({ custom: customEndpoints });
+    getAppConfig.mockResolvedValue({ endpoints: { custom: customEndpoints } });
     fetchModels.mockResolvedValue(['customModel1', 'customModel2']);
 
     const result = await loadConfigModels(mockRequest);
@@ -112,6 +116,7 @@ describe('loadConfigModels', () => {
   it('correctly associates models to names using unique keys', async () => {
     getAppConfig.mockResolvedValue({
+      endpoints: {
         custom: [
           {
             baseURL: 'http://example.com',
@@ -126,6 +131,7 @@
             models: { fetch: true },
           },
         ],
+      },
     });
     fetchModels.mockImplementation(({ apiKey }) =>
       Promise.resolve(apiKey === 'API_KEY1' ? ['model1Data'] : ['model2Data']),
@@ -139,6 +145,7 @@ describe('loadConfigModels', () => {
   it('correctly handles multiple endpoints with the same baseURL but different apiKeys', async () => {
     // Mock the custom configuration to simulate the user's scenario
     getAppConfig.mockResolvedValue({
+      endpoints: {
         custom: [
           {
             name: 'LiteLLM',
@@ -159,6 +166,7 @@
             models: { fetch: true },
           },
         ],
+      },
     });
 
     // Mock `fetchModels` to return different models based on the apiKey
@@ -246,8 +254,8 @@ describe('loadConfigModels', () => {
     // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
     // Depending on your implementation's behavior regarding "default" models without fetching,
     // you may need to adjust the following assertions:
-    expect(result.groq).toBe(exampleConfig.custom[2].models.default);
-    expect(result.ollama).toBe(exampleConfig.custom[3].models.default);
+    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
+    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
 
     // Verifying fetchModels was not called for groq and ollama
     expect(fetchModels).not.toHaveBeenCalledWith(
@@ -264,6 +272,7 @@ describe('loadConfigModels', () => {
   it('falls back to default models if fetching returns an empty array', async () => {
     getAppConfig.mockResolvedValue({
+      endpoints: {
         custom: [
           {
             name: 'EndpointWithSameFetchKey',
@@ -284,6 +293,7 @@
             },
          },
        ],
+      },
     });
     fetchModels.mockResolvedValue([]);
@@ -295,6 +305,7 @@ describe('loadConfigModels', () => {
   it('falls back to default models if fetching returns a falsy value', async () => {
     getAppConfig.mockResolvedValue({
+      endpoints: {
         custom: [
           {
             name: 'FalsyFetchModel',
@@ -306,6 +317,7 @@
             },
          },
        ],
+      },
     });
     fetchModels.mockResolvedValue(false);
@@ -354,7 +366,9 @@ describe('loadConfigModels', () => {
     ];
     getAppConfig.mockResolvedValue({
+      endpoints: {
         custom: testCases,
+      },
     });
 
     const result = await loadConfigModels(mockRequest);

View file

@@ -90,8 +90,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
   }
 
   const agentConfigs = new Map();
-  /** @type {Set<string>} */
-  const allowedProviders = new Set(appConfig?.[EModelEndpoint.agents]?.allowedProviders);
+  const allowedProviders = new Set(appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders);
 
   const loadTools = createToolLoader();
   /** @type {Array<MongoFile>} */
@@ -145,7 +144,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
     }
   }
 
-  let endpointConfig = appConfig[primaryConfig.endpoint];
+  let endpointConfig = appConfig.endpoints?.[primaryConfig.endpoint];
   if (!isAgentsEndpoint(primaryConfig.endpoint) && !endpointConfig) {
     try {
       endpointConfig = await getCustomEndpointConfig(primaryConfig.endpoint);

View file

@@ -25,15 +25,14 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   let clientOptions = {};
 
   /** @type {undefined | TBaseEndpoint} */
-  const anthropicConfig = appConfig[EModelEndpoint.anthropic];
+  const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];
 
   if (anthropicConfig) {
     clientOptions.streamRate = anthropicConfig.streamRate;
     clientOptions.titleModel = anthropicConfig.titleModel;
   }
 
-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   if (allConfig) {
     clientOptions.streamRate = allConfig.streamRate;
   }

View file

@@ -83,7 +83,7 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
   };
 
   /** @type {TAzureConfig | undefined} */
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
 
   /** @type {AzureOptions | undefined} */
   let azureOptions;

View file

@@ -52,14 +52,13 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
   let streamRate = Constants.DEFAULT_STREAM_RATE;
 
   /** @type {undefined | TBaseEndpoint} */
-  const bedrockConfig = appConfig[EModelEndpoint.bedrock];
+  const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock];
 
   if (bedrockConfig && bedrockConfig.streamRate) {
     streamRate = bedrockConfig.streamRate;
   }
 
-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   if (allConfig && allConfig.streamRate) {
     streamRate = allConfig.streamRate;
   }

View file

@@ -118,8 +118,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
     endpointTokenConfig,
   };
 
-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   if (allConfig) {
     customOptions.streamRate = allConfig.streamRate;
   }

View file

@@ -49,9 +49,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   const appConfig = await getAppConfig({ role: req.user?.role });
 
   /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   /** @type {undefined | TBaseEndpoint} */
-  const googleConfig = appConfig[EModelEndpoint.google];
+  const googleConfig = appConfig.endpoints?.[EModelEndpoint.google];
 
   if (googleConfig) {
     clientOptions.streamRate = googleConfig.streamRate;

View file

@@ -16,7 +16,7 @@ const addTitle = async (req, { text, response, client }) => {
   }
 
   const { GOOGLE_TITLE_MODEL } = process.env ?? {};
   const appConfig = await getAppConfig({ role: req.user?.role });
-  const providerConfig = appConfig[EModelEndpoint.google];
+  const providerConfig = appConfig.endpoints?.[EModelEndpoint.google];
   let model =
     providerConfig?.titleModel ??
     GOOGLE_TITLE_MODEL ??

View file

@@ -66,7 +66,7 @@ const initializeClient = async ({
   const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
 
   /** @type {false | TAzureConfig} */
-  const azureConfig = isAzureOpenAI && appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
   let serverless = false;
   if (isAzureOpenAI && azureConfig) {
     const { modelGroupMap, groupMap } = azureConfig;
@@ -115,15 +115,14 @@ const initializeClient = async ({
   }
 
   /** @type {undefined | TBaseEndpoint} */
-  const openAIConfig = appConfig[EModelEndpoint.openAI];
+  const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];
 
   if (!isAzureOpenAI && openAIConfig) {
     clientOptions.streamRate = openAIConfig.streamRate;
     clientOptions.titleModel = openAIConfig.titleModel;
   }
 
-  /** @type {undefined | TBaseEndpoint} */
-  const allConfig = appConfig.all;
+  const allConfig = appConfig.endpoints?.all;
 
   if (allConfig) {
     clientOptions.streamRate = allConfig.streamRate;
   }

View file

@@ -22,6 +22,7 @@ jest.mock('~/server/services/UserService', () => ({
 jest.mock('~/server/services/Config', () => ({
   getAppConfig: jest.fn().mockResolvedValue({
+    endpoints: {
       openAI: {
         apiKey: 'test-key',
       },
@@ -49,6 +50,7 @@
         },
       },
     },
+    },
   }),
 }));

View file

@@ -51,9 +51,11 @@ async function processFileCitations({ user, toolArtifact, toolCallId, metadata }
   }
 
   const appConfig = await getAppConfig({ role: user?.role });
-  const maxCitations = appConfig?.[EModelEndpoint.agents]?.maxCitations ?? 30;
-  const maxCitationsPerFile = appConfig?.[EModelEndpoint.agents]?.maxCitationsPerFile ?? 5;
-  const minRelevanceScore = appConfig?.[EModelEndpoint.agents]?.minRelevanceScore ?? 0.45;
+  const maxCitations = appConfig.endpoints?.[EModelEndpoint.agents]?.maxCitations ?? 30;
+  const maxCitationsPerFile =
+    appConfig.endpoints?.[EModelEndpoint.agents]?.maxCitationsPerFile ?? 5;
+  const minRelevanceScore =
+    appConfig.endpoints?.[EModelEndpoint.agents]?.minRelevanceScore ?? 0.45;
   const sources = toolArtifact[Tools.file_search].sources || [];
   const filteredSources = sources.filter((source) => source.relevance >= minRelevanceScore);

View file

@@ -165,7 +165,7 @@ const processDeleteRequest = async ({ req, files }) => {
   /** @type {Record<string, OpenAI | undefined>} */
   const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
   const initializeClients = async () => {
-    if (appConfig[EModelEndpoint.assistants]) {
+    if (appConfig.endpoints?.[EModelEndpoint.assistants]) {
       const openAIClient = await getOpenAIClient({
         req,
         overrideEndpoint: EModelEndpoint.assistants,
@@ -173,7 +173,7 @@ const processDeleteRequest = async ({ req, files }) => {
       client[FileSources.openai] = openAIClient.openai;
     }
 
-    if (!appConfig[EModelEndpoint.azureOpenAI]?.assistants) {
+    if (!appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
       return;
     }

View file

@@ -33,7 +33,7 @@ async function retrieveRun({ thread_id, run_id, timeout, openai }) {
   }
 
   /** @type {TAzureConfig | undefined} */
-  const azureConfig = appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
 
   if (azureConfig && azureConfig.assistants) {
     delete headers.Authorization;

View file

@@ -504,7 +504,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
   /** Edge case: use defined/fallback capabilities when the "agents" endpoint is not enabled */
   if (enabledCapabilities.size === 0 && agent.id === Constants.EPHEMERAL_AGENT_ID) {
     enabledCapabilities = new Set(
-      appConfig?.[EModelEndpoint.agents]?.capabilities ?? defaultAgentCapabilities,
+      appConfig.endpoints?.[EModelEndpoint.agents]?.capabilities ?? defaultAgentCapabilities,
     );
   }
 
   const checkCapability = (capability) => {

View file

@@ -29,9 +29,11 @@ describe('primeResources', () => {
     // Setup mock appConfig
     mockAppConfig = {
+      endpoints: {
         [EModelEndpoint.agents]: {
           capabilities: [AgentCapabilities.ocr],
         } as TAgentsEndpoint,
+      },
     } as AppConfig;
 
     // Setup mock getFiles function
@@ -87,7 +89,7 @@ describe('primeResources', () => {
   describe('when OCR is disabled', () => {
     it('should not fetch OCR files even if tool_resources has OCR file_ids', async () => {
-      (mockAppConfig[EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
+      (mockAppConfig.endpoints![EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
 
       const tool_resources = {
         [EToolResources.ocr]: {

View file

@@ -202,9 +202,9 @@ export const primeResources = async ({
     }
   }
 
-  const isOCREnabled = (appConfig?.[EModelEndpoint.agents]?.capabilities ?? []).includes(
-    AgentCapabilities.ocr,
-  );
+  const isOCREnabled = (
+    appConfig?.endpoints?.[EModelEndpoint.agents]?.capabilities ?? []
+  ).includes(AgentCapabilities.ocr);
 
   if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
     const context = await getFiles(

View file

@@ -72,7 +72,7 @@ export const initializeOpenAI = async ({
   };
 
   const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
-  const azureConfig = isAzureOpenAI && appConfig[EModelEndpoint.azureOpenAI];
+  const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
   if (isAzureOpenAI && azureConfig) {
     const { modelGroupMap, groupMap } = azureConfig;
@@ -143,8 +143,8 @@ export const initializeOpenAI = async ({
   const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
 
-  const openAIConfig = appConfig[EModelEndpoint.openAI];
-  const allConfig = appConfig.all;
+  const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];
+  const allConfig = appConfig.endpoints?.all;
   const azureRate = modelName?.includes('gpt-4') ? 30 : 17;
   let streamRate: number | undefined;

View file

@@ -59,6 +59,7 @@ export interface AppConfig {
   secureImageLinks?: TCustomConfig['secureImageLinks'];
   /** Processed model specifications */
   modelSpecs?: TCustomConfig['modelSpecs'];
+  endpoints?: {
     /** OpenAI endpoint configuration */
     openAI?: TEndpoint;
     /** Google endpoint configuration */
@@ -81,4 +82,5 @@ export interface AppConfig {
     all?: TEndpoint;
     /** Any additional endpoint configurations */
     [key: string]: unknown;
+  };
 }
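The updated JSDoc annotations in the hunks above (e.g. AppConfig['endpoints']['agents']) index into this new optional field. A hedged TypeScript sketch with stand-in types, showing one way a consumer might derive the per-endpoint type; note that in strict TypeScript the optional `endpoints` field needs NonNullable<> before it can be indexed further:

```ts
// Stand-in types for illustration only; the real AppConfig/TEndpoint live in the project.
interface TEndpointLike {
  allowedProviders?: string[];
  capabilities?: string[];
}

interface AppConfigLike {
  endpoints?: {
    agents?: TEndpointLike;
    all?: TEndpointLike;
    [key: string]: unknown;
  };
}

// Indexed-access type mirroring the updated JSDoc shape.
type AgentsEndpointConfig = NonNullable<AppConfigLike['endpoints']>['agents'];

function getAllowedProviders(appConfig: AppConfigLike): Set<string> {
  // Optional chaining matches the new appConfig.endpoints?.[...] access pattern.
  const agentsConfig: AgentsEndpointConfig = appConfig.endpoints?.agents;
  return new Set(agentsConfig?.allowedProviders ?? []);
}
```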