refactor: implement custom endpoints configuration and streamline endpoint loading logic

This commit is contained in:
Danny Avila 2025-08-18 16:58:05 -04:00
parent 240e3bd59e
commit 5eef6ea9e8
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
12 changed files with 100 additions and 88 deletions

View file

@@ -1,3 +1,4 @@
const { loadCustomEndpointsConfig } = require('@librechat/api');
const {
CacheKeys,
EModelEndpoint,
@@ -6,7 +7,6 @@ const {
defaultAgentCapabilities,
} = require('librechat-data-provider');
const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');
const loadConfigEndpoints = require('./loadConfigEndpoints');
const getLogStores = require('~/cache/getLogStores');
const { getAppConfig } = require('./app');
@ -22,12 +22,30 @@ async function getEndpointsConfig(req) {
return cachedEndpointsConfig;
}
const defaultEndpointsConfig = await loadDefaultEndpointsConfig(req);
const customConfigEndpoints = await loadConfigEndpoints(req);
const appConfig = await getAppConfig({ role: req.user?.role });
const defaultEndpointsConfig = await loadDefaultEndpointsConfig(appConfig);
const customEndpointsConfig = loadCustomEndpointsConfig(appConfig?.endpoints?.custom);
/** @type {TEndpointsConfig} */
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
const mergedConfig = {
...defaultEndpointsConfig,
...customEndpointsConfig,
};
if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]) {
/** @type {Omit<TConfig, 'order'>} */
mergedConfig[EModelEndpoint.azureOpenAI] = {
userProvide: false,
};
}
if (appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
/** @type {Omit<TConfig, 'order'>} */
mergedConfig[EModelEndpoint.azureAssistants] = {
userProvide: false,
};
}
if (
mergedConfig[EModelEndpoint.assistants] &&
appConfig?.endpoints?.[EModelEndpoint.assistants]

View file

@ -1,18 +1,16 @@
const path = require('path');
const { logger } = require('@librechat/data-schemas');
const { loadServiceKey, isUserProvided } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { loadServiceKey, isUserProvided } = require('@librechat/api');
const { config } = require('./EndpointService');
const { getAppConfig } = require('./app');
const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } = config;
/**
* Load async endpoints and return a configuration object
* @param {Express.Request} req - The request object
* @param {AppConfig} [appConfig] - The app configuration object
*/
async function loadAsyncEndpoints(req) {
const appConfig = await getAppConfig({ role: req.user?.role });
async function loadAsyncEndpoints(appConfig) {
let serviceKey, googleUserProvides;
/** Check if GOOGLE_KEY is provided at all (including 'user_provided') */
@ -36,7 +34,7 @@ async function loadAsyncEndpoints(req) {
const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;
const useAzure = appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins;
const useAzure = !!appConfig?.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins;
const gptPlugins =
useAzure || openAIApiKey || azureOpenAIApiKey
? {

View file

@ -1,71 +0,0 @@
const { isUserProvided, normalizeEndpointName } = require('@librechat/api');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { getAppConfig } = require('./app');
/**
 * Load config endpoints from the cached configuration object.
 * @param {Express.Request} req - The request object
 * @returns {Promise<TEndpointsConfig>} A promise that resolves to an object containing the endpoints configuration
 */
async function loadConfigEndpoints(req) {
  const appConfig = await getAppConfig({ role: req.user?.role });
  if (!appConfig) {
    return {};
  }

  /** @type {TEndpointsConfig} */
  const endpointsConfig = {};
  const endpoints = appConfig.endpoints;

  const customList = endpoints?.[EModelEndpoint.custom];
  if (Array.isArray(customList)) {
    /** An entry is usable only with a name, credentials, and at least one model source */
    const isComplete = (endpoint) =>
      Boolean(
        endpoint.baseURL &&
          endpoint.apiKey &&
          endpoint.name &&
          endpoint.models &&
          (endpoint.models.fetch || endpoint.models.default),
      );

    for (const endpoint of customList.filter(isComplete)) {
      const { baseURL, apiKey, name: configName, iconURL, modelDisplayLabel, customParams } =
        endpoint;
      const name = normalizeEndpointName(configName);
      // Resolve `${ENV_VAR}`-style placeholders before checking for user-provided values
      const resolvedApiKey = extractEnvVariable(apiKey);
      const resolvedBaseURL = extractEnvVariable(baseURL);
      endpointsConfig[name] = {
        type: EModelEndpoint.custom,
        userProvide: isUserProvided(resolvedApiKey),
        userProvideURL: isUserProvided(resolvedBaseURL),
        modelDisplayLabel,
        iconURL,
        customParams,
      };
    }
  }

  if (endpoints?.[EModelEndpoint.azureOpenAI]) {
    /** @type {Omit<TConfig, 'order'>} */
    endpointsConfig[EModelEndpoint.azureOpenAI] = {
      userProvide: false,
    };
  }

  if (endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
    /** @type {Omit<TConfig, 'order'>} */
    endpointsConfig[EModelEndpoint.azureAssistants] = {
      userProvide: false,
    };
  }

  return endpointsConfig;
}

module.exports = loadConfigEndpoints;

View file

@ -4,11 +4,11 @@ const { config } = require('./EndpointService');
/**
* Load async endpoints and return a configuration object
* @param {Express.Request} req - The request object
* @param {AppConfig} appConfig - The app configuration object
* @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
*/
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
async function loadDefaultEndpointsConfig(appConfig) {
const { google, gptPlugins } = await loadAsyncEndpoints(appConfig);
const { assistants, azureAssistants, azureOpenAI, chatGPTBrowser } = config;
const enabledEndpoints = getEnabledEndpoints();

View file

@ -43,9 +43,9 @@ const loadEndpoints = (config, agentsDefaults) => {
const endpointKeys = [
EModelEndpoint.openAI,
EModelEndpoint.google,
EModelEndpoint.custom,
EModelEndpoint.bedrock,
EModelEndpoint.anthropic,
EModelEndpoint.gptPlugins,
];
endpointKeys.forEach((key) => {

View file

@ -0,0 +1,56 @@
import { EModelEndpoint, extractEnvVariable } from 'librechat-data-provider';
import type { TCustomEndpoints, TEndpoint, TConfig } from 'librechat-data-provider';
import type { TCustomEndpointsConfig } from '~/types/endpoints';
import { isUserProvided, normalizeEndpointName } from '~/utils';
/**
 * Builds the endpoints configuration object for user-defined custom endpoints.
 * @param customEndpoints - The `endpoints.custom` array from the app configuration
 * @returns A map of normalized endpoint name to its client-facing config, or
 * `undefined` when no custom endpoints are configured
 */
export function loadCustomEndpointsConfig(
  customEndpoints?: TCustomEndpoints,
): TCustomEndpointsConfig | undefined {
  if (!customEndpoints) {
    return;
  }
  const customEndpointsConfig: TCustomEndpointsConfig = {};
  if (Array.isArray(customEndpoints)) {
    /** An endpoint is usable only with a name, credentials, and at least one model source */
    const filteredEndpoints = customEndpoints.filter(
      (endpoint) =>
        endpoint.baseURL &&
        endpoint.apiKey &&
        endpoint.name &&
        endpoint.models &&
        (endpoint.models.fetch || endpoint.models.default),
    );
    for (const filtered of filteredEndpoints) {
      const endpoint = filtered as TEndpoint;
      const {
        baseURL,
        apiKey,
        name: configName,
        iconURL,
        modelDisplayLabel,
        customParams,
      } = endpoint;
      const name = normalizeEndpointName(configName);
      // Resolve env-style placeholders before checking for user-provided values
      const resolvedApiKey = extractEnvVariable(apiKey ?? '');
      const resolvedBaseURL = extractEnvVariable(baseURL ?? '');
      customEndpointsConfig[name] = {
        type: EModelEndpoint.custom,
        userProvide: isUserProvided(resolvedApiKey),
        userProvideURL: isUserProvided(resolvedBaseURL),
        customParams: customParams as TConfig['customParams'],
        modelDisplayLabel,
        iconURL,
      };
    }
  }
  return customEndpointsConfig;
}

View file

@ -0,0 +1 @@
export * from './config';

View file

@ -1,2 +1,3 @@
export * from './custom';
export * from './google';
export * from './openai';

View file

@ -6,6 +6,7 @@ import type {
TMemoryConfig,
EModelEndpoint,
TAgentsEndpoint,
TCustomEndpoints,
TAssistantEndpoint,
} from 'librechat-data-provider';
@ -78,9 +79,9 @@ export interface AppConfig {
azureAssistants?: TAssistantEndpoint;
/** Agents endpoint configuration */
[EModelEndpoint.agents]?: TAgentsEndpoint;
/** Custom endpoints configuration */
[EModelEndpoint.custom]?: TCustomEndpoints;
/** Global endpoint configuration */
all?: TEndpoint;
/** Any additional endpoint configurations */
[key: string]: unknown;
};
}

View file

@ -0,0 +1,3 @@
import type { TConfig } from 'librechat-data-provider';

/**
 * Endpoint configurations keyed by endpoint name; each entry is a `TConfig`
 * without its display `order`. Entries may be absent for unconfigured endpoints.
 */
export type TCustomEndpointsConfig = Partial<{ [key: string]: Omit<TConfig, 'order'> }>;

View file

@ -1,6 +1,7 @@
export * from './config';
export * from './azure';
export * from './balance';
export * from './endpoints';
export * from './events';
export * from './error';
export * from './google';

View file

@ -300,6 +300,7 @@ export const endpointSchema = baseEndpointSchema.merge(
}),
summarize: z.boolean().optional(),
summaryModel: z.string().optional(),
iconURL: z.string().optional(),
forcePrompt: z.boolean().optional(),
modelDisplayLabel: z.string().optional(),
headers: z.record(z.any()).optional(),
@ -789,6 +790,8 @@ export const memorySchema = z.object({
export type TMemoryConfig = z.infer<typeof memorySchema>;
const customEndpointsSchema = z.array(endpointSchema.partial()).optional();
export const configSchema = z.object({
version: z.string(),
cache: z.boolean().default(true),
@ -837,7 +840,7 @@ export const configSchema = z.object({
[EModelEndpoint.azureAssistants]: assistantEndpointSchema.optional(),
[EModelEndpoint.assistants]: assistantEndpointSchema.optional(),
[EModelEndpoint.agents]: agentsEndpointSchema.optional(),
[EModelEndpoint.custom]: z.array(endpointSchema.partial()).optional(),
[EModelEndpoint.custom]: customEndpointsSchema.optional(),
[EModelEndpoint.bedrock]: baseEndpointSchema.optional(),
})
.strict()
@ -850,6 +853,7 @@ export const configSchema = z.object({
export const getConfigDefaults = () => getSchemaDefaults(configSchema);
export type TCustomConfig = z.infer<typeof configSchema>;
export type TCustomEndpoints = z.infer<typeof customEndpointsSchema>;
export type TProviderSchema =
| z.infer<typeof ttsOpenaiSchema>