Mirror of https://github.com/danny-avila/LibreChat.git
* wip: first pass for azure endpoint schema
* refactor: azure config to return groupMap and modelConfigMap
* wip: naming and schema changes
* refactor(errorsToString): move to data-provider
* feat: rename to azureGroups, add additional tests, tests all expected outcomes, return errors
* feat(AppService): load Azure groups
* refactor(azure): use imported types, write `mapModelToAzureConfig`
* refactor: move `extractEnvVariable` to data-provider
* refactor(validateAzureGroups): throw on duplicate groups or models; feat(mapModelToAzureConfig): throw if env vars not present, add tests
* refactor(AppService): ensure each model is properly configured on startup
* refactor: deprecate azureOpenAI environment variables in favor of librechat.yaml config
* feat: use helper functions to handle and order enabled/default endpoints; initialize azureOpenAI from config file
* refactor: redefine types as well as load azureOpenAI models from config file
* chore(ci): fix test description naming
* feat(azureOpenAI): use validated model grouping for request authentication
* chore: bump data-provider following rebase
* chore: bump config file version noting significant changes
* feat: add title options and switch azure configs for titling and vision requests
* feat: enable azure plugins from config file
* fix(ci): pass tests
* chore(.env.example): mark `PLUGINS_USE_AZURE` as deprecated
* fix(fetchModels): early return if apiKey not passed
* chore: fix azure config typing
* refactor(mapModelToAzureConfig): return baseURL and headers as well as azureOptions
* feat(createLLM): use `azureOpenAIBasePath`
* feat(parsers): resolveHeaders
* refactor(extractBaseURL): handle invalid input
* feat(OpenAIClient): handle headers and baseURL for azureConfig
* fix(ci): pass `OpenAIClient` tests
* chore: extract env var for azureOpenAI group config, baseURL
* docs: azureOpenAI config setup docs
* feat: safe check of potential conflicting env vars that map to unique placeholders
* fix: reset apiKey when model switches from originally requested model (vision or title)
* chore: linting
* docs: CONFIG_PATH notes in custom_config.md
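The Azure-related items above center on moving Azure OpenAI configuration out of environment variables and into grouped entries in `librechat.yaml`, validated at startup and later resolved per request via `mapModelToAzureConfig`. As a rough illustration of that grouping idea (a minimal sketch only; the field names below are assumptions, not the documented schema), each group pairs env-var placeholders for its credentials with per-model deployment settings:

// Illustrative sketch only: field names are assumed, not the actual librechat.yaml schema.
// The idea: named groups whose secrets are '${ENV_VAR}' placeholders resolved from
// process.env at startup, with per-model deployment settings for later lookup.
const azureGroups = [
  {
    group: 'eastus',                    // assumed field name
    apiKey: '${EASTUS_API_KEY}',        // placeholder resolved from process.env.EASTUS_API_KEY
    instanceName: 'my-eastus-instance', // assumed field name
    models: {
      'gpt-4-vision-preview': { deploymentName: 'gpt-4-vision' }, // assumed shape
      'gpt-3.5-turbo': true,
    },
  },
];

Per the `validateAzureGroups` commit above, validation would then reject duplicate group or model names and missing environment variables before the app finishes starting.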
37 lines · 1.4 KiB · JavaScript
const { EModelEndpoint, getEnabledEndpoints } = require('librechat-data-provider');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const { config } = require('./EndpointService');

/**
 * Load async endpoints and return a configuration object
 * @param {Express.Request} req - The request object
 * @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
 */
async function loadDefaultEndpointsConfig(req) {
  const { google, gptPlugins } = await loadAsyncEndpoints(req);
  const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;

  const enabledEndpoints = getEnabledEndpoints();

  // All known default endpoints, keyed by their `EModelEndpoint` name
  const endpointConfig = {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.assistants]: assistants,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.google]: google,
    [EModelEndpoint.bingAI]: bingAI,
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };

  // Keep only enabled endpoints that have a configuration, recording each
  // endpoint's position in the enabled list as its display `order`
  const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => {
    if (endpointConfig[key]) {
      config[key] = { ...(endpointConfig[key] ?? {}), order: index };
    }
    return config;
  }, {});

  return orderedAndFilteredEndpoints;
}

module.exports = loadDefaultEndpointsConfig;
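For context, here is a minimal sketch of how the loader above might be wired into a route, assuming an Express router; the route path and require path are illustrative, not LibreChat's actual ones:

const express = require('express');
// Hypothetical path: adjust to wherever loadDefaultEndpointsConfig actually lives.
const loadDefaultEndpointsConfig = require('./loadDefaultEndpointsConfig');

const router = express.Router();

// Responds with e.g. { openAI: { ...openAIConfig, order: 0 }, anthropic: { ...anthropicConfig, order: 1 } },
// containing only endpoints that are both enabled and configured.
router.get('/', async (req, res) => {
  const endpointsConfig = await loadDefaultEndpointsConfig(req);
  res.json(endpointsConfig);
});

module.exports = router;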