Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-18 01:10:14 +01:00)
🅰️ feat: Azure Config to Allow Different Deployments per Model (#1863)
* wip: first pass for azure endpoint schema
* refactor: azure config to return groupMap and modelConfigMap
* wip: naming and schema changes
* refactor(errorsToString): move to data-provider
* feat: rename to azureGroups, add additional tests, tests all expected outcomes, return errors
* feat(AppService): load Azure groups
* refactor(azure): use imported types, write `mapModelToAzureConfig`
* refactor: move `extractEnvVariable` to data-provider
* refactor(validateAzureGroups): throw on duplicate groups or models; feat(mapModelToAzureConfig): throw if env vars not present, add tests
* refactor(AppService): ensure each model is properly configured on startup
* refactor: deprecate azureOpenAI environment variables in favor of librechat.yaml config
* feat: use helper functions to handle and order enabled/default endpoints; initialize azureOpenAI from config file
* refactor: redefine types as well as load azureOpenAI models from config file
* chore(ci): fix test description naming
* feat(azureOpenAI): use validated model grouping for request authentication
* chore: bump data-provider following rebase
* chore: bump config file version noting significant changes
* feat: add title options and switch azure configs for titling and vision requests
* feat: enable azure plugins from config file
* fix(ci): pass tests
* chore(.env.example): mark `PLUGINS_USE_AZURE` as deprecated
* fix(fetchModels): early return if apiKey not passed
* chore: fix azure config typing
* refactor(mapModelToAzureConfig): return baseURL and headers as well as azureOptions
* feat(createLLM): use `azureOpenAIBasePath`
* feat(parsers): resolveHeaders
* refactor(extractBaseURL): handle invalid input
* feat(OpenAIClient): handle headers and baseURL for azureConfig
* fix(ci): pass `OpenAIClient` tests
* chore: extract env var for azureOpenAI group config, baseURL
* docs: azureOpenAI config setup docs
* feat: safe check of potential conflicting env vars that map to unique placeholders
* fix: reset apiKey when model switches from originally requested model (vision or title)
* chore: linting
* docs: CONFIG_PATH notes in custom_config.md
parent 7a55132e42
commit 097a978e5b
37 changed files with 2066 additions and 394 deletions
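Before the hunks: per the commit message, AppService now loads and validates the Azure group configuration from librechat.yaml at startup, and the request-time loaders below read the result from `req.app.locals[EModelEndpoint.azureOpenAI]`. A minimal sketch of that entry, for orientation only; the values are examples, and any field beyond `modelNames` and `plugins` (the two fields these hunks actually read) is an assumption, not taken from this diff:

// Sketch only: what the hunks below expect to find on app.locals.
// The validated group/deployment mapping itself (the groupMap / modelConfigMap
// mentioned in the commit message) is omitted here.
app.locals[EModelEndpoint.azureOpenAI] = {
  modelNames: ['gpt-4', 'gpt-4-vision-preview'], // example deployment model names
  plugins: true, // lets the gptPlugins endpoint run against Azure
};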
@@ -1,12 +1,14 @@
-const { availableTools } = require('~/app/clients/tools');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
+const { availableTools } = require('~/app/clients/tools');
 const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } =
   require('./EndpointService').config;
 
 /**
  * Load async endpoints and return a configuration object
+ * @param {Express.Request} req - The request object
  */
-async function loadAsyncEndpoints() {
+async function loadAsyncEndpoints(req) {
   let i = 0;
   let serviceKey, googleUserProvides;
   try {
@@ -35,13 +37,14 @@ async function loadAsyncEndpoints() {
 
   const google = serviceKey || googleKey ? { userProvide: googleUserProvides } : false;
 
+  const useAzure = req.app.locals[EModelEndpoint.azureOpenAI]?.plugins;
   const gptPlugins =
-    openAIApiKey || azureOpenAIApiKey
+    useAzure || openAIApiKey || azureOpenAIApiKey
       ? {
         plugins,
         availableAgents: ['classic', 'functions'],
-        userProvide: userProvidedOpenAI,
-        azure: useAzurePlugins,
+        userProvide: useAzure ? false : userProvidedOpenAI,
+        azure: useAzurePlugins || useAzure,
       }
       : false;
 
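With Azure plugins enabled via the config file, `useAzure` is truthy and the plugins endpoint stops advertising user-provided OpenAI keys. The branch above then produces roughly this entry (sketch; `plugins` is the tool list assembled earlier in loadAsyncEndpoints and is not shown in this hunk):

// gptPlugins entry when req.app.locals[EModelEndpoint.azureOpenAI].plugins is set (sketch):
// {
//   plugins,                                   // tool list built earlier in the function
//   availableAgents: ['classic', 'functions'],
//   userProvide: false,                        // useAzure ? false : userProvidedOpenAI
//   azure: true,                               // useAzurePlugins || useAzure
// }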
@@ -1,11 +1,13 @@
-const { EModelEndpoint } = require('librechat-data-provider');
-const { isUserProvided, extractEnvVariable } = require('~/server/utils');
+const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
+const { isUserProvided } = require('~/server/utils');
 const getCustomConfig = require('./getCustomConfig');
 
 /**
  * Load config endpoints from the cached configuration object
- * @function loadConfigEndpoints */
-async function loadConfigEndpoints() {
+ * @param {Express.Request} req - The request object
+ * @returns {Promise<TEndpointsConfig>} A promise that resolves to an object containing the endpoints configuration
+ */
+async function loadConfigEndpoints(req) {
   const customConfig = await getCustomConfig();
 
   if (!customConfig) {
@@ -42,6 +44,13 @@ async function loadConfigEndpoints() {
     }
   }
 
+  if (req.app.locals[EModelEndpoint.azureOpenAI]) {
+    /** @type {Omit<TConfig, 'order'>} */
+    endpointsConfig[EModelEndpoint.azureOpenAI] = {
+      userProvide: false,
+    };
+  }
+
   return endpointsConfig;
 }
 
@@ -1,5 +1,5 @@
-const { EModelEndpoint } = require('librechat-data-provider');
-const { isUserProvided, extractEnvVariable } = require('~/server/utils');
+const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
+const { isUserProvided } = require('~/server/utils');
 const { fetchModels } = require('~/server/services/ModelService');
 const getCustomConfig = require('./getCustomConfig');
 
@@ -17,6 +17,16 @@ async function loadConfigModels(req) {
 
   const { endpoints = {} } = customConfig ?? {};
   const modelsConfig = {};
+  const azureModels = req.app.locals[EModelEndpoint.azureOpenAI]?.modelNames;
+  const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
+
+  if (azureModels && azureEndpoint) {
+    modelsConfig[EModelEndpoint.azureOpenAI] = azureModels;
+  }
+
+  if (azureModels && azureEndpoint && azureEndpoint.plugins) {
+    modelsConfig[EModelEndpoint.gptPlugins] = azureModels;
+  }
 
   if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
     return modelsConfig;
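Net effect of the added block: when Azure model names were loaded at startup and the azureOpenAI endpoint is present in librechat.yaml, the models payload lists them, and the same list is reused for gptPlugins when the config enables `plugins`. Sketch of the resulting object before custom endpoints are appended (model names are examples):

// modelsConfig after the azure branches (sketch):
// {
//   azureOpenAI: ['gpt-4', 'gpt-4-vision-preview'],
//   gptPlugins: ['gpt-4', 'gpt-4-vision-preview'], // only when the config file enables plugins
// }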
@@ -18,7 +18,7 @@ let i = 0;
async function loadCustomConfig() {
  // Use CONFIG_PATH if set, otherwise fallback to defaultConfigPath
  const configPath = process.env.CONFIG_PATH || defaultConfigPath;

  const customConfig = loadYaml(configPath);
  if (!customConfig) {
    i === 0 &&
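The resolution above means the config file no longer has to sit at `defaultConfigPath`: if `CONFIG_PATH` is set in the environment, it wins. A hypothetical example (the path is illustrative, not from this diff):

// Hypothetical: serve LibreChat with a config file elsewhere on disk.
// CONFIG_PATH=/srv/librechat/librechat.yaml
// With CONFIG_PATH unset, loadCustomConfig() falls back to defaultConfigPath.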
@@ -1,34 +1,17 @@
-const { EModelEndpoint } = require('librechat-data-provider');
+const { EModelEndpoint, getEnabledEndpoints } = require('librechat-data-provider');
 const loadAsyncEndpoints = require('./loadAsyncEndpoints');
 const { config } = require('./EndpointService');
 
 /**
  * Load async endpoints and return a configuration object
  * @function loadDefaultEndpointsConfig
+ * @param {Express.Request} req - The request object
  * @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
  */
-async function loadDefaultEndpointsConfig() {
-  const { google, gptPlugins } = await loadAsyncEndpoints();
+async function loadDefaultEndpointsConfig(req) {
+  const { google, gptPlugins } = await loadAsyncEndpoints(req);
   const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
-
-  let enabledEndpoints = [
-    EModelEndpoint.openAI,
-    EModelEndpoint.assistants,
-    EModelEndpoint.azureOpenAI,
-    EModelEndpoint.google,
-    EModelEndpoint.bingAI,
-    EModelEndpoint.chatGPTBrowser,
-    EModelEndpoint.gptPlugins,
-    EModelEndpoint.anthropic,
-  ];
-
-  const endpointsEnv = process.env.ENDPOINTS || '';
-  if (endpointsEnv) {
-    enabledEndpoints = endpointsEnv
-      .split(',')
-      .filter((endpoint) => endpoint?.trim())
-      .map((endpoint) => endpoint.trim());
-  }
+  const enabledEndpoints = getEnabledEndpoints();
 
   const endpointConfig = {
     [EModelEndpoint.openAI]: openAI,
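The deleted block above was the inline handling of the `ENDPOINTS` env var; that responsibility now sits in `getEnabledEndpoints` from librechat-data-provider. Judging only from the removed lines, the helper presumably behaves like this sketch (the real implementation lives in data-provider and may differ, e.g. in how it supplies the default list):

// Sketch of the behavior the removed lines implemented; not the actual
// librechat-data-provider source. `defaultEndpoints` stands in for the
// hard-coded list deleted above.
function getEnabledEndpointsSketch(defaultEndpoints) {
  const endpointsEnv = process.env.ENDPOINTS || '';
  if (!endpointsEnv) {
    return defaultEndpoints;
  }
  return endpointsEnv
    .split(',')
    .filter((endpoint) => endpoint?.trim())
    .map((endpoint) => endpoint.trim());
}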