🅰️ feat: Azure Config to Allow Different Deployments per Model (#1863)

* wip: first pass for azure endpoint schema

* refactor: azure config to return groupMap and modelConfigMap

* wip: naming and schema changes

* refactor(errorsToString): move to data-provider

* feat: rename to azureGroups, add additional tests covering all expected outcomes, return errors

* feat(AppService): load Azure groups

* refactor(azure): use imported types, write `mapModelToAzureConfig`

* refactor: move `extractEnvVariable` to data-provider

* refactor(validateAzureGroups): throw on duplicate groups or models; feat(mapModelToAzureConfig): throw if env vars not present, add tests

* refactor(AppService): ensure each model is properly configured on startup

* refactor: deprecate azureOpenAI environment variables in favor of librechat.yaml config

* feat: use helper functions to handle and order enabled/default endpoints; initialize azureOpenAI from config file

* refactor: redefine types as well as load azureOpenAI models from config file

* chore(ci): fix test description naming

* feat(azureOpenAI): use validated model grouping for request authentication

* chore: bump data-provider following rebase

* chore: bump config file version noting significant changes

* feat: add title options and switch azure configs for titling and vision requests

* feat: enable azure plugins from config file

* fix(ci): pass tests

* chore(.env.example): mark `PLUGINS_USE_AZURE` as deprecated

* fix(fetchModels): early return if apiKey not passed

* chore: fix azure config typing

* refactor(mapModelToAzureConfig): return baseURL and headers as well as azureOptions

* feat(createLLM): use `azureOpenAIBasePath`

* feat(parsers): resolveHeaders

* refactor(extractBaseURL): handle invalid input

* feat(OpenAIClient): handle headers and baseURL for azureConfig

* fix(ci): pass `OpenAIClient` tests

* chore: extract env var for azureOpenAI group config, baseURL

* docs: azureOpenAI config setup docs

* feat: safe check of potential conflicting env vars that map to unique placeholders

* fix: reset apiKey when model switches from originally requested model (vision or title)

* chore: linting

* docs: CONFIG_PATH notes in custom_config.md
Danny Avila 2024-02-26 14:12:25 -05:00 committed by GitHub
parent 7a55132e42
commit 097a978e5b
37 changed files with 2066 additions and 394 deletions

View file

@@ -66,18 +66,21 @@ ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
# Azure #
#============#
# AZURE_API_KEY=
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
# PLUGINS_USE_AZURE="true"
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
# AZURE_OPENAI_API_INSTANCE_NAME=
# AZURE_OPENAI_API_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_VERSION=
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
# AZURE_API_KEY= # Deprecated
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_VERSION= # Deprecated
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
#============#
# BingAI #

View file

@@ -1,10 +1,13 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
ImageDetail,
EModelEndpoint,
resolveHeaders,
ImageDetailCost,
getResponseSender,
validateVisionModel,
ImageDetailCost,
ImageDetail,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
@@ -665,6 +668,16 @@ class OpenAIClient extends BaseClient {
};
}
const { headers } = this.options;
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
configOptions.baseOptions = {
headers: resolveHeaders({
...headers,
...configOptions?.baseOptions?.headers,
}),
};
}
if (this.options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
@@ -725,6 +738,26 @@
max_tokens: 16,
};
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
if (this.azure && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
this.azure = azureOptions;
this.options.headers = resolveHeaders(headers);
this.options.reverseProxyUrl = baseURL ?? null;
this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
this.apiKey = azureOptions.azureOpenAIApiKey;
}
const titleChatCompletion = async () => {
modelOptions.model = model;
@@ -975,6 +1008,27 @@ ${convo}
modelOptions.max_tokens = 4000;
}
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
if (this.azure && this.isVisionModel && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
this.azure = azureOptions;
this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model, this);
opts.defaultHeaders = resolveHeaders(headers);
this.langchainProxy = extractBaseURL(baseURL);
this.apiKey = azureOptions.azureOpenAIApiKey;
}
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
@@ -1026,12 +1080,20 @@ ${convo}
...modelOptions,
...this.options.addParams,
};
logger.debug('[OpenAIClient] chatCompletion: added params', {
addParams: this.options.addParams,
modelOptions,
});
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
logger.debug('[OpenAIClient] chatCompletion: dropped params', {
dropParams: this.options.dropParams,
modelOptions,
});
}
let UnexpectedRoleError = false;

View file

@@ -55,10 +55,13 @@ function createLLM({
}
if (azure && configOptions.basePath) {
configOptions.basePath = constructAzureURL({
const azureURL = constructAzureURL({
baseURL: configOptions.basePath,
azure: azureOptions,
});
azureOptions.azureOpenAIBasePath = azureURL.split(
`/${azureOptions.azureOpenAIApiDeploymentName}`,
)[0];
}
return new ChatOpenAI(

View file

@@ -1,4 +1,4 @@
const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
const { CacheKeys, EModelEndpoint, orderEndpointsConfig } = require('librechat-data-provider');
const { loadDefaultEndpointsConfig, loadConfigEndpoints } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
@@ -10,15 +10,18 @@ async function endpointController(req, res) {
return;
}
const defaultEndpointsConfig = await loadDefaultEndpointsConfig();
const customConfigEndpoints = await loadConfigEndpoints();
const defaultEndpointsConfig = await loadDefaultEndpointsConfig(req);
const customConfigEndpoints = await loadConfigEndpoints(req);
const endpointsConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (endpointsConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
endpointsConfig[EModelEndpoint.assistants].disableBuilder =
/** @type {TEndpointsConfig} */
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
mergedConfig[EModelEndpoint.assistants].disableBuilder =
req.app.locals[EModelEndpoint.assistants].disableBuilder;
}
const endpointsConfig = orderEndpointsConfig(mergedConfig);
await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
res.send(JSON.stringify(endpointsConfig));
}

View file

@@ -1,8 +1,12 @@
const {
Constants,
FileSources,
EModelEndpoint,
Constants,
defaultSocialLogins,
validateAzureGroups,
mapModelToAzureConfig,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
const { initializeFirebase } = require('./Files/Firebase/initialize');
const loadCustomConfig = require('./Config/loadCustomConfig');
@@ -62,6 +66,50 @@ const AppService = async (app) => {
handleRateLimits(config?.rateLimits);
const endpointLocals = {};
if (config?.endpoints?.[EModelEndpoint.azureOpenAI]) {
const { groups, titleModel, titleConvo, titleMethod, plugins } =
config.endpoints[EModelEndpoint.azureOpenAI];
const { isValid, modelNames, modelGroupMap, groupMap, errors } = validateAzureGroups(groups);
if (!isValid) {
const errorString = errors.join('\n');
const errorMessage = 'Invalid Azure OpenAI configuration:\n' + errorString;
logger.error(errorMessage);
throw new Error(errorMessage);
}
for (const modelName of modelNames) {
mapModelToAzureConfig({ modelName, modelGroupMap, groupMap });
}
endpointLocals[EModelEndpoint.azureOpenAI] = {
modelNames,
modelGroupMap,
groupMap,
titleConvo,
titleMethod,
titleModel,
plugins,
};
deprecatedAzureVariables.forEach(({ key, description }) => {
if (process.env[key]) {
logger.warn(
`The \`${key}\` environment variable (related to ${description}) should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you will experience conflicts and errors.`,
);
}
});
conflictingAzureVariables.forEach(({ key }) => {
if (process.env[key]) {
logger.warn(
`The \`${key}\` environment variable should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you may experience conflicts with the defined placeholders for mapping to the current model grouping using the same name.`,
);
}
});
}
if (config?.endpoints?.[EModelEndpoint.assistants]) {
const { disableBuilder, pollIntervalMs, timeoutMs, supportedIds, excludedIds } =
config.endpoints[EModelEndpoint.assistants];

View file

@@ -1,4 +1,11 @@
const { FileSources, defaultSocialLogins } = require('librechat-data-provider');
const {
FileSources,
EModelEndpoint,
defaultSocialLogins,
validateAzureGroups,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
const AppService = require('./AppService');
@@ -32,6 +39,43 @@ jest.mock('./ToolService', () => ({
}),
}));
const azureGroups = [
{
group: 'librechat-westus',
apiKey: '${WESTUS_API_KEY}',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: {
'gpt-4-vision-preview': {
deploymentName: 'gpt-4-vision-preview',
version: '2024-02-15-preview',
},
'gpt-3.5-turbo': {
deploymentName: 'gpt-35-turbo',
},
'gpt-3.5-turbo-1106': {
deploymentName: 'gpt-35-turbo-1106',
},
'gpt-4': {
deploymentName: 'gpt-4',
},
'gpt-4-1106-preview': {
deploymentName: 'gpt-4-1106-preview',
},
},
},
{
group: 'librechat-eastus',
apiKey: '${EASTUS_API_KEY}',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: {
'gpt-4-turbo': true,
},
},
];
describe('AppService', () => {
let app;
@@ -122,11 +166,11 @@ describe('AppService', () => {
});
});
it('should correctly configure endpoints based on custom config', async () => {
it('should correctly configure Assistants endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
assistants: {
[EModelEndpoint.assistants]: {
disableBuilder: true,
pollIntervalMs: 5000,
timeoutMs: 30000,
@@ -138,8 +182,8 @@
await AppService(app);
expect(app.locals).toHaveProperty('assistants');
expect(app.locals.assistants).toEqual(
expect(app.locals).toHaveProperty(EModelEndpoint.assistants);
expect(app.locals[EModelEndpoint.assistants]).toEqual(
expect.objectContaining({
disableBuilder: true,
pollIntervalMs: 5000,
@@ -149,6 +193,34 @@
);
});
it('should correctly configure Azure OpenAI endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
process.env.WESTUS_API_KEY = 'westus-key';
process.env.EASTUS_API_KEY = 'eastus-key';
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.azureOpenAI);
const azureConfig = app.locals[EModelEndpoint.azureOpenAI];
expect(azureConfig).toHaveProperty('modelNames');
expect(azureConfig).toHaveProperty('modelGroupMap');
expect(azureConfig).toHaveProperty('groupMap');
const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(azureGroups);
expect(azureConfig.modelNames).toEqual(modelNames);
expect(azureConfig.modelGroupMap).toEqual(modelGroupMap);
expect(azureConfig.groupMap).toEqual(groupMap);
});
it('should not modify FILE_UPLOAD environment variables without rate limits', async () => {
// Setup initial environment variables
process.env.FILE_UPLOAD_IP_MAX = '10';
@@ -213,7 +285,7 @@ describe('AppService', () => {
});
});
describe('AppService updating app.locals', () => {
describe('AppService updating app.locals and issuing warnings', () => {
let app;
let initialEnv;
@@ -309,4 +381,56 @@ describe('AppService updating app.locals', () => {
expect.stringContaining('Both `supportedIds` and `excludedIds` are defined'),
);
});
it('should issue expected warnings when loading Azure Groups with deprecated Environment Variables', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
deprecatedAzureVariables.forEach((varInfo) => {
process.env[varInfo.key] = 'test';
});
const app = { locals: {} };
await require('./AppService')(app);
const { logger } = require('~/config');
deprecatedAzureVariables.forEach(({ key, description }) => {
expect(logger.warn).toHaveBeenCalledWith(
`The \`${key}\` environment variable (related to ${description}) should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you will experience conflicts and errors.`,
);
});
});
it('should issue expected warnings when loading conflicting Azure Environment Variables', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
conflictingAzureVariables.forEach((varInfo) => {
process.env[varInfo.key] = 'test';
});
const app = { locals: {} };
await require('./AppService')(app);
const { logger } = require('~/config');
conflictingAzureVariables.forEach(({ key }) => {
expect(logger.warn).toHaveBeenCalledWith(
`The \`${key}\` environment variable should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you may experience conflicts with the defined placeholders for mapping to the current model grouping using the same name.`,
);
});
});
});

View file

@@ -1,6 +1,7 @@
const crypto = require('crypto');
const bcrypt = require('bcryptjs');
const { registerSchema, errorsToString } = require('~/strategies/validators');
const { errorsToString } = require('librechat-data-provider');
const { registerSchema } = require('~/strategies/validators');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const Token = require('~/models/schema/tokenSchema');
const { sendEmail } = require('~/server/utils');

View file

@@ -1,12 +1,14 @@
const { availableTools } = require('~/app/clients/tools');
const { EModelEndpoint } = require('librechat-data-provider');
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
const { availableTools } = require('~/app/clients/tools');
const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } =
require('./EndpointService').config;
/**
* Load async endpoints and return a configuration object
* @param {Express.Request} req - The request object
*/
async function loadAsyncEndpoints() {
async function loadAsyncEndpoints(req) {
let i = 0;
let serviceKey, googleUserProvides;
try {
@@ -35,13 +37,14 @@ async function loadAsyncEndpoints() {
const google = serviceKey || googleKey ? { userProvide: googleUserProvides } : false;
const useAzure = req.app.locals[EModelEndpoint.azureOpenAI]?.plugins;
const gptPlugins =
openAIApiKey || azureOpenAIApiKey
useAzure || openAIApiKey || azureOpenAIApiKey
? {
plugins,
availableAgents: ['classic', 'functions'],
userProvide: userProvidedOpenAI,
azure: useAzurePlugins,
userProvide: useAzure ? false : userProvidedOpenAI,
azure: useAzurePlugins || useAzure,
}
: false;

View file

@@ -1,11 +1,13 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { isUserProvided } = require('~/server/utils');
const getCustomConfig = require('./getCustomConfig');
/**
* Load config endpoints from the cached configuration object
* @function loadConfigEndpoints */
async function loadConfigEndpoints() {
* @param {Express.Request} req - The request object
* @returns {Promise<TEndpointsConfig>} A promise that resolves to an object containing the endpoints configuration
*/
async function loadConfigEndpoints(req) {
const customConfig = await getCustomConfig();
if (!customConfig) {
@@ -42,6 +44,13 @@ async function loadConfigEndpoints() {
}
}
if (req.app.locals[EModelEndpoint.azureOpenAI]) {
/** @type {Omit<TConfig, 'order'>} */
endpointsConfig[EModelEndpoint.azureOpenAI] = {
userProvide: false,
};
}
return endpointsConfig;
}

View file

@@ -1,5 +1,5 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { isUserProvided } = require('~/server/utils');
const { fetchModels } = require('~/server/services/ModelService');
const getCustomConfig = require('./getCustomConfig');
@@ -17,6 +17,16 @@ async function loadConfigModels(req) {
const { endpoints = {} } = customConfig ?? {};
const modelsConfig = {};
const azureModels = req.app.locals[EModelEndpoint.azureOpenAI]?.modelNames;
const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
if (azureModels && azureEndpoint) {
modelsConfig[EModelEndpoint.azureOpenAI] = azureModels;
}
if (azureModels && azureEndpoint && azureEndpoint.plugins) {
modelsConfig[EModelEndpoint.gptPlugins] = azureModels;
}
if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
return modelsConfig;

View file

@@ -18,7 +18,7 @@ let i = 0;
async function loadCustomConfig() {
// Use CONFIG_PATH if set, otherwise fallback to defaultConfigPath
const configPath = process.env.CONFIG_PATH || defaultConfigPath;
const customConfig = loadYaml(configPath);
if (!customConfig) {
i === 0 &&

View file

@@ -1,34 +1,17 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { EModelEndpoint, getEnabledEndpoints } = require('librechat-data-provider');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const { config } = require('./EndpointService');
/**
* Load async endpoints and return a configuration object
* @function loadDefaultEndpointsConfig
* @param {Express.Request} req - The request object
* @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
*/
async function loadDefaultEndpointsConfig() {
const { google, gptPlugins } = await loadAsyncEndpoints();
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
let enabledEndpoints = [
EModelEndpoint.openAI,
EModelEndpoint.assistants,
EModelEndpoint.azureOpenAI,
EModelEndpoint.google,
EModelEndpoint.bingAI,
EModelEndpoint.chatGPTBrowser,
EModelEndpoint.gptPlugins,
EModelEndpoint.anthropic,
];
const endpointsEnv = process.env.ENDPOINTS || '';
if (endpointsEnv) {
enabledEndpoints = endpointsEnv
.split(',')
.filter((endpoint) => endpoint?.trim())
.map((endpoint) => endpoint.trim());
}
const enabledEndpoints = getEnabledEndpoints();
const endpointConfig = {
[EModelEndpoint.openAI]: openAI,

View file

@@ -1,13 +1,16 @@
const { EModelEndpoint, CacheKeys } = require('librechat-data-provider');
const {
EModelEndpoint,
CacheKeys,
extractEnvVariable,
envVarRegex,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { fetchModels } = require('~/server/services/ModelService');
const getLogStores = require('~/cache/getLogStores');
const { isUserProvided } = require('~/server/utils');
const { OpenAIClient } = require('~/app');
const envVarRegex = /^\${(.+)}$/;
const { PROXY } = process.env;
const initializeClient = async ({ req, res, endpointOption }) => {

View file

@@ -1,4 +1,8 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getAzureCredentials } = require('~/utils');
const { isEnabled } = require('~/server/utils');
@@ -16,11 +20,19 @@ const initializeClient = async ({ req, res, endpointOption }) => {
DEBUG_PLUGINS,
} = process.env;
const { key: expiresAt } = req.body;
const { key: expiresAt, model: modelName } = req.body;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const useAzure = isEnabled(PLUGINS_USE_AZURE);
const endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;
let useAzure = isEnabled(PLUGINS_USE_AZURE);
let endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;
/** @type {false | TAzureConfig} */
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
useAzure = useAzure || azureConfig?.plugins;
if (useAzure && endpoint !== EModelEndpoint.azureOpenAI) {
endpoint = EModelEndpoint.azureOpenAI;
}
const baseURLOptions = {
[EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
@@ -59,8 +71,26 @@ const initializeClient = async ({ req, res, endpointOption }) => {
}
let apiKey = isUserProvided ? userKey : credentials[endpoint];
if (useAzure && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
clientOptions.azure = azureOptions;
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
apiKey = clientOptions.azure.azureOpenAIApiKey;
} else if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

View file

@@ -13,6 +13,9 @@ jest.mock('~/server/services/UserService', () => ({
describe('gptPlugins/initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
beforeEach(() => {
jest.resetModules(); // Clears the cache
@@ -32,6 +35,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -56,6 +60,7 @@
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@@ -73,6 +78,7 @@
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -89,6 +95,7 @@
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -108,6 +115,7 @@
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -129,6 +137,7 @@
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -148,6 +157,7 @@
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@@ -171,6 +181,7 @@
const req = {
body: { key: expiresAt },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -187,6 +198,7 @@
const req = {
body: { key: new Date(Date.now() + 10000).toISOString() },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@@ -207,6 +219,7 @@
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };

View file

@@ -1,4 +1,8 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getAzureCredentials } = require('~/utils');
const { isEnabled } = require('~/server/utils');
@@ -14,7 +18,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
OPENAI_SUMMARIZE,
DEBUG_OPENAI,
} = process.env;
const { key: expiresAt, endpoint } = req.body;
const { key: expiresAt, endpoint, model: modelName } = req.body;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const baseURLOptions = {
@@ -51,8 +55,30 @@ const initializeClient = async ({ req, res, endpointOption }) => {
}
let apiKey = isUserProvided ? userKey : credentials[endpoint];
const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
/** @type {false | TAzureConfig} */
const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];
if (endpoint === EModelEndpoint.azureOpenAI) {
if (isAzureOpenAI && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
clientOptions.azure = azureOptions;
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
apiKey = clientOptions.azure.azureOpenAIApiKey;
} else if (isAzureOpenAI) {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

View file

@@ -12,6 +12,9 @@ jest.mock('~/server/services/UserService', () => ({
describe('initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
beforeEach(() => {
jest.resetModules(); // Clears the cache
@@ -30,6 +33,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -54,6 +58,7 @@
const req = {
body: { key: null, endpoint: 'azureOpenAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@@ -71,6 +76,7 @@
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -87,6 +93,7 @@
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -104,6 +111,7 @@
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -124,6 +132,7 @@
const req = {
body: { key: expiresAt, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -141,6 +150,7 @@
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@@ -160,6 +170,7 @@
user: {
id: '123',
},
app,
};
const res = {};
@@ -183,6 +194,7 @@
const req = {
body: { key: invalidKey, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};

View file

@@ -38,6 +38,10 @@ const fetchModels = async ({
return models;
}
if (!apiKey) {
return models;
}
try {
const options = {
headers: {
@@ -92,9 +96,7 @@
},
);
} else {
logger.error(`${logMessage} Something happened in setting up the request`, {
message: error.message ? error.message : '',
});
logger.error(`${logMessage} Something happened in setting up the request`, error);
}
}

View file

@@ -172,19 +172,6 @@ function isEnabled(value) {
*/
const isUserProvided = (value) => value === 'user_provided';
/**
* Extracts the value of an environment variable from a string.
* @param {string} value - The value to be processed, possibly containing an env variable placeholder.
* @returns {string} - The actual value from the environment variable or the original value.
*/
function extractEnvVariable(value) {
const envVarMatch = value.match(/^\${(.+)}$/);
if (envVarMatch) {
return process.env[envVarMatch[1]] || value;
}
return value;
}
module.exports = {
createOnProgress,
isEnabled,
@@ -193,5 +180,4 @@ module.exports = {
formatAction,
addSpaceIfNeeded,
isUserProvided,
extractEnvVariable,
};

View file

@@ -1,4 +1,4 @@
const { isEnabled, extractEnvVariable } = require('./handleText');
const { isEnabled } = require('./handleText');
describe('isEnabled', () => {
test('should return true when input is "true"', () => {
@@ -48,51 +48,4 @@
test('should return false when input is an array', () => {
expect(isEnabled([])).toBe(false);
});
describe('extractEnvVariable', () => {
const originalEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterAll(() => {
process.env = originalEnv;
});
test('should return the value of the environment variable', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
});
test('should return the original string if the environment variable is not defined correctly', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
});
test('should return the original string if environment variable is not set', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
test('should return the original string if it does not contain an environment variable', () => {
expect(extractEnvVariable('some_string')).toBe('some_string');
});
test('should handle empty strings', () => {
expect(extractEnvVariable('')).toBe('');
});
test('should handle strings without variable format', () => {
expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
});
test('should not process multiple variable formats', () => {
process.env.FIRST_VAR = 'first';
process.env.SECOND_VAR = 'second';
expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
'${FIRST_VAR} and ${SECOND_VAR}',
);
});
});
});

View file

@@ -1,7 +1,8 @@
const { errorsToString } = require('librechat-data-provider');
const { Strategy: PassportLocalStrategy } = require('passport-local');
const User = require('../models/User');
const { loginSchema, errorsToString } = require('./validators');
const logger = require('../utils/logger');
const { loginSchema } = require('./validators');
const logger = require('~/utils/logger');
const User = require('~/models/User');
async function validateLoginRequest(req) {
const { error } = loginSchema.safeParse(req.body);

View file

@@ -1,16 +1,5 @@
const { z } = require('zod');
function errorsToString(errors) {
return errors
.map((error) => {
let field = error.path.join('.');
let message = error.message;
return `${field}: ${message}`;
})
.join(' ');
}
const allowedCharactersRegex = /^[a-zA-Z0-9_.@#$%&*()\p{Script=Latin}\p{Script=Common}]+$/u;
const injectionPatternsRegex = /('|--|\$ne|\$gt|\$lt|\$or|\{|\}|\*|;|<|>|\/|=)/i;
@@ -72,5 +61,4 @@ const registerSchema = z
module.exports = {
loginSchema,
registerSchema,
errorsToString,
};

View file

@@ -1,6 +1,6 @@
// file deepcode ignore NoHardcodedPasswords: No hard-coded passwords in tests
const { loginSchema, registerSchema, errorsToString } = require('./validators');
const { errorsToString } = require('librechat-data-provider');
const { loginSchema, registerSchema } = require('./validators');
describe('Zod Schemas', () => {
describe('loginSchema', () => {

View file

@@ -32,6 +32,29 @@
* @memberof typedefs
*/
/**
* @exports TAzureGroups
* @typedef {import('librechat-data-provider').TAzureGroups} TAzureGroups
* @memberof typedefs
*/
/**
* @exports TAzureModelGroupMap
* @typedef {import('librechat-data-provider').TAzureModelGroupMap} TAzureModelGroupMap
* @memberof typedefs
*/
/**
* @exports TAzureGroupMap
* @typedef {import('librechat-data-provider').TAzureGroupMap} TAzureGroupMap
* @memberof typedefs
*/
/**
* @exports TAzureConfig
* @typedef {import('librechat-data-provider').TAzureConfig} TAzureConfig
* @memberof typedefs
*/
/**
* @exports TModelsConfig
* @typedef {import('librechat-data-provider').TModelsConfig} TModelsConfig
@@ -50,6 +73,12 @@
* @memberof typedefs
*/
/**
* @exports TEndpointsConfig
* @typedef {import('librechat-data-provider').TEndpointsConfig} TEndpointsConfig
* @memberof typedefs
*/
/**
* @exports TMessage
* @typedef {import('librechat-data-provider').TMessage} TMessage

View file

@@ -12,9 +12,13 @@
* - `https://api.example.com/v1/replicate` -> `https://api.example.com/v1/replicate`
*
* @param {string} url - The URL to be processed.
* @returns {string} The matched pattern or input if no match is found.
* @returns {string | undefined} The matched pattern or input if no match is found.
*/
function extractBaseURL(url) {
if (!url || typeof url !== 'string') {
return undefined;
}
if (!url.includes('/v1')) {
return url;
}

View file

@@ -236,221 +236,9 @@ Note: Using Gemini models through Vertex AI is possible but not yet supported.
## Azure OpenAI
In order to use Azure OpenAI with this project, specific environment variables must be set in your `.env` file. These variables will be used for constructing the API URLs.
### Please see the dedicated [Azure OpenAI Setup Guide](./azure_openai.md).
The variables needed are outlined below:
### Required Variables
These variables construct the API URL for Azure OpenAI.
* `AZURE_API_KEY`: Your Azure OpenAI API key.
* `AZURE_OPENAI_API_INSTANCE_NAME`: The instance name of your Azure OpenAI API.
* `AZURE_OPENAI_API_DEPLOYMENT_NAME`: The deployment name of your Azure OpenAI API.
* `AZURE_OPENAI_API_VERSION`: The version of your Azure OpenAI API.
For example, with these variables, the URL for chat completion would look something like:
```plaintext
https://{AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments/{AZURE_OPENAI_API_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}
```
You should also consider changing the `AZURE_OPENAI_MODELS` variable to the models available in your deployment.
```bash
# .env file
AZURE_OPENAI_MODELS=gpt-4-1106-preview,gpt-4,gpt-3.5-turbo,gpt-3.5-turbo-1106,gpt-4-vision-preview
```
Overriding the construction of the API URL will be possible but is not yet implemented. Follow progress on this feature here: **[Issue #1266](https://github.com/danny-avila/LibreChat/issues/1266)**
### Model Deployments
> Note: a change will be developed to improve current configuration settings, allowing multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
As of 2023-12-18, the Azure API allows only one model per deployment.
**It's highly recommended** to name your deployments *after* the model name (e.g., "gpt-3.5-turbo") for easy deployment switching.
When you do so, LibreChat will correctly switch the deployment, while associating the correct max context per model, if you have the following environment variable set:
```bash
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
For example, when you have set `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE`, the following deployment configuration provides the most seamless, error-free experience for LibreChat, including Vision support and tracking the correct max context tokens:
![Screenshot 2023-12-18 111742](https://github.com/danny-avila/LibreChat/assets/110412045/4aa8a61c-0317-4681-8262-a6382dcaa7b0)
Alternatively, you can use custom deployment names and set `AZURE_OPENAI_DEFAULT_MODEL` for expected functionality.
- **`AZURE_OPENAI_MODELS`**: List the available models, separated by commas without spaces. The first listed model will be the default. If left blank, internal settings will be used. Note that deployment names can't have periods, which are removed when generating the endpoint.
Example use:
```bash
# .env file
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4,gpt-5
```
- **`AZURE_USE_MODEL_AS_DEPLOYMENT_NAME`**: Enable using the model name as the deployment name for the API URL.
Example use:
```bash
# .env file
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
### Setting a Default Model for Azure
This section is relevant when you are **not** naming deployments after model names as shown above.
**Important:** The Azure OpenAI API does not use the `model` field in the payload, but it is a necessary identifier for LibreChat. If your deployment names do not correspond to the model names, and you're having issues with the model not being recognized, you should set this field to explicitly tell LibreChat to treat your Azure OpenAI API requests as if the specified model was selected.
If `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` is enabled, the model you set with `AZURE_OPENAI_DEFAULT_MODEL` will **not** be recognized and will **not** be used as the deployment name; instead, the model selected by the user will be used as the "deployment" name.
- **`AZURE_OPENAI_DEFAULT_MODEL`**: Override the model setting for Azure, useful if using custom deployment names.
Example use:
```bash
# .env file
# MUST be a real OpenAI model, named exactly how it is recognized by OpenAI API (not Azure)
AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # do include periods in the model name here
```
### Using a Specified Base URL with Azure
The base URL for Azure OpenAI API requests can be dynamically configured. This is useful for proxying services such as [Cloudflare AI Gateway](https://developers.cloudflare.com/ai-gateway/providers/azureopenai/), or if you wish to explicitly override the baseURL handling of the app.
LibreChat will use the `AZURE_OPENAI_BASEURL` environment variable, which can include placeholders for the Azure OpenAI API instance and deployment names.
In the application's environment configuration, the base URL is set like this:
```bash
# .env file
AZURE_OPENAI_BASEURL=https://example.azure-api.net/${INSTANCE_NAME}/${DEPLOYMENT_NAME}
# OR
AZURE_OPENAI_BASEURL=https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}
# Cloudflare example
AZURE_OPENAI_BASEURL=https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/${INSTANCE_NAME}/${DEPLOYMENT_NAME}
```
The application replaces `${INSTANCE_NAME}` and `${DEPLOYMENT_NAME}` in the `AZURE_OPENAI_BASEURL`, processed according to the other settings discussed in the guide.
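For example, assuming a hypothetical instance named `instance-1` and a deployment named `gpt-4-deployment`, the second form above would resolve to:
```plaintext
https://instance-1.openai.azure.com/openai/deployments/gpt-4-deployment
```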
**You can also omit the placeholders completely and simply construct the baseURL with your credentials:**
```bash
# .env file
AZURE_OPENAI_BASEURL=https://instance-1.openai.azure.com/openai/deployments/deployment-1
# Cloudflare example
AZURE_OPENAI_BASEURL=https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/instance-1/deployment-1
```
Setting these values will override all of the application's internal handling of the instance and deployment names and use your specified base URL.
**Notes:**
- You should still provide the `AZURE_OPENAI_API_VERSION` and `AZURE_API_KEY` via the .env file as they are programmatically added to the requests.
- When specifying instance and deployment names in the `AZURE_OPENAI_BASEURL`, their respective environment variables can be omitted (`AZURE_OPENAI_API_INSTANCE_NAME` and `AZURE_OPENAI_API_DEPLOYMENT_NAME`) except for use with Plugins.
- Specifying instance and deployment names in the `AZURE_OPENAI_BASEURL` instead of placeholders creates conflicts with "plugins," "vision," "default-model," and "model-as-deployment-name" support.
- Due to the conflicts that arise with other features, it is recommended to use placeholders for instance and deployment names in the `AZURE_OPENAI_BASEURL`
### Enabling Auto-Generated Titles with Azure
The default titling model is set to `gpt-3.5-turbo`.
If you're using `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` and have "gpt-35-turbo" set up as a deployment name, this should work out-of-the-box.
In any case, you can adjust the title model as such: `OPENAI_TITLE_MODEL=your-title-model`
### Using GPT-4 Vision with Azure
Currently, the best way to set up Vision is to use your deployment names as the model names, as [shown here](#model-deployments)
This will work seamlessly as it does with the [OpenAI endpoint](#openai) (no need to select the vision model, it will be switched behind the scenes)
Alternatively, you can set the [required variables](#required-variables) to explicitly use your vision deployment, but this may limit you to exclusively using your vision deployment for all Azure chat settings.
**Notes:**
- If using `AZURE_OPENAI_BASEURL`, use placeholders rather than hardcoded instance and deployment names, as the vision request will otherwise fail.
- As of December 18th, 2023, Vision models seem to have degraded performance with Azure OpenAI when compared to [OpenAI](#openai)
![image](https://github.com/danny-avila/LibreChat/assets/110412045/7306185f-c32c-4483-9167-af514cc1c2dd)
> Note: a change will be developed to improve current configuration settings, allowing multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
### Generate images with Azure OpenAI Service (DALL-E)
| Model ID | Feature Availability | Max Request (characters) |
|----------|----------------------|-------------------------|
| dalle2 | East US | 1000 |
| dalle3 | Sweden Central | 4000 |
- First you need to create an Azure resource that hosts DALL-E
- At the time of writing, dall-e-3 is available in the `SwedenCentral` region, dall-e-2 in the `EastUS` region.
- Then, you need to deploy the image generation model in one of the above regions.
- Read the [Azure OpenAI Image Generation Quickstart Guide](https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart) for further assistance
- Configure your environment variables based on Azure credentials:
**- For DALL-E-3:**
```bash
DALLE3_AZURE_API_VERSION=the-api-version # e.g.: 2023-12-01-preview
DALLE3_BASEURL=https://<AZURE_OPENAI_API_INSTANCE_NAME>.openai.azure.com/openai/deployments/<DALLE3_DEPLOYMENT_NAME>/
DALLE3_API_KEY=your-azure-api-key-for-dall-e-3
```
**- For DALL-E-2:**
```bash
DALLE2_AZURE_API_VERSION=the-api-version # e.g.: 2023-12-01-preview
DALLE2_BASEURL=https://<AZURE_OPENAI_API_INSTANCE_NAME>.openai.azure.com/openai/deployments/<DALLE2_DEPLOYMENT_NAME>/
DALLE2_API_KEY=your-azure-api-key-for-dall-e-2
```
**DALL-E Notes:**
- For DALL-E-3, the default system prompt has the LLM prefer the ["vivid" style](https://platform.openai.com/docs/api-reference/images/create#images-create-style) parameter, which seems to be the preferred setting for ChatGPT as "natural" can sometimes produce lackluster results.
- See official prompt for reference: **[DALL-E System Prompt](https://github.com/spdustin/ChatGPT-AutoExpert/blob/main/_system-prompts/dall-e.md)**
- You can adjust the system prompts to your liking:
```bash
DALLE3_SYSTEM_PROMPT="Your DALL-E-3 System Prompt here"
DALLE2_SYSTEM_PROMPT="Your DALL-E-2 System Prompt here"
```
- The `DALLE_REVERSE_PROXY` environment variable is ignored when Azure credentials (DALLEx_AZURE_API_VERSION and DALLEx_BASEURL) for DALL-E are configured.
### Optional Variables
*These variables are currently not used by LibreChat*
* `AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME`: The deployment name for completion. This is currently not in use but may be used in future.
* `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME`: The deployment name for embedding. This is currently not in use but may be used in future.
These two variables are optional but may be used in future updates of this project.
### Using Plugins with Azure
Note: To use the Plugins endpoint with Azure OpenAI, you need a deployment supporting **[function calling](https://techcommunity.microsoft.com/t5/azure-ai-services-blog/function-calling-is-now-available-in-azure-openai-service/ba-p/3879241)**. Otherwise, you need to set "Functions" off in the Agent settings. When you are not using "functions" mode, it's recommended to have "skip completion" off as well, which is a review step of what the agent generated.
To use Azure with the Plugins endpoint, make sure the following environment variables are set:
* `PLUGINS_USE_AZURE`: If set to "true" or any truthy value, this will enable the program to use Azure with the Plugins endpoint.
* `AZURE_API_KEY`: Your Azure API key must be set with an environment variable.
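For example, a minimal sketch of the relevant `.env` entries:
```bash
# .env file
PLUGINS_USE_AZURE="true"
AZURE_API_KEY=your-azure-api-key
```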
**Important:**
- If using `AZURE_OPENAI_BASEURL`, use placeholders rather than hardcoded instance and deployment names, as the plugin request will otherwise fail.
This was done to improve upon legacy configuration settings, allowing multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
---
@@ -476,6 +264,10 @@ OpenRouter is integrated to the LibreChat by overriding the OpenAI endpoint.
#### Setup (legacy):
**Note:** It is NOT recommended to set up OpenRouter this way with versions 0.6.6 or higher of LibreChat, as it may be removed in future versions.
As noted earlier, [review the Custom Config Guide (click here)](./custom_config.md) to add an `OpenRouter` Endpoint instead.
- Sign up to **[OpenRouter](https://openrouter.ai/)** and create a key. You should name it and set a limit as well.
- Set the environment variable `OPENROUTER_API_KEY` in your .env file to the key you just created.
- Set `OPENAI_API_KEY` to something; it can be anything, but **do not** leave it blank or set it to `user_provided`

View file

@@ -0,0 +1,406 @@
# Azure OpenAI
**Azure OpenAI Integration for LibreChat**
To properly utilize Azure OpenAI within LibreChat, it's crucial to configure the [`librechat.yaml` file](./custom_config.md#azure-openai-object-structure) according to your specific needs. This document guides you through the essential setup process, which allows seamless use of multiple deployments and models with as much flexibility as needed.
## Setup
1. **Open `librechat.yaml` for Editing**: Use your preferred text editor or IDE to open and edit the `librechat.yaml` file.
2. **Configure Azure OpenAI Settings**: Follow the detailed structure outlined below to populate your Azure OpenAI settings appropriately. This includes specifying API keys, instance names, model groups, and other essential configurations.
3. **Save Your Changes**: After accurately inputting your settings, save the `librechat.yaml` file.
4. **Restart LibreChat**: For the changes to take effect, restart your LibreChat application. This ensures that the updated configurations are loaded and utilized.
Here's a working example configured according to the specifications of the [Azure OpenAI Endpoint Configuration Docs](./custom_config.md#azure-openai-object-structure).
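In this sketch, the group name, API key variable, instance name, and deployment name are placeholders; a fuller configuration is walked through under [Model Deployments](#model-deployments):
```yaml
endpoints:
  azureOpenAI:
    titleConvo: true
    groups:
      - group: "my-azure-group"
        apiKey: "${MY_AZURE_API_KEY}"
        instanceName: "my-instance-name"
        version: "2023-12-01-preview"
        models:
          gpt-4-turbo:
            deploymentName: "my-gpt-4-turbo-deployment"
```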
## Required Fields
To properly integrate Azure OpenAI with LibreChat, specific fields must be accurately configured in your `librechat.yaml` file. These fields accept either literal values or environment variable references, and are validated on startup to ensure the correct setup. Here are the detailed requirements based on the validation process:
### Group-Level Configuration
1. **group** (String, Required): Unique identifier name for a group of models. Duplicate group names are not allowed and will result in validation errors.
2. **apiKey** (String, Required): Must be a valid API key for Azure OpenAI services. It could be a direct key string or an environment variable reference (e.g., `${WESTUS_API_KEY}`).
3. **instanceName** (String, Required): Name of the Azure OpenAI instance. This field can also support environment variable references.
4. **deploymentName** (String, Optional): The deployment name at the group level is optional but required if any model within the group is set to `true`.
5. **version** (String, Optional): The version of the Azure OpenAI service at the group level is optional but required if any model within the group is set to `true`.
6. **baseURL** (String, Optional): Custom base URL for the Azure OpenAI API requests. Environment variable references are supported. This is optional and can be used for advanced routing scenarios.
7. **additionalHeaders** (Object, Optional): Specifies any extra headers for Azure OpenAI API requests as key-value pairs. Environment variable references can be included as values.
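Putting the group-level fields together, a single entry might look like this (a sketch; all values are placeholders):
```yaml
groups:
  - group: "my-westus-group"
    apiKey: "${WESTUS_API_KEY}"
    instanceName: "my-instance-name"
    deploymentName: "my-gpt-4-deployment" # used by models set to `true`
    version: "2023-12-01-preview" # used by models set to `true`
    baseURL: "https://my-gateway.example.com/${INSTANCE_NAME}/${DEPLOYMENT_NAME}"
    additionalHeaders:
      X-Custom-Header: "${MY_HEADER_VALUE}"
    models:
      gpt-4: true
```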
### Model-Level Configuration
Within each group, the `models` field must contain a mapping of model identifiers to either boolean values or object configurations.
- The key (model identifier) must match the corresponding OpenAI model name so that LibreChat properly reflects its known context limits and, in the case of vision, its functionality. For example, if you intend to use gpt-4-vision, it must be configured like so:
```yaml
models:
  gpt-4-vision-preview: # matching OpenAI Model name
    deploymentName: "arbitrary-deployment-name"
    version: "2024-02-15-preview" # version can be any that supports vision
```
- See [Model Deployments](#model-deployments) for more examples.
- If a model is set to `true`, it implies using the group-level `deploymentName` and `version` for this model. Both must be defined at the group level in this case.
- If a model is configured as an object, it can specify its own `deploymentName` and `version`. If these are not provided, the model inherits the group's `deploymentName` and `version`.
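A sketch illustrating both forms (deployment names are placeholders):
```yaml
models:
  # Boolean form: inherits the group-level `deploymentName` and `version`
  gpt-3.5-turbo: true
  # Object form: overrides the group-level values
  gpt-4:
    deploymentName: "my-gpt-4-deployment"
    version: "2024-02-15-preview"
```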
### Special Considerations
1. **Unique Names**: Both model and group names must be unique across the entire configuration. Duplicate names lead to validation failures.
2. **Missing Required Fields**: Lack of required `deploymentName` or `version` either at the group level (for boolean-flagged models) or within the models' configurations (if not inheriting or explicitly specified) will result in validation errors.
3. **Environment Variable References**: The configuration supports environment variable references (e.g., `${VARIABLE_NAME}`). Ensure that all referenced variables are present in your environment; the absence of a defined environment variable referenced in the config will cause errors. Note that `${INSTANCE_NAME}` and `${DEPLOYMENT_NAME}` are unique placeholders that do not correspond to environment variables; instead, they resolve to the instance and deployment name of the currently selected model. To avoid any potential conflicts, it is not recommended to use `INSTANCE_NAME` or `DEPLOYMENT_NAME` as environment variable names.
4. **Error Handling**: Any issues in the config, like duplicate names, undefined environment variables, or missing required fields, will invalidate the setup and generate descriptive error messages aiming for prompt resolution. You will not be allowed to run the server with an invalid configuration.
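For instance, the following sketch would fail validation because the same group name appears twice (all names are placeholders):
```yaml
groups:
  - group: "my-group" # duplicate group name -> validation error
    apiKey: "${KEY_ONE}"
    instanceName: "instance-one"
    deploymentName: "gpt-4-deployment"
    version: "2023-12-01-preview"
    models:
      gpt-4: true
  - group: "my-group" # duplicate group name -> validation error
    apiKey: "${KEY_TWO}"
    instanceName: "instance-two"
    deploymentName: "gpt-4-turbo-deployment"
    version: "2023-12-01-preview"
    models:
      gpt-4-turbo: true
```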
Applying these setup requirements thoughtfully will ensure a correct and efficient integration of Azure OpenAI services with LibreChat through the `librechat.yaml` configuration. Always validate your configuration against the latest schema definitions and guidelines to maintain compatibility and functionality.
### Model Deployments
The list of models available to your users is determined by the model groupings specified in your [`azureOpenAI` endpoint config](./custom_config.md#models-1).
For example:
```yaml
# Example Azure OpenAI Object Structure
endpoints:
  azureOpenAI:
    groups:
      - group: "my-westus" # arbitrary name
        apiKey: "${WESTUS_API_KEY}"
        instanceName: "actual-instance-name" # name of the resource group or instance
        version: "2023-12-01-preview"
        models:
          gpt-4-vision-preview:
            deploymentName: gpt-4-vision-preview
            version: "2024-02-15-preview"
          gpt-3.5-turbo: true
      - group: "my-eastus"
        apiKey: "${EASTUS_API_KEY}"
        instanceName: "actual-eastus-instance-name"
        deploymentName: gpt-4-turbo
        version: "2024-02-15-preview"
        models:
          gpt-4-turbo: true
```
The above configuration would enable `gpt-4-vision-preview`, `gpt-3.5-turbo` and `gpt-4-turbo` for your users in the order they were defined.
### Using Plugins with Azure
To use the Plugins endpoint with Azure OpenAI, you need a deployment supporting **[function calling](https://techcommunity.microsoft.com/t5/azure-ai-services-blog/function-calling-is-now-available-in-azure-openai-service/ba-p/3879241)**. Otherwise, you need to set "Functions" off in the Agent settings. When you are not using "functions" mode, it's recommended to have "skip completion" off as well, which is a review step of what the agent generated.
To use Azure with the Plugins endpoint, make sure the field `plugins` is set to `true` in your Azure OpenAI endpoint config:
```yaml
# Example Azure OpenAI Object Structure
endpoints:
  azureOpenAI:
    plugins: true # <------- Set this
    groups:
      # omitted for brevity
```
Configuring the `plugins` field will configure Plugins to use Azure models.
**NOTE**: The current configuration through `librechat.yaml` uses the primary model you select from the frontend for Plugin use; without Azure, the separate "Agent" model would normally be used instead. The Agent model setting can be ignored when using Plugins through Azure.
### Using a Specified Base URL with Azure
The base URL for Azure OpenAI API requests can be dynamically configured. This is useful for proxying services such as [Cloudflare AI Gateway](https://developers.cloudflare.com/ai-gateway/providers/azureopenai/), or if you wish to explicitly override the baseURL handling of the app.
LibreChat will use the baseURL field for your Azure model grouping, which can include placeholders for the Azure OpenAI API instance and deployment names.
In the configuration, the base URL can be customized like so:
```yaml
# librechat.yaml file, under an Azure group:
endpoints:
  azureOpenAI:
    groups:
      - group: "group-with-custom-base-url"
        baseURL: "https://example.azure-api.net/${INSTANCE_NAME}/${DEPLOYMENT_NAME}"
        # OR
        baseURL: "https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}"
        # Cloudflare example
        baseURL: "https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/${INSTANCE_NAME}/${DEPLOYMENT_NAME}"
```
**NOTE**: `${INSTANCE_NAME}` and `${DEPLOYMENT_NAME}` are unique placeholders that do not correspond to environment variables; instead, they resolve to the instance and deployment name of the currently selected model. To avoid any potential conflicts, it is not recommended to use `INSTANCE_NAME` and `DEPLOYMENT_NAME` as environment variable names.
**You can also omit the placeholders completely and simply construct the baseURL with your credentials:**
```yaml
baseURL: "https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/my-secret-instance/my-deployment"
```
**Lastly, you can specify the entire baseURL through a custom environment variable**
```yaml
baseURL: "${MY_CUSTOM_BASEURL}"
```
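For example, assuming a hypothetical `MY_CUSTOM_BASEURL` variable, you would define it in your `.env` file:
```bash
# .env file
MY_CUSTOM_BASEURL=https://example.azure-api.net/my-instance/my-deployment
```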
### Enabling Auto-Generated Titles with Azure
To enable titling for Azure, set `titleConvo` to `true`.
```yaml
# Example Azure OpenAI Object Structure
endpoints:
azureOpenAI:
titleConvo: true # <------- Set this
groups:
# omitted for brevity
```
**You can also specify the model to use for titling, with `titleModel`**, provided you have configured it in your group(s).
```yaml
titleModel: "gpt-3.5-turbo"
```
**Note**: "gpt-3.5-turbo" is the default value, so you can omit `titleModel` if you want to use that exact model and have it configured. If the title model is not configured and `titleConvo` is set to `true`, the titling process will result in an error and no title will be generated.
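Putting it together, a minimal titling setup might look like this (a sketch, assuming `gpt-3.5-turbo` is configured in one of your groups):
```yaml
# Example Azure OpenAI Object Structure
endpoints:
  azureOpenAI:
    titleConvo: true
    titleModel: "gpt-3.5-turbo" # must be a model configured in your groups
    groups:
      # omitted for brevity
```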
### Using GPT-4 Vision with Azure
To use Vision (image analysis) with Azure OpenAI, you need to make sure `gpt-4-vision-preview` is a specified model [in one of your groupings](#model-deployments).
This will work seamlessly as it does with the [OpenAI endpoint](#openai) (no need to select the vision model, it will be switched behind the scenes).
### Generate images with Azure OpenAI Service (DALL-E)
| Model ID | Feature Availability | Max Request (characters) |
|----------|----------------------|--------------------------|
| dall-e-2 | East US              | 1000                     |
| dall-e-3 | Sweden Central       | 4000                     |
- First you need to create an Azure resource that hosts DALL-E
- At the time of writing, dall-e-3 is available in the `SwedenCentral` region, dall-e-2 in the `EastUS` region.
- Then, you need to deploy the image generation model in one of the above regions.
- Read the [Azure OpenAI Image Generation Quickstart Guide](https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart) for further assistance
- Configure your environment variables based on Azure credentials:
**- For DALL-E-3:**
```bash
DALLE3_AZURE_API_VERSION=the-api-version # e.g.: 2023-12-01-preview
DALLE3_BASEURL=https://<AZURE_OPENAI_API_INSTANCE_NAME>.openai.azure.com/openai/deployments/<DALLE3_DEPLOYMENT_NAME>/
DALLE3_API_KEY=your-azure-api-key-for-dall-e-3
```
**- For DALL-E-2:**
```bash
DALLE2_AZURE_API_VERSION=the-api-version # e.g.: 2023-12-01-preview
DALLE2_BASEURL=https://<AZURE_OPENAI_API_INSTANCE_NAME>.openai.azure.com/openai/deployments/<DALLE2_DEPLOYMENT_NAME>/
DALLE2_API_KEY=your-azure-api-key-for-dall-e-2
```
**DALL-E Notes:**
- For DALL-E-3, the default system prompt has the LLM prefer the ["vivid" style](https://platform.openai.com/docs/api-reference/images/create#images-create-style) parameter, which seems to be the preferred setting for ChatGPT as "natural" can sometimes produce lackluster results.
- See official prompt for reference: **[DALL-E System Prompt](https://github.com/spdustin/ChatGPT-AutoExpert/blob/main/_system-prompts/dall-e.md)**
- You can adjust the system prompts to your liking:
```bash
DALLE3_SYSTEM_PROMPT="Your DALL-E-3 System Prompt here"
DALLE2_SYSTEM_PROMPT="Your DALL-E-2 System Prompt here"
```
- The `DALLE_REVERSE_PROXY` environment variable is ignored when Azure credentials (`DALLEx_AZURE_API_VERSION` and `DALLEx_BASEURL`) for DALL-E are configured.
---
## ⚠️ Legacy Setup ⚠️
---
**Note:** The legacy instructions may still be used for a simple setup, but they are no longer recommended as of v0.7.0 and may break in future versions. The configuration was reworked to improve upon the legacy settings and allow multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
**Use the recommended [Setup](#setup) in the section above.**
**Required Variables (legacy)**
These variables construct the API URL for Azure OpenAI.
* `AZURE_API_KEY`: Your Azure OpenAI API key.
* `AZURE_OPENAI_API_INSTANCE_NAME`: The instance name of your Azure OpenAI API.
* `AZURE_OPENAI_API_DEPLOYMENT_NAME`: The deployment name of your Azure OpenAI API.
* `AZURE_OPENAI_API_VERSION`: The version of your Azure OpenAI API.
For example, with these variables, the URL for chat completion would look something like:
```plaintext
https://{AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments/{AZURE_OPENAI_API_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}
```
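For instance, with the hypothetical values `my-instance`, `gpt-4`, and `2023-05-15`, the constructed URL would be:
```plaintext
https://my-instance.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2023-05-15
```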
You should also consider changing the `AZURE_OPENAI_MODELS` variable to the models available in your deployment.
```bash
# .env file
AZURE_OPENAI_MODELS=gpt-4-1106-preview,gpt-4,gpt-3.5-turbo,gpt-3.5-turbo-1106,gpt-4-vision-preview
```
Overriding the construction of the API URL is possible as of the implementation of **[Issue #1266](https://github.com/danny-avila/LibreChat/issues/1266)**.
**Model Deployments (legacy)**
> Note: a change was developed to improve upon these configuration settings, allowing multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
As of 2023-12-18, the Azure API allows only one model per deployment.
**It's highly recommended** to name your deployments *after* the model name (e.g., "gpt-3.5-turbo") for easy deployment switching.
When you do so, LibreChat will correctly switch the deployment, while associating the correct max context per model, if you have the following environment variable set:
```bash
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
For example, when you have set `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE`, the following deployment configuration provides the most seamless, error-free experience for LibreChat, including Vision support and tracking the correct max context tokens:
![Screenshot 2023-12-18 111742](https://github.com/danny-avila/LibreChat/assets/110412045/4aa8a61c-0317-4681-8262-a6382dcaa7b0)
Alternatively, you can use custom deployment names and set `AZURE_OPENAI_DEFAULT_MODEL` for expected functionality.
- **`AZURE_OPENAI_MODELS`**: List the available models, separated by commas without spaces. The first listed model will be the default. If left blank, internal settings will be used. Note that deployment names can't have periods, which are removed when generating the endpoint.
Example use:
```bash
# .env file
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4,gpt-5
```
- **`AZURE_USE_MODEL_AS_DEPLOYMENT_NAME`**: Enable using the model name as the deployment name for the API URL.
Example use:
```bash
# .env file
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
```
**Setting a Default Model for Azure (legacy)**
This section is relevant when you are **not** naming deployments after model names as shown above.
**Important:** The Azure OpenAI API does not use the `model` field in the payload, but it is a necessary identifier for LibreChat. If your deployment names do not correspond to the model names, and you're having issues with the model not being recognized, you should set this field to explicitly tell LibreChat to treat your Azure OpenAI API requests as if the specified model was selected.
If `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` is enabled, the model you set with `AZURE_OPENAI_DEFAULT_MODEL` will **not** be recognized and will **not** be used as the deployment name; instead, the model selected by the user will be used as the "deployment" name.
- **`AZURE_OPENAI_DEFAULT_MODEL`**: Override the model setting for Azure, useful if using custom deployment names.
Example use:
```bash
# .env file
# MUST be a real OpenAI model, named exactly how it is recognized by OpenAI API (not Azure)
AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # do include periods in the model name here
```
**Using a Specified Base URL with Azure (legacy)**
The base URL for Azure OpenAI API requests can be dynamically configured. This is useful for proxying services such as [Cloudflare AI Gateway](https://developers.cloudflare.com/ai-gateway/providers/azureopenai/), or if you wish to explicitly override the baseURL handling of the app.
LibreChat will use the `AZURE_OPENAI_BASEURL` environment variable, which can include placeholders for the Azure OpenAI API instance and deployment names.
In the application's environment configuration, the base URL is set like this:
```bash
# .env file
AZURE_OPENAI_BASEURL=https://example.azure-api.net/${INSTANCE_NAME}/${DEPLOYMENT_NAME}
# OR
AZURE_OPENAI_BASEURL=https://${INSTANCE_NAME}.openai.azure.com/openai/deployments/${DEPLOYMENT_NAME}
# Cloudflare example
AZURE_OPENAI_BASEURL=https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/${INSTANCE_NAME}/${DEPLOYMENT_NAME}
```
The application replaces `${INSTANCE_NAME}` and `${DEPLOYMENT_NAME}` in the `AZURE_OPENAI_BASEURL`, processed according to the other settings discussed in the guide.
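For example, with a hypothetical instance name `instance-1` and a resolved deployment name of `deployment-1`, the second template above would resolve to:
```plaintext
https://instance-1.openai.azure.com/openai/deployments/deployment-1
```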
**You can also omit the placeholders completely and simply construct the baseURL with your credentials:**
```bash
# .env file
AZURE_OPENAI_BASEURL=https://instance-1.openai.azure.com/openai/deployments/deployment-1
# Cloudflare example
AZURE_OPENAI_BASEURL=https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/azure-openai/instance-1/deployment-1
```
Setting these values will override all of the application's internal handling of the instance and deployment names and use your specified base URL.
**Notes:**
- You should still provide the `AZURE_OPENAI_API_VERSION` and `AZURE_API_KEY` via the .env file as they are programmatically added to the requests.
- When specifying instance and deployment names in the `AZURE_OPENAI_BASEURL`, their respective environment variables can be omitted (`AZURE_OPENAI_API_INSTANCE_NAME` and `AZURE_OPENAI_API_DEPLOYMENT_NAME`) except for use with Plugins.
- Specifying instance and deployment names in the `AZURE_OPENAI_BASEURL` instead of placeholders creates conflicts with "plugins," "vision," "default-model," and "model-as-deployment-name" support.
- Due to the conflicts that arise with other features, it is recommended to use placeholders for instance and deployment names in the `AZURE_OPENAI_BASEURL`.
**Enabling Auto-Generated Titles with Azure (legacy)**
The default titling model is set to `gpt-3.5-turbo`.
If you're using `AZURE_USE_MODEL_AS_DEPLOYMENT_NAME` and have "gpt-35-turbo" set up as a deployment name, this should work out-of-the-box.
In any case, you can adjust the title model as such: `OPENAI_TITLE_MODEL=your-title-model`
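For example, assuming your titling deployment/model is named `gpt-35-turbo`:
```bash
# .env file
OPENAI_TITLE_MODEL=gpt-35-turbo
```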
**Using GPT-4 Vision with Azure (legacy)**
Currently, the best way to set up Vision is to use your deployment names as the model names, as [shown here](#model-deployments).
This will work seamlessly as it does with the [OpenAI endpoint](#openai) (no need to select the vision model, it will be switched behind the scenes).
Alternatively, you can set the [required variables](#required-variables) to explicitly use your vision deployment, but this may limit you to exclusively using your vision deployment for all Azure chat settings.
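A sketch of that alternative, assuming a hypothetical vision deployment:
```bash
# .env file
AZURE_OPENAI_API_INSTANCE_NAME=my-instance
AZURE_OPENAI_API_DEPLOYMENT_NAME=gpt-4-vision-preview
AZURE_OPENAI_API_VERSION=2024-02-15-preview
```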
**Notes:**
- If using `AZURE_OPENAI_BASEURL`, you should use placeholders rather than hard-coded instance and deployment names, as the vision request will otherwise fail.
- As of December 18th, 2023, Vision models seem to have degraded performance with Azure OpenAI when compared to [OpenAI](#openai)
![image](https://github.com/danny-avila/LibreChat/assets/110412045/7306185f-c32c-4483-9167-af514cc1c2dd)
> Note: a change was developed to improve upon these configuration settings, allowing multiple deployments/model configurations to be set up with ease: **[#1390](https://github.com/danny-avila/LibreChat/issues/1390)**
**Optional Variables (legacy)**
*These variables are currently not used by LibreChat*
* `AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME`: The deployment name for completions. Currently unused, but it may be used in future updates of this project.
* `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME`: The deployment name for embeddings. Currently unused, but it may be used in future updates of this project.
**Using Plugins with Azure**
Note: To use the Plugins endpoint with Azure OpenAI, you need a deployment supporting **[function calling](https://techcommunity.microsoft.com/t5/azure-ai-services-blog/function-calling-is-now-available-in-azure-openai-service/ba-p/3879241)**. Otherwise, you need to set "Functions" off in the Agent settings. When you are not using "functions" mode, it's recommended to have "skip completion" off as well, since the completion acts as a review step of what the agent generated.
To use Azure with the Plugins endpoint, make sure the following environment variables are set:
* `PLUGINS_USE_AZURE`: If set to "true" or any truthy value, this will enable the program to use Azure with the Plugins endpoint.
* `AZURE_API_KEY`: Your Azure API key must be set with an environment variable.
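For example:
```bash
# .env file
PLUGINS_USE_AZURE=true
AZURE_API_KEY=your-azure-api-key
```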
**Important:**
- If using `AZURE_OPENAI_BASEURL`, you should use placeholders rather than hard-coded instance and deployment names, as the plugin request will otherwise fail.

@@ -30,6 +30,12 @@ You can copy the [example config file](#example-config) as a good starting point
The example config file has some options ready to go for Mistral AI and Openrouter.
**Note:** You can set an alternate filepath for the `librechat.yaml` file through an environment variable:
```bash
CONFIG_PATH="/alternative/path/to/librechat.yaml"
```
## Docker Setup
For Docker, you need to make use of an [override file](./docker_override.md), named `docker-compose.override.yml`, to ensure the config file works for you.
@@ -46,9 +52,11 @@ version: '3.4'
services:
api:
volumes:
- ./librechat.yaml:/app/librechat.yaml
- ./librechat.yaml:/app/librechat.yaml # local/filepath:container/filepath
```
- **Note:** If you are using `CONFIG_PATH` for an alternative filepath for this file, make sure to specify it accordingly.
- Start docker again, and you should see your config file settings apply
```bash
docker compose up # no need to rebuild
@@ -239,30 +247,18 @@ rateLimits:
- **Key**: `endpoints`
- **Type**: Object
- **Description**: Defines custom API endpoints for the application.
- **Sub-Key**: `assistants`
- **Type**: Object
- **Description**: Assistants endpoint-specific configuration.
- **Sub-Key**: `disableBuilder`
- **Description**: Controls the visibility and use of the builder interface for assistants.
- [More info](#disablebuilder)
- **Sub-Key**: `pollIntervalMs`
- **Description**: Specifies the polling interval in milliseconds for checking run updates or changes in assistant run states.
- [More info](#pollintervalms)
- **Sub-Key**: `timeoutMs`
- **Description**: Sets a timeout in milliseconds for assistant runs. Helps manage system load by limiting total run operation time.
- [More info](#timeoutMs)
- **Sub-Key**: `supportedIds`
- **Description**: List of supported assistant Ids. Use this or `excludedIds` but not both.
- [More info](#supportedIds)
- **Sub-Key**: `excludedIds`
- **Description**: List of excluded assistant Ids. Use this or `supportedIds` but not both (the `excludedIds` field will be ignored if so).
- [More info](#excludedIds)
- [Full Assistants Endpoint Object Structure](#assistants-endpoint-object-structure)
- **Sub-Key**: `custom`
- **Type**: Array of Objects
- **Description**: Each object in the array represents a unique endpoint configuration.
- [Full Custom Endpoint Object Structure](#custom-endpoint-object-structure)
- **Required**
- **Sub-Key**: `azureOpenAI`
- **Type**: Object
- **Description**: Azure OpenAI endpoint-specific configuration
- [Full Azure OpenAI Endpoint Object Structure](#azure-openai-object-structure)
- **Sub-Key**: `assistants`
- **Type**: Object
- **Description**: Assistants endpoint-specific configuration.
- [Full Assistants Endpoint Object Structure](#assistants-endpoint-object-structure)
## Endpoint File Config Object Structure
@@ -723,3 +719,188 @@ Custom endpoints share logic with the OpenAI endpoint, and thus have default par
**Note:** The `max_tokens` field is not sent, in order to use the maximum amount of tokens available, which is the default OpenAI API behavior. Some alternate APIs require this field, or it may default to a very low value and your responses may appear cut off; in this case, you should add it to the `addParams` field as shown in the [Endpoint Object Structure](#endpoint-object-structure).
## Azure OpenAI Object Structure
Integrating Azure OpenAI Service with your application allows you to seamlessly utilize multiple deployments and models across regions hosted by Azure OpenAI. This section details how to configure the Azure OpenAI endpoint for your needs.
**[For a detailed guide on setting up Azure OpenAI configurations, click here](./azure_openai.md)**
### Example Configuration
```yaml
# Example Azure OpenAI Object Structure
endpoints:
azureOpenAI:
titleModel: "gpt-4-turbo"
plugins: true
groups:
- group: "my-westus" # arbitrary name
apiKey: "${WESTUS_API_KEY}"
instanceName: "actual-instance-name" # name of the resource group or instance
version: "2023-12-01-preview"
# baseURL: https://prod.example.com
# additionalHeaders:
# X-Custom-Header: value
models:
gpt-4-vision-preview:
deploymentName: gpt-4-vision-preview
version: "2024-02-15-preview"
gpt-3.5-turbo:
deploymentName: gpt-35-turbo
gpt-3.5-turbo-1106:
deploymentName: gpt-35-turbo-1106
gpt-4:
deploymentName: gpt-4
gpt-4-1106-preview:
deploymentName: gpt-4-1106-preview
- group: "my-eastus"
apiKey: "${EASTUS_API_KEY}"
instanceName: "actual-eastus-instance-name"
deploymentName: gpt-4-turbo
version: "2024-02-15-preview"
      baseURL: "https://gateway.ai.cloudflare.com/v1/cloudflareId/azure/azure-openai/${INSTANCE_NAME}/${DEPLOYMENT_NAME}" # uses the instance/deployment placeholders
additionalHeaders:
X-Custom-Header: value
models:
gpt-4-turbo: true
```
### **groups**:
> Configuration for groups of models by geographic location or purpose.
- Type: Array
- **Description**: Each item in the `groups` array configures a set of models under a certain grouping, often by geographic region or distinct configuration.
- **Example**: See above.
### **plugins**:
> Enables or disables plugins for the Azure OpenAI endpoint.
- Type: Boolean
- **Example**: `plugins: true`
- **Description**: When set to `true`, activates plugins associated with this endpoint.
### Group Configuration Parameters
#### **group**:
> Identifier for a group of models.
- Type: String
- **Required**
- **Example**: `"my-westus"`
#### **apiKey**:
> The API key for accessing the Azure OpenAI Service.
- Type: String
- **Required**
- **Example**: `"${WESTUS_API_KEY}"`
- **Note**: It's highly recommended to use a custom env. variable reference for this field, i.e. `${YOUR_VARIABLE}`
#### **instanceName**:
> Name of the Azure instance.
- Type: String
- **Required**
- **Example**: `"my-westus"`
- **Note**: It's recommended to use a custom env. variable reference for this field, i.e. `${YOUR_VARIABLE}`
#### **version**:
> API version.
- Type: String
- **Optional**
- **Example**: `"2023-12-01-preview"`
- **Note**: It's recommended to use a custom env. variable reference for this field, i.e. `${YOUR_VARIABLE}`
#### **baseURL**:
> The base URL for the Azure OpenAI Service.
- Type: String
- **Optional**
- **Example**: `"https://prod.example.com"`
- **Note**: It's recommended to use a custom env. variable reference for this field, i.e. `${YOUR_VARIABLE}`
#### **additionalHeaders**:
> Additional headers for API requests.
- Type: Dictionary
- **Optional**
- **Example**:
```yaml
additionalHeaders:
X-Custom-Header: ${YOUR_SECRET_CUSTOM_VARIABLE}
```
- **Note**: It's recommended to use a custom env. variable reference for the values of this field, as shown in the example.
- **Note**: the `api-key` header value is sent on every request
#### **models**:
> Configuration for individual models within a group.
- **Description**: Configures settings for each model, including deployment name and version. A model can be set to a boolean (`true`) to adopt the group's deployment name and/or version, or to an object for detailed settings of either of those fields.
- **Example**: See above example configuration.
Within each group, models are records whose keys MUST be the matching OpenAI model names; each model is either set to `true` or to an object with a specific `deploymentName` and/or `version`. For example, if you intend to use gpt-4-vision, it must be configured like so:
```yaml
models:
gpt-4-vision-preview: # matching OpenAI Model name
deploymentName: "arbitrary-deployment-name"
version: "2024-02-15-preview" # version can be any that supports vision
```
### Model Configuration Parameters
#### **deploymentName**:
> The name of the deployment for the model.
- Type: String
- **Required** (can instead be inherited from the group-level `deploymentName`)
- **Example**: `"gpt-4-vision-preview"`
- **Description**: Identifies the deployment of the model within Azure.
- **Note**: Unlike the model key, this does not have to match the OpenAI model name; it must match the actual name of your deployment on Azure.
#### **version**:
> Specifies the version of the model.
- Type: String
- **Required** (can instead be inherited from the group-level `version`)
- **Example**: `"2024-02-15-preview"`
- **Description**: Defines the version of the model to be used.
**When specifying a model as a boolean (`true`):**
When a model is enabled (`true`) without using an object, it uses the group's configuration values for deployment name and version.
**Example**:
```yaml
models:
gpt-4-turbo: true
```
**When specifying a model as an object:**
An object allows for detailed configuration of the model, including its `deploymentName` and/or `version`. This mode is used for more granular control over the models, especially when working with multiple versions or deployments under one instance or resource group.
**Example**:
```yaml
models:
gpt-4-vision-preview:
deploymentName: "gpt-4-vision-preview"
version: "2024-02-15-preview"
```
### Notes:
- **Deployment Names** and **Versions** are critical for ensuring that the correct model is used. Double-check these values for accuracy to prevent unexpected behavior.

@@ -11,6 +11,7 @@ weight: 2
* 🐋 [Docker Compose Override](./docker_override.md)
---
* 🤖 [AI Setup](./ai_setup.md)
* 🅰️ [Azure OpenAI](./azure_openai.md)
* 🚅 [LiteLLM](./litellm.md)
* 💸 [Free AI APIs](./free_ai_apis.md)
---

@@ -20,6 +20,7 @@ weight: 1
* 🖥️ [Custom Endpoints & Config](./configuration/custom_config.md)
* 🐋 [Docker Compose Override](./configuration/docker_override.md)
* 🤖 [AI Setup](./configuration/ai_setup.md)
* 🅰️ [Azure OpenAI](./configuration/azure_openai.md)
* 🚅 [LiteLLM](./configuration/litellm.md)
* 💸 [Free AI APIs](./configuration/free_ai_apis.md)
* 🛂 [Authentication System](./configuration/user_auth_system.md)

@@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.4.3",
"version": "0.4.4",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

@@ -0,0 +1,567 @@
import type { TAzureGroups } from '../src/config';
import { validateAzureGroups, mapModelToAzureConfig } from '../src/azure';
describe('validateAzureGroups', () => {
it('should validate a correct configuration', () => {
const configs = [
{
group: 'us-east',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-4-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
];
const { isValid, modelNames, modelGroupMap, groupMap } = validateAzureGroups(configs);
expect(isValid).toBe(true);
expect(modelNames).toEqual(['gpt-4-turbo']);
const { azureOptions, baseURL, headers } = mapModelToAzureConfig({
modelName: 'gpt-4-turbo',
modelGroupMap,
groupMap,
});
expect(azureOptions).toEqual({
azureOpenAIApiKey: 'prod-1234',
azureOpenAIApiInstanceName: 'prod-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-deployment',
azureOpenAIApiVersion: '2023-11-06',
});
expect(baseURL).toBe('https://prod.example.com');
expect(headers).toEqual({
'X-Custom-Header': 'value',
});
});
it('should return invalid for a configuration missing deploymentName at the model level where required', () => {
const configs = [
{
group: 'us-west',
apiKey: 'us-west-key-5678',
instanceName: 'us-west-instance',
models: {
'gpt-5': {
version: '2023-12-01', // Missing deploymentName
},
},
},
];
const { isValid, errors } = validateAzureGroups(configs);
expect(isValid).toBe(false);
expect(errors.length).toBe(1);
});
it('should return invalid for a configuration with a boolean model where group lacks deploymentName and version', () => {
const configs = [
{
group: 'sweden-central',
apiKey: 'sweden-central-9012',
instanceName: 'sweden-central-instance',
models: {
'gpt-35-turbo': true, // The group lacks deploymentName and version
},
},
];
const { isValid, errors } = validateAzureGroups(configs);
expect(isValid).toBe(false);
expect(errors.length).toBe(1);
});
it('should allow a boolean model when group has both deploymentName and version', () => {
const configs = [
{
group: 'japan-east',
apiKey: 'japan-east-3456',
instanceName: 'japan-east-instance',
deploymentName: 'default-deployment',
version: '2023-04-01',
models: {
'gpt-5-turbo': true,
},
},
];
const { isValid, modelNames, modelGroupMap, groupMap } = validateAzureGroups(configs);
expect(isValid).toBe(true);
const modelGroup = modelGroupMap['gpt-5-turbo'];
expect(modelGroup).toBeDefined();
expect(modelGroup.group).toBe('japan-east');
expect(groupMap[modelGroup.group]).toBeDefined();
expect(modelNames).toContain('gpt-5-turbo');
const { azureOptions } = mapModelToAzureConfig({
modelName: 'gpt-5-turbo',
modelGroupMap,
groupMap,
});
expect(azureOptions).toEqual({
azureOpenAIApiKey: 'japan-east-3456',
azureOpenAIApiInstanceName: 'japan-east-instance',
azureOpenAIApiDeploymentName: 'default-deployment',
azureOpenAIApiVersion: '2023-04-01',
});
});
it('should validate correctly when optional fields are missing', () => {
const configs = [
{
group: 'canada-central',
apiKey: 'canada-key',
instanceName: 'canada-instance',
models: {
'gpt-6': {
deploymentName: 'gpt-6-deployment',
version: '2023-01-01',
},
},
},
];
const { isValid, modelNames, modelGroupMap, groupMap } = validateAzureGroups(configs);
expect(isValid).toBe(true);
expect(modelNames).toEqual(['gpt-6']);
const { azureOptions } = mapModelToAzureConfig({ modelName: 'gpt-6', modelGroupMap, groupMap });
expect(azureOptions).toEqual({
azureOpenAIApiKey: 'canada-key',
azureOpenAIApiInstanceName: 'canada-instance',
azureOpenAIApiDeploymentName: 'gpt-6-deployment',
azureOpenAIApiVersion: '2023-01-01',
});
});
it('should return invalid for configurations with incorrect types', () => {
const configs = [
{
group: 123, // incorrect type
apiKey: 'key123',
instanceName: 'instance123',
models: {
'gpt-7': true,
},
},
];
// @ts-expect-error This error is expected because the 'group' property should be a string.
const { isValid, errors } = validateAzureGroups(configs);
expect(isValid).toBe(false);
expect(errors.length).toBe(1);
});
it('should correctly handle a mix of valid and invalid model configurations', () => {
const configs = [
{
group: 'australia-southeast',
apiKey: 'australia-key',
instanceName: 'australia-instance',
models: {
'valid-model': {
deploymentName: 'valid-deployment',
version: '2023-02-02',
},
'invalid-model': true, // Invalid because the group lacks deploymentName and version
},
},
];
const { isValid, modelNames, errors } = validateAzureGroups(configs);
expect(isValid).toBe(false);
expect(modelNames).toEqual(expect.arrayContaining(['valid-model', 'invalid-model']));
expect(errors.length).toBe(1);
});
it('should return invalid for configuration missing required fields at the group level', () => {
const configs = [
{
group: 'brazil-south',
apiKey: 'brazil-key',
// Missing instanceName
models: {
'gpt-8': {
deploymentName: 'gpt-8-deployment',
version: '2023-03-03',
},
},
},
];
// @ts-expect-error This error is expected because the 'instanceName' property is intentionally left out.
const { isValid, errors } = validateAzureGroups(configs);
expect(isValid).toBe(false);
expect(errors.length).toBe(1);
});
});
describe('validateAzureGroups with modelGroupMap and groupMap', () => {
const originalEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterAll(() => {
process.env = originalEnv;
});
it('should provide a valid modelGroupMap and groupMap for a correct configuration', () => {
const validConfigs: TAzureGroups = [
{
group: 'us-east',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-4-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
{
group: 'us-west',
apiKey: 'prod-12345',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-5-turbo': {
deploymentName: 'gpt-5-turbo-deployment',
version: '2023-11-06',
},
},
},
];
const { isValid, modelGroupMap, groupMap } = validateAzureGroups(validConfigs);
expect(isValid).toBe(true);
expect(modelGroupMap['gpt-4-turbo']).toBeDefined();
expect(modelGroupMap['gpt-4-turbo'].group).toBe('us-east');
expect(groupMap['us-east']).toBeDefined();
expect(groupMap['us-east'].apiKey).toBe('prod-1234');
expect(groupMap['us-east'].models['gpt-4-turbo']).toBeDefined();
const { azureOptions, baseURL, headers } = mapModelToAzureConfig({
modelName: 'gpt-4-turbo',
modelGroupMap,
groupMap,
});
expect(azureOptions).toEqual({
azureOpenAIApiKey: 'prod-1234',
azureOpenAIApiInstanceName: 'prod-instance',
azureOpenAIApiDeploymentName: 'gpt-4-turbo-deployment',
azureOpenAIApiVersion: '2023-11-06',
});
expect(baseURL).toBe('https://prod.example.com');
expect(headers).toEqual({
'X-Custom-Header': 'value',
});
});
it('should not allow duplicate group names', () => {
const duplicateGroups: TAzureGroups = [
{
group: 'us-east',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-4-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
{
group: 'us-east',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-5-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
];
const { isValid } = validateAzureGroups(duplicateGroups);
expect(isValid).toBe(false);
});
it('should not allow duplicate models across groups', () => {
const duplicateGroups: TAzureGroups = [
{
group: 'us-east',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-4-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
{
group: 'us-west',
apiKey: 'prod-1234',
instanceName: 'prod-instance',
deploymentName: 'v1-deployment',
version: '2023-12-31',
baseURL: 'https://prod.example.com',
additionalHeaders: {
'X-Custom-Header': 'value',
},
models: {
'gpt-4-turbo': {
deploymentName: 'gpt-4-turbo-deployment',
version: '2023-11-06',
},
},
},
];
const { isValid } = validateAzureGroups(duplicateGroups);
expect(isValid).toBe(false);
});
it('should throw an error if environment variables are set but not configured', () => {
const validConfigs: TAzureGroups = [
{
group: 'librechat-westus',
apiKey: '${WESTUS_API_KEY}',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: {
'gpt-4-vision-preview': {
deploymentName: 'gpt-4-vision-preview',
version: '2024-02-15-preview',
},
'gpt-3.5-turbo': {
deploymentName: 'gpt-35-turbo',
},
'gpt-3.5-turbo-1106': {
deploymentName: 'gpt-35-turbo-1106',
},
'gpt-4': {
deploymentName: 'gpt-4',
},
'gpt-4-1106-preview': {
deploymentName: 'gpt-4-1106-preview',
},
},
},
{
group: 'librechat-eastus',
apiKey: '${EASTUS_API_KEY}',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: {
'gpt-4-turbo': true,
},
},
];
const { isValid, modelGroupMap, groupMap } = validateAzureGroups(validConfigs);
expect(isValid).toBe(true);
expect(() =>
mapModelToAzureConfig({ modelName: 'gpt-4-turbo', modelGroupMap, groupMap }),
).toThrow();
});
it('should list all expected models in both modelGroupMap and groupMap', () => {
process.env.WESTUS_API_KEY = 'westus-key';
process.env.EASTUS_API_KEY = 'eastus-key';
const validConfigs: TAzureGroups = [
{
group: 'librechat-westus',
apiKey: '${WESTUS_API_KEY}',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: {
'gpt-4-vision-preview': {
deploymentName: 'gpt-4-vision-preview',
version: '2024-02-15-preview',
},
'gpt-3.5-turbo': {
deploymentName: 'gpt-35-turbo',
},
'gpt-3.5-turbo-1106': {
deploymentName: 'gpt-35-turbo-1106',
},
'gpt-4': {
deploymentName: 'gpt-4',
},
'gpt-4-1106-preview': {
deploymentName: 'gpt-4-1106-preview',
},
},
},
{
group: 'librechat-eastus',
apiKey: '${EASTUS_API_KEY}',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: {
'gpt-4-turbo': true,
},
baseURL: 'https://eastus.example.com',
additionalHeaders: {
'x-api-key': 'x-api-key-value',
},
},
];
const { isValid, modelGroupMap, groupMap, modelNames } = validateAzureGroups(validConfigs);
expect(isValid).toBe(true);
expect(modelNames).toEqual([
'gpt-4-vision-preview',
'gpt-3.5-turbo',
'gpt-3.5-turbo-1106',
'gpt-4',
'gpt-4-1106-preview',
'gpt-4-turbo',
]);
// Check modelGroupMap
modelNames.forEach((modelName) => {
expect(modelGroupMap[modelName]).toBeDefined();
});
// Check groupMap for 'librechat-westus'
expect(groupMap).toHaveProperty('librechat-westus');
expect(groupMap['librechat-westus']).toEqual(
expect.objectContaining({
apiKey: '${WESTUS_API_KEY}',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: expect.objectContaining({
'gpt-4-vision-preview': expect.any(Object),
'gpt-3.5-turbo': expect.any(Object),
'gpt-3.5-turbo-1106': expect.any(Object),
'gpt-4': expect.any(Object),
'gpt-4-1106-preview': expect.any(Object),
}),
}),
);
// Check groupMap for 'librechat-eastus'
expect(groupMap).toHaveProperty('librechat-eastus');
expect(groupMap['librechat-eastus']).toEqual(
expect.objectContaining({
apiKey: '${EASTUS_API_KEY}',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: expect.objectContaining({
'gpt-4-turbo': true,
}),
}),
);
const { azureOptions: azureOptions1 } = mapModelToAzureConfig({
modelName: 'gpt-4-vision-preview',
modelGroupMap,
groupMap,
});
expect(azureOptions1).toEqual({
azureOpenAIApiKey: 'westus-key',
azureOpenAIApiInstanceName: 'librechat-westus',
azureOpenAIApiDeploymentName: 'gpt-4-vision-preview',
azureOpenAIApiVersion: '2024-02-15-preview',
});
const {
azureOptions: azureOptions2,
baseURL,
headers,
} = mapModelToAzureConfig({
modelName: 'gpt-4-turbo',
modelGroupMap,
groupMap,
});
expect(azureOptions2).toEqual({
azureOpenAIApiKey: 'eastus-key',
azureOpenAIApiInstanceName: 'librechat-eastus',
azureOpenAIApiDeploymentName: 'gpt-4-turbo',
azureOpenAIApiVersion: '2024-02-15-preview',
});
expect(baseURL).toBe('https://eastus.example.com');
expect(headers).toEqual({
'x-api-key': 'x-api-key-value',
});
const { azureOptions: azureOptions3 } = mapModelToAzureConfig({
modelName: 'gpt-4',
modelGroupMap,
groupMap,
});
expect(azureOptions3).toEqual({
azureOpenAIApiKey: 'westus-key',
azureOpenAIApiInstanceName: 'librechat-westus',
azureOpenAIApiDeploymentName: 'gpt-4',
azureOpenAIApiVersion: '2023-12-01-preview',
});
const { azureOptions: azureOptions4 } = mapModelToAzureConfig({
modelName: 'gpt-3.5-turbo',
modelGroupMap,
groupMap,
});
expect(azureOptions4).toEqual({
azureOpenAIApiKey: 'westus-key',
azureOpenAIApiInstanceName: 'librechat-westus',
azureOpenAIApiDeploymentName: 'gpt-35-turbo',
azureOpenAIApiVersion: '2023-12-01-preview',
});
const { azureOptions: azureOptions5 } = mapModelToAzureConfig({
modelName: 'gpt-3.5-turbo-1106',
modelGroupMap,
groupMap,
});
expect(azureOptions5).toEqual({
azureOpenAIApiKey: 'westus-key',
azureOpenAIApiInstanceName: 'librechat-westus',
azureOpenAIApiDeploymentName: 'gpt-35-turbo-1106',
azureOpenAIApiVersion: '2023-12-01-preview',
});
const { azureOptions: azureOptions6 } = mapModelToAzureConfig({
modelName: 'gpt-4-1106-preview',
modelGroupMap,
groupMap,
});
expect(azureOptions6).toEqual({
azureOpenAIApiKey: 'westus-key',
azureOpenAIApiInstanceName: 'librechat-westus',
azureOpenAIApiDeploymentName: 'gpt-4-1106-preview',
azureOpenAIApiVersion: '2023-12-01-preview',
});
});
});

@@ -0,0 +1,48 @@
import { extractEnvVariable } from '../src/parsers';
describe('extractEnvVariable', () => {
const originalEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterAll(() => {
process.env = originalEnv;
});
test('should return the value of the environment variable', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
});
  test('should return the original string if the environment variable is not defined correctly', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
});
test('should return the original string if environment variable is not set', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
test('should return the original string if it does not contain an environment variable', () => {
expect(extractEnvVariable('some_string')).toBe('some_string');
});
test('should handle empty strings', () => {
expect(extractEnvVariable('')).toBe('');
});
test('should handle strings without variable format', () => {
expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
});
test('should not process multiple variable formats', () => {
process.env.FIRST_VAR = 'first';
process.env.SECOND_VAR = 'second';
expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
'${FIRST_VAR} and ${SECOND_VAR}',
);
});
});

@@ -0,0 +1,211 @@
import type { ZodError } from 'zod';
import type {
TAzureGroups,
TAzureGroupMap,
TAzureModelGroupMap,
TValidatedAzureConfig,
} from '../src/config';
import { errorsToString, extractEnvVariable, envVarRegex } from '../src/parsers';
import { azureGroupConfigsSchema } from '../src/config';
export const deprecatedAzureVariables = [
/* "related to" precedes description text */
{ key: 'AZURE_OPENAI_DEFAULT_MODEL', description: 'setting a default model' },
{ key: 'AZURE_OPENAI_MODELS', description: 'setting models' },
{
key: 'AZURE_USE_MODEL_AS_DEPLOYMENT_NAME',
description: 'using model names as deployment names',
},
{ key: 'AZURE_API_KEY', description: 'setting a single Azure API key' },
{ key: 'AZURE_OPENAI_API_INSTANCE_NAME', description: 'setting a single Azure instance name' },
{
key: 'AZURE_OPENAI_API_DEPLOYMENT_NAME',
description: 'setting a single Azure deployment name',
},
{ key: 'AZURE_OPENAI_API_VERSION', description: 'setting a single Azure API version' },
{
key: 'AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME',
description: 'setting a single Azure completions deployment name',
},
{
key: 'AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME',
description: 'setting a single Azure embeddings deployment name',
},
{
key: 'PLUGINS_USE_AZURE',
description: 'using Azure for Plugins',
},
];
export const conflictingAzureVariables = [
{
key: 'INSTANCE_NAME',
},
{
key: 'DEPLOYMENT_NAME',
},
];
export function validateAzureGroups(configs: TAzureGroups): TValidatedAzureConfig & {
isValid: boolean;
errors: (ZodError | string)[];
} {
let isValid = true;
const modelNames: string[] = [];
const modelGroupMap: TAzureModelGroupMap = {};
const groupMap: TAzureGroupMap = {};
const errors: (ZodError | string)[] = [];
const result = azureGroupConfigsSchema.safeParse(configs);
if (!result.success) {
isValid = false;
errors.push(errorsToString(result.error.errors));
} else {
for (const group of result.data) {
const {
group: groupName,
apiKey,
instanceName,
deploymentName,
version,
baseURL,
additionalHeaders,
models,
} = group;
if (groupMap[groupName]) {
errors.push(`Duplicate group name detected: "${groupName}". Group names must be unique.`);
return { isValid: false, modelNames, modelGroupMap, groupMap, errors };
}
groupMap[groupName] = {
apiKey,
instanceName,
deploymentName,
version,
baseURL,
additionalHeaders,
models,
};
for (const modelName in group.models) {
modelNames.push(modelName);
const model = group.models[modelName];
if (modelGroupMap[modelName]) {
errors.push(
`Duplicate model name detected: "${modelName}". Model names must be unique across groups.`,
);
return { isValid: false, modelNames, modelGroupMap, groupMap, errors };
}
if (typeof model === 'boolean') {
// For boolean models, check if group-level deploymentName and version are present.
if (!group.deploymentName || !group.version) {
errors.push(
`Model "${modelName}" in group "${groupName}" is missing a deploymentName or version.`,
);
return { isValid: false, modelNames, modelGroupMap, groupMap, errors };
}
modelGroupMap[modelName] = {
group: groupName,
};
} else {
// For object models, check if deploymentName and version are required but missing.
if (
(!model.deploymentName && !group.deploymentName) ||
(!model.version && !group.version)
) {
errors.push(
`Model "${modelName}" in group "${groupName}" is missing a required deploymentName or version.`,
);
return { isValid: false, modelNames, modelGroupMap, groupMap, errors };
}
modelGroupMap[modelName] = {
group: groupName,
// deploymentName: model.deploymentName || group.deploymentName,
// version: model.version || group.version,
};
}
}
}
}
return { isValid, modelNames, modelGroupMap, groupMap, errors };
}
type AzureOptions = {
azureOpenAIApiKey: string;
azureOpenAIApiInstanceName: string;
azureOpenAIApiDeploymentName: string;
azureOpenAIApiVersion: string;
};
type MappedAzureConfig = {
azureOptions: AzureOptions;
baseURL?: string;
headers?: Record<string, string>;
};
export function mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
}: Omit<TValidatedAzureConfig, 'modelNames'> & {
modelName: string;
}): MappedAzureConfig {
const modelConfig = modelGroupMap[modelName];
if (!modelConfig) {
throw new Error(`Model named "${modelName}" not found in configuration.`);
}
const groupConfig = groupMap[modelConfig.group];
if (!groupConfig) {
throw new Error(
`Group "${modelConfig.group}" for model "${modelName}" not found in configuration.`,
);
}
const modelDetails = groupConfig.models[modelName];
const deploymentName =
typeof modelDetails === 'object'
? modelDetails.deploymentName || groupConfig.deploymentName
: groupConfig.deploymentName;
const version =
typeof modelDetails === 'object'
? modelDetails.version || groupConfig.version
: groupConfig.version;
if (!deploymentName || !version) {
throw new Error(
`Model "${modelName}" in group "${modelConfig.group}" is missing a deploymentName ("${deploymentName}") or version ("${version}").`,
);
}
const azureOptions: AzureOptions = {
azureOpenAIApiKey: extractEnvVariable(groupConfig.apiKey),
azureOpenAIApiInstanceName: extractEnvVariable(groupConfig.instanceName),
azureOpenAIApiDeploymentName: extractEnvVariable(deploymentName),
azureOpenAIApiVersion: extractEnvVariable(version),
};
for (const value of Object.values(azureOptions)) {
if (typeof value === 'string' && envVarRegex.test(value)) {
throw new Error(`Azure configuration environment variable "${value}" was not found.`);
}
}
const result: MappedAzureConfig = { azureOptions };
if (groupConfig.baseURL) {
result.baseURL = extractEnvVariable(groupConfig.baseURL);
}
if (groupConfig.additionalHeaders) {
result.headers = groupConfig.additionalHeaders;
}
return result;
}

@@ -8,6 +8,55 @@ export const defaultSocialLogins = ['google', 'facebook', 'openid', 'github', 'd
export const fileSourceSchema = z.nativeEnum(FileSources);
export const modelConfigSchema = z
.object({
deploymentName: z.string().optional(),
version: z.string().optional(),
})
.or(z.boolean());
export type TAzureModelConfig = z.infer<typeof modelConfigSchema>;
export const azureBaseSchema = z.object({
apiKey: z.string(),
instanceName: z.string(),
deploymentName: z.string().optional(),
version: z.string().optional(),
baseURL: z.string().optional(),
additionalHeaders: z.record(z.any()).optional(),
});
export type TAzureBaseSchema = z.infer<typeof azureBaseSchema>;
export const azureGroupSchema = z
.object({
group: z.string(),
models: z.record(z.string(), modelConfigSchema),
})
.required()
.and(azureBaseSchema);
export const azureGroupConfigsSchema = z.array(azureGroupSchema).min(1);
export type TAzureGroups = z.infer<typeof azureGroupConfigsSchema>;
export type TAzureModelMapSchema = {
// deploymentName?: string;
// version?: string;
group: string;
};
export type TAzureModelGroupMap = Record<string, TAzureModelMapSchema>;
export type TAzureGroupMap = Record<
string,
TAzureBaseSchema & { models: Record<string, TAzureModelConfig> }
>;
export type TValidatedAzureConfig = {
modelNames: string[];
modelGroupMap: TAzureModelGroupMap;
groupMap: TAzureGroupMap;
};
export const assistantEndpointSchema = z.object({
/* assistants specific */
disableBuilder: z.boolean().optional(),
@@ -56,8 +105,30 @@ export const endpointSchema = z.object({
headers: z.record(z.any()).optional(),
addParams: z.record(z.any()).optional(),
dropParams: z.array(z.string()).optional(),
customOrder: z.number().optional(),
});
export const azureEndpointSchema = z
.object({
groups: azureGroupConfigsSchema,
plugins: z.boolean().optional(),
})
.and(
endpointSchema
.pick({
titleConvo: true,
titleMethod: true,
titleModel: true,
summarize: true,
summaryModel: true,
customOrder: true,
})
.partial(),
);
export type TAzureConfig = Omit<z.infer<typeof azureEndpointSchema>, 'groups'> &
TValidatedAzureConfig;
export const rateLimitSchema = z.object({
fileUploads: z
.object({
@@ -83,6 +154,7 @@ export const configSchema = z.object({
fileConfig: fileConfigSchema.optional(),
endpoints: z
.object({
[EModelEndpoint.azureOpenAI]: azureEndpointSchema.optional(),
[EModelEndpoint.assistants]: assistantEndpointSchema.optional(),
custom: z.array(endpointSchema.partial()).optional(),
})
@@ -371,7 +443,7 @@ export enum Constants {
/**
* Key for the Custom Config's version (librechat.yaml).
*/
CONFIG_VERSION = '1.0.3',
CONFIG_VERSION = '1.0.4',
/**
* Standard value for the first message's `parentMessageId` value, to indicate no parent exists.
*/

@@ -1,4 +1,5 @@
/* config */
export * from './azure';
export * from './config';
export * from './file-config';
/* schema helpers */

@@ -1,5 +1,6 @@
import type { ZodIssue } from 'zod';
import type { TConversation, TPreset } from './schemas';
import type { TEndpointOption } from './types';
import type { TConfig, TEndpointOption, TEndpointsConfig } from './types';
import {
EModelEndpoint,
openAISchema,
@@ -42,6 +43,101 @@ const endpointSchemas: Record<EModelEndpoint, EndpointSchema> = {
// [EModelEndpoint.google]: createGoogleSchema,
// };
/** Get the enabled endpoints from the `ENDPOINTS` environment variable */
export function getEnabledEndpoints() {
const defaultEndpoints: string[] = [
EModelEndpoint.openAI,
EModelEndpoint.assistants,
EModelEndpoint.azureOpenAI,
EModelEndpoint.google,
EModelEndpoint.bingAI,
EModelEndpoint.chatGPTBrowser,
EModelEndpoint.gptPlugins,
EModelEndpoint.anthropic,
];
const endpointsEnv = process.env.ENDPOINTS || '';
let enabledEndpoints = defaultEndpoints;
if (endpointsEnv) {
enabledEndpoints = endpointsEnv
.split(',')
.filter((endpoint) => endpoint?.trim())
.map((endpoint) => endpoint.trim());
}
return enabledEndpoints;
}
/** Orders an existing EndpointsConfig object based on enabled endpoint/custom ordering */
export function orderEndpointsConfig(endpointsConfig: TEndpointsConfig) {
if (!endpointsConfig) {
return {};
}
const enabledEndpoints = getEnabledEndpoints();
const endpointKeys = Object.keys(endpointsConfig);
const defaultCustomIndex = enabledEndpoints.indexOf(EModelEndpoint.custom);
return endpointKeys.reduce(
(accumulatedConfig: Record<string, TConfig | null | undefined>, currentEndpointKey) => {
const isCustom = !(currentEndpointKey in EModelEndpoint);
const isEnabled = enabledEndpoints.includes(currentEndpointKey);
if (!isEnabled && !isCustom) {
return accumulatedConfig;
}
const index = enabledEndpoints.indexOf(currentEndpointKey);
if (isCustom) {
accumulatedConfig[currentEndpointKey] = {
order: defaultCustomIndex >= 0 ? defaultCustomIndex : 9999,
...(endpointsConfig[currentEndpointKey] as Omit<TConfig, 'order'> & { order?: number }),
};
} else if (endpointsConfig[currentEndpointKey]) {
accumulatedConfig[currentEndpointKey] = {
...endpointsConfig[currentEndpointKey],
order: index,
};
}
return accumulatedConfig;
},
{},
);
}
/** Converts an array of Zod issues into a string. */
export function errorsToString(errors: ZodIssue[]) {
return errors
.map((error) => {
const field = error.path.join('.');
const message = error.message;
return `${field}: ${message}`;
})
.join(' ');
}
export const envVarRegex = /^\${(.+)}$/;
/** Extracts the value of an environment variable from a string. */
export function extractEnvVariable(value: string) {
const envVarMatch = value.match(envVarRegex);
if (envVarMatch) {
return process.env[envVarMatch[1]] || value;
}
return value;
}
/** Resolves header values to env variables if detected */
export function resolveHeaders(headers: Record<string, string> | undefined) {
const resolvedHeaders = { ...(headers ?? {}) };
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
Object.keys(headers).forEach((key) => {
resolvedHeaders[key] = extractEnvVariable(headers[key]);
});
}
return resolvedHeaders;
}
export function getFirstDefinedValue(possibleValues: string[]) {
let returnValue;
for (const value of possibleValues) {