🅰️ feat: Azure Config to Allow Different Deployments per Model (#1863)

* wip: first pass for azure endpoint schema

* refactor: azure config to return groupMap and modelConfigMap

* wip: naming and schema changes

* refactor(errorsToString): move to data-provider

* feat: rename to azureGroups, add additional tests, tests all expected outcomes, return errors

* feat(AppService): load Azure groups

* refactor(azure): use imported types, write `mapModelToAzureConfig`

* refactor: move `extractEnvVariable` to data-provider

* refactor(validateAzureGroups): throw on duplicate groups or models; feat(mapModelToAzureConfig): throw if env vars not present, add tests

* refactor(AppService): ensure each model is properly configured on startup

* refactor: deprecate azureOpenAI environment variables in favor of librechat.yaml config

* feat: use helper functions to handle and order enabled/default endpoints; initialize azureOpenAI from config file

* refactor: redefine types as well as load azureOpenAI models from config file

* chore(ci): fix test description naming

* feat(azureOpenAI): use validated model grouping for request authentication

* chore: bump data-provider following rebase

* chore: bump config file version noting significant changes

* feat: add title options and switch azure configs for titling and vision requests

* feat: enable azure plugins from config file

* fix(ci): pass tests

* chore(.env.example): mark `PLUGINS_USE_AZURE` as deprecated

* fix(fetchModels): early return if apiKey not passed

* chore: fix azure config typing

* refactor(mapModelToAzureConfig): return baseURL and headers as well as azureOptions

* feat(createLLM): use `azureOpenAIBasePath`

* feat(parsers): resolveHeaders

* refactor(extractBaseURL): handle invalid input

* feat(OpenAIClient): handle headers and baseURL for azureConfig

* fix(ci): pass `OpenAIClient` tests

* chore: extract env var for azureOpenAI group config, baseURL

* docs: azureOpenAI config setup docs

* feat: safe check of potential conflicting env vars that map to unique placeholders

* fix: reset apiKey when model switches from originally requested model (vision or title)

* chore: linting

* docs: CONFIG_PATH notes in custom_config.md
This commit is contained in:
Danny Avila 2024-02-26 14:12:25 -05:00 committed by GitHub
parent 7a55132e42
commit 097a978e5b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
37 changed files with 2066 additions and 394 deletions

View file

@ -1,4 +1,4 @@
const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
const { CacheKeys, EModelEndpoint, orderEndpointsConfig } = require('librechat-data-provider');
const { loadDefaultEndpointsConfig, loadConfigEndpoints } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
@ -10,15 +10,18 @@ async function endpointController(req, res) {
return;
}
const defaultEndpointsConfig = await loadDefaultEndpointsConfig();
const customConfigEndpoints = await loadConfigEndpoints();
const defaultEndpointsConfig = await loadDefaultEndpointsConfig(req);
const customConfigEndpoints = await loadConfigEndpoints(req);
const endpointsConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (endpointsConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
endpointsConfig[EModelEndpoint.assistants].disableBuilder =
/** @type {TEndpointsConfig} */
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
mergedConfig[EModelEndpoint.assistants].disableBuilder =
req.app.locals[EModelEndpoint.assistants].disableBuilder;
}
const endpointsConfig = orderEndpointsConfig(mergedConfig);
await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
res.send(JSON.stringify(endpointsConfig));
}

View file

@ -1,8 +1,12 @@
const {
Constants,
FileSources,
EModelEndpoint,
Constants,
defaultSocialLogins,
validateAzureGroups,
mapModelToAzureConfig,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
const { initializeFirebase } = require('./Files/Firebase/initialize');
const loadCustomConfig = require('./Config/loadCustomConfig');
@ -62,6 +66,50 @@ const AppService = async (app) => {
handleRateLimits(config?.rateLimits);
const endpointLocals = {};
if (config?.endpoints?.[EModelEndpoint.azureOpenAI]) {
const { groups, titleModel, titleConvo, titleMethod, plugins } =
config.endpoints[EModelEndpoint.azureOpenAI];
const { isValid, modelNames, modelGroupMap, groupMap, errors } = validateAzureGroups(groups);
if (!isValid) {
const errorString = errors.join('\n');
const errorMessage = 'Invalid Azure OpenAI configuration:\n' + errorString;
logger.error(errorMessage);
throw new Error(errorMessage);
}
for (const modelName of modelNames) {
mapModelToAzureConfig({ modelName, modelGroupMap, groupMap });
}
endpointLocals[EModelEndpoint.azureOpenAI] = {
modelNames,
modelGroupMap,
groupMap,
titleConvo,
titleMethod,
titleModel,
plugins,
};
deprecatedAzureVariables.forEach(({ key, description }) => {
if (process.env[key]) {
logger.warn(
`The \`${key}\` environment variable (related to ${description}) should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you will experience conflicts and errors.`,
);
}
});
conflictingAzureVariables.forEach(({ key }) => {
if (process.env[key]) {
logger.warn(
`The \`${key}\` environment variable should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you may experience conflicts with the defined placeholders for mapping to the current model grouping using the same name.`,
);
}
});
}
if (config?.endpoints?.[EModelEndpoint.assistants]) {
const { disableBuilder, pollIntervalMs, timeoutMs, supportedIds, excludedIds } =
config.endpoints[EModelEndpoint.assistants];

View file

@ -1,4 +1,11 @@
const { FileSources, defaultSocialLogins } = require('librechat-data-provider');
const {
FileSources,
EModelEndpoint,
defaultSocialLogins,
validateAzureGroups,
deprecatedAzureVariables,
conflictingAzureVariables,
} = require('librechat-data-provider');
const AppService = require('./AppService');
@ -32,6 +39,43 @@ jest.mock('./ToolService', () => ({
}),
}));
const azureGroups = [
{
group: 'librechat-westus',
apiKey: '${WESTUS_API_KEY}',
instanceName: 'librechat-westus',
version: '2023-12-01-preview',
models: {
'gpt-4-vision-preview': {
deploymentName: 'gpt-4-vision-preview',
version: '2024-02-15-preview',
},
'gpt-3.5-turbo': {
deploymentName: 'gpt-35-turbo',
},
'gpt-3.5-turbo-1106': {
deploymentName: 'gpt-35-turbo-1106',
},
'gpt-4': {
deploymentName: 'gpt-4',
},
'gpt-4-1106-preview': {
deploymentName: 'gpt-4-1106-preview',
},
},
},
{
group: 'librechat-eastus',
apiKey: '${EASTUS_API_KEY}',
instanceName: 'librechat-eastus',
deploymentName: 'gpt-4-turbo',
version: '2024-02-15-preview',
models: {
'gpt-4-turbo': true,
},
},
];
describe('AppService', () => {
let app;
@ -122,11 +166,11 @@ describe('AppService', () => {
});
});
it('should correctly configure endpoints based on custom config', async () => {
it('should correctly configure Assistants endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
assistants: {
[EModelEndpoint.assistants]: {
disableBuilder: true,
pollIntervalMs: 5000,
timeoutMs: 30000,
@ -138,8 +182,8 @@ describe('AppService', () => {
await AppService(app);
expect(app.locals).toHaveProperty('assistants');
expect(app.locals.assistants).toEqual(
expect(app.locals).toHaveProperty(EModelEndpoint.assistants);
expect(app.locals[EModelEndpoint.assistants]).toEqual(
expect.objectContaining({
disableBuilder: true,
pollIntervalMs: 5000,
@ -149,6 +193,34 @@ describe('AppService', () => {
);
});
it('should correctly configure Azure OpenAI endpoint based on custom config', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
process.env.WESTUS_API_KEY = 'westus-key';
process.env.EASTUS_API_KEY = 'eastus-key';
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.azureOpenAI);
const azureConfig = app.locals[EModelEndpoint.azureOpenAI];
expect(azureConfig).toHaveProperty('modelNames');
expect(azureConfig).toHaveProperty('modelGroupMap');
expect(azureConfig).toHaveProperty('groupMap');
const { modelNames, modelGroupMap, groupMap } = validateAzureGroups(azureGroups);
expect(azureConfig.modelNames).toEqual(modelNames);
expect(azureConfig.modelGroupMap).toEqual(modelGroupMap);
expect(azureConfig.groupMap).toEqual(groupMap);
});
it('should not modify FILE_UPLOAD environment variables without rate limits', async () => {
// Setup initial environment variables
process.env.FILE_UPLOAD_IP_MAX = '10';
@ -213,7 +285,7 @@ describe('AppService', () => {
});
});
describe('AppService updating app.locals', () => {
describe('AppService updating app.locals and issuing warnings', () => {
let app;
let initialEnv;
@ -309,4 +381,56 @@ describe('AppService updating app.locals', () => {
expect.stringContaining('Both `supportedIds` and `excludedIds` are defined'),
);
});
it('should issue expected warnings when loading Azure Groups with deprecated Environment Variables', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
deprecatedAzureVariables.forEach((varInfo) => {
process.env[varInfo.key] = 'test';
});
const app = { locals: {} };
await require('./AppService')(app);
const { logger } = require('~/config');
deprecatedAzureVariables.forEach(({ key, description }) => {
expect(logger.warn).toHaveBeenCalledWith(
`The \`${key}\` environment variable (related to ${description}) should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you will experience conflicts and errors.`,
);
});
});
it('should issue expected warnings when loading conflicting Azure Environment Variables', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
},
},
}),
);
conflictingAzureVariables.forEach((varInfo) => {
process.env[varInfo.key] = 'test';
});
const app = { locals: {} };
await require('./AppService')(app);
const { logger } = require('~/config');
conflictingAzureVariables.forEach(({ key }) => {
expect(logger.warn).toHaveBeenCalledWith(
`The \`${key}\` environment variable should not be used in combination with the \`azureOpenAI\` endpoint configuration, as you may experience conflicts with the defined placeholders for mapping to the current model grouping using the same name.`,
);
});
});
});

View file

@ -1,6 +1,7 @@
const crypto = require('crypto');
const bcrypt = require('bcryptjs');
const { registerSchema, errorsToString } = require('~/strategies/validators');
const { errorsToString } = require('librechat-data-provider');
const { registerSchema } = require('~/strategies/validators');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const Token = require('~/models/schema/tokenSchema');
const { sendEmail } = require('~/server/utils');

View file

@ -1,12 +1,14 @@
const { availableTools } = require('~/app/clients/tools');
const { EModelEndpoint } = require('librechat-data-provider');
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
const { availableTools } = require('~/app/clients/tools');
const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } =
require('./EndpointService').config;
/**
* Load async endpoints and return a configuration object
* @param {Express.Request} req - The request object
*/
async function loadAsyncEndpoints() {
async function loadAsyncEndpoints(req) {
let i = 0;
let serviceKey, googleUserProvides;
try {
@ -35,13 +37,14 @@ async function loadAsyncEndpoints() {
const google = serviceKey || googleKey ? { userProvide: googleUserProvides } : false;
const useAzure = req.app.locals[EModelEndpoint.azureOpenAI]?.plugins;
const gptPlugins =
openAIApiKey || azureOpenAIApiKey
useAzure || openAIApiKey || azureOpenAIApiKey
? {
plugins,
availableAgents: ['classic', 'functions'],
userProvide: userProvidedOpenAI,
azure: useAzurePlugins,
userProvide: useAzure ? false : userProvidedOpenAI,
azure: useAzurePlugins || useAzure,
}
: false;

View file

@ -1,11 +1,13 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { isUserProvided } = require('~/server/utils');
const getCustomConfig = require('./getCustomConfig');
/**
* Load config endpoints from the cached configuration object
* @function loadConfigEndpoints */
async function loadConfigEndpoints() {
* @param {Express.Request} req - The request object
* @returns {Promise<TEndpointsConfig>} A promise that resolves to an object containing the endpoints configuration
*/
async function loadConfigEndpoints(req) {
const customConfig = await getCustomConfig();
if (!customConfig) {
@ -42,6 +44,13 @@ async function loadConfigEndpoints() {
}
}
if (req.app.locals[EModelEndpoint.azureOpenAI]) {
/** @type {Omit<TConfig, 'order'>} */
endpointsConfig[EModelEndpoint.azureOpenAI] = {
userProvide: false,
};
}
return endpointsConfig;
}

View file

@ -1,5 +1,5 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { isUserProvided } = require('~/server/utils');
const { fetchModels } = require('~/server/services/ModelService');
const getCustomConfig = require('./getCustomConfig');
@ -17,6 +17,16 @@ async function loadConfigModels(req) {
const { endpoints = {} } = customConfig ?? {};
const modelsConfig = {};
const azureModels = req.app.locals[EModelEndpoint.azureOpenAI]?.modelNames;
const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
if (azureModels && azureEndpoint) {
modelsConfig[EModelEndpoint.azureOpenAI] = azureModels;
}
if (azureModels && azureEndpoint && azureEndpoint.plugins) {
modelsConfig[EModelEndpoint.gptPlugins] = azureModels;
}
if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
return modelsConfig;

View file

@ -18,7 +18,7 @@ let i = 0;
async function loadCustomConfig() {
// Use CONFIG_PATH if set, otherwise fallback to defaultConfigPath
const configPath = process.env.CONFIG_PATH || defaultConfigPath;
const customConfig = loadYaml(configPath);
if (!customConfig) {
i === 0 &&

View file

@ -1,34 +1,17 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { EModelEndpoint, getEnabledEndpoints } = require('librechat-data-provider');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const { config } = require('./EndpointService');
/**
* Load async endpoints and return a configuration object
* @function loadDefaultEndpointsConfig
* @param {Express.Request} req - The request object
* @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order.
*/
async function loadDefaultEndpointsConfig() {
const { google, gptPlugins } = await loadAsyncEndpoints();
async function loadDefaultEndpointsConfig(req) {
const { google, gptPlugins } = await loadAsyncEndpoints(req);
const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
let enabledEndpoints = [
EModelEndpoint.openAI,
EModelEndpoint.assistants,
EModelEndpoint.azureOpenAI,
EModelEndpoint.google,
EModelEndpoint.bingAI,
EModelEndpoint.chatGPTBrowser,
EModelEndpoint.gptPlugins,
EModelEndpoint.anthropic,
];
const endpointsEnv = process.env.ENDPOINTS || '';
if (endpointsEnv) {
enabledEndpoints = endpointsEnv
.split(',')
.filter((endpoint) => endpoint?.trim())
.map((endpoint) => endpoint.trim());
}
const enabledEndpoints = getEnabledEndpoints();
const endpointConfig = {
[EModelEndpoint.openAI]: openAI,

View file

@ -1,13 +1,16 @@
const { EModelEndpoint, CacheKeys } = require('librechat-data-provider');
const {
EModelEndpoint,
CacheKeys,
extractEnvVariable,
envVarRegex,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { isUserProvided, extractEnvVariable } = require('~/server/utils');
const { fetchModels } = require('~/server/services/ModelService');
const getLogStores = require('~/cache/getLogStores');
const { isUserProvided } = require('~/server/utils');
const { OpenAIClient } = require('~/app');
const envVarRegex = /^\${(.+)}$/;
const { PROXY } = process.env;
const initializeClient = async ({ req, res, endpointOption }) => {

View file

@ -1,4 +1,8 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getAzureCredentials } = require('~/utils');
const { isEnabled } = require('~/server/utils');
@ -16,11 +20,19 @@ const initializeClient = async ({ req, res, endpointOption }) => {
DEBUG_PLUGINS,
} = process.env;
const { key: expiresAt } = req.body;
const { key: expiresAt, model: modelName } = req.body;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const useAzure = isEnabled(PLUGINS_USE_AZURE);
const endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;
let useAzure = isEnabled(PLUGINS_USE_AZURE);
let endpoint = useAzure ? EModelEndpoint.azureOpenAI : EModelEndpoint.openAI;
/** @type {false | TAzureConfig} */
const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
useAzure = useAzure || azureConfig?.plugins;
if (useAzure && endpoint !== EModelEndpoint.azureOpenAI) {
endpoint = EModelEndpoint.azureOpenAI;
}
const baseURLOptions = {
[EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
@ -59,8 +71,26 @@ const initializeClient = async ({ req, res, endpointOption }) => {
}
let apiKey = isUserProvided ? userKey : credentials[endpoint];
if (useAzure && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
clientOptions.azure = azureOptions;
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
apiKey = clientOptions.azure.azureOpenAIApiKey;
} else if (useAzure || (apiKey && apiKey.includes('{"azure') && !clientOptions.azure)) {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

View file

@ -13,6 +13,9 @@ jest.mock('~/server/services/UserService', () => ({
describe('gptPlugins/initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
beforeEach(() => {
jest.resetModules(); // Clears the cache
@ -32,6 +35,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -56,6 +60,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@ -73,6 +78,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -89,6 +95,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -108,6 +115,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -129,6 +137,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -148,6 +157,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: futureDate },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@ -171,6 +181,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: expiresAt },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -187,6 +198,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: new Date(Date.now() + 10000).toISOString() },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };
@ -207,6 +219,7 @@ describe('gptPlugins/initializeClient', () => {
const req = {
body: { key: null },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'default-model' } };

View file

@ -1,4 +1,8 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
EModelEndpoint,
mapModelToAzureConfig,
resolveHeaders,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getAzureCredentials } = require('~/utils');
const { isEnabled } = require('~/server/utils');
@ -14,7 +18,7 @@ const initializeClient = async ({ req, res, endpointOption }) => {
OPENAI_SUMMARIZE,
DEBUG_OPENAI,
} = process.env;
const { key: expiresAt, endpoint } = req.body;
const { key: expiresAt, endpoint, model: modelName } = req.body;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const baseURLOptions = {
@ -51,8 +55,30 @@ const initializeClient = async ({ req, res, endpointOption }) => {
}
let apiKey = isUserProvided ? userKey : credentials[endpoint];
const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
/** @type {false | TAzureConfig} */
const azureConfig = isAzureOpenAI && req.app.locals[EModelEndpoint.azureOpenAI];
if (endpoint === EModelEndpoint.azureOpenAI) {
if (isAzureOpenAI && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
clientOptions.azure = azureOptions;
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({ ...headers, ...(clientOptions.headers ?? {}) });
apiKey = clientOptions.azure.azureOpenAIApiKey;
} else if (isAzureOpenAI) {
clientOptions.azure = isUserProvided ? JSON.parse(userKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}

View file

@ -12,6 +12,9 @@ jest.mock('~/server/services/UserService', () => ({
describe('initializeClient', () => {
// Set up environment variables
const originalEnvironment = process.env;
const app = {
locals: {},
};
beforeEach(() => {
jest.resetModules(); // Clears the cache
@ -30,6 +33,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -54,6 +58,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'azureOpenAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = { modelOptions: { model: 'test-model' } };
@ -71,6 +76,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -87,6 +93,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -104,6 +111,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -124,6 +132,7 @@ describe('initializeClient', () => {
const req = {
body: { key: expiresAt, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -141,6 +150,7 @@ describe('initializeClient', () => {
const req = {
body: { key: null, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};
@ -160,6 +170,7 @@ describe('initializeClient', () => {
user: {
id: '123',
},
app,
};
const res = {};
@ -183,6 +194,7 @@ describe('initializeClient', () => {
const req = {
body: { key: invalidKey, endpoint: 'openAI' },
user: { id: '123' },
app,
};
const res = {};
const endpointOption = {};

View file

@ -38,6 +38,10 @@ const fetchModels = async ({
return models;
}
if (!apiKey) {
return models;
}
try {
const options = {
headers: {
@ -92,9 +96,7 @@ const fetchModels = async ({
},
);
} else {
logger.error(`${logMessage} Something happened in setting up the request`, {
message: error.message ? error.message : '',
});
logger.error(`${logMessage} Something happened in setting up the request`, error);
}
}

View file

@ -172,19 +172,6 @@ function isEnabled(value) {
*/
const isUserProvided = (value) => value === 'user_provided';
/**
* Extracts the value of an environment variable from a string.
* @param {string} value - The value to be processed, possibly containing an env variable placeholder.
* @returns {string} - The actual value from the environment variable or the original value.
*/
function extractEnvVariable(value) {
const envVarMatch = value.match(/^\${(.+)}$/);
if (envVarMatch) {
return process.env[envVarMatch[1]] || value;
}
return value;
}
module.exports = {
createOnProgress,
isEnabled,
@ -193,5 +180,4 @@ module.exports = {
formatAction,
addSpaceIfNeeded,
isUserProvided,
extractEnvVariable,
};

View file

@ -1,4 +1,4 @@
const { isEnabled, extractEnvVariable } = require('./handleText');
const { isEnabled } = require('./handleText');
describe('isEnabled', () => {
test('should return true when input is "true"', () => {
@ -48,51 +48,4 @@ describe('isEnabled', () => {
test('should return false when input is an array', () => {
expect(isEnabled([])).toBe(false);
});
describe('extractEnvVariable', () => {
const originalEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterAll(() => {
process.env = originalEnv;
});
test('should return the value of the environment variable', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
});
test('should return the original string if the envrionment variable is not defined correctly', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
});
test('should return the original string if environment variable is not set', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
test('should return the original string if it does not contain an environment variable', () => {
expect(extractEnvVariable('some_string')).toBe('some_string');
});
test('should handle empty strings', () => {
expect(extractEnvVariable('')).toBe('');
});
test('should handle strings without variable format', () => {
expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
});
test('should not process multiple variable formats', () => {
process.env.FIRST_VAR = 'first';
process.env.SECOND_VAR = 'second';
expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
'${FIRST_VAR} and ${SECOND_VAR}',
);
});
});
});