
* chore: rename dir from `assistant` to plural
* feat: `assistants` field for azure config, spread options in AppService
* refactor: rename constructAzureURL param for azure as `azureOptions`
* chore: bump openai and bun
* chore(loadDefaultModels): change naming of assistant -> assistants
* feat: load azure settings with correct baseURL for assistants' initializeClient
* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig
* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled
* feat(AppService): determine assistant models on startup, throw Error if none
* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations
* feat: support listing and deleting assistants with azure
* feat: add model query to assistant avatar upload
* feat: add azure support for retrieveRun method
* refactor: update OpenAIClient initialization
* chore: update README
* fix(ci): tests passing
* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method
* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model
* chore(files): add azure naming pattern for valid file id recognition
* fix(assistants): initialize openai with first available assistant model if none provided
* refactor(uploadOpenAIFile): add content type for azure, initialize FormData before azure options
* refactor(sleep): move sleep function out of Runs and into `~/server/utils`
* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled
* refactor(uploadOpenAIFile): revert to old method
* chore(uploadOpenAIFile): use enum for file purpose
* docs: azureOpenAI update guide with more info, examples
* feat: enable/disable assistant capabilities and specify retrieval models
* refactor: optional chain conditional statement in loadConfigModels.js
* docs: add assistants examples
* chore: update librechat.example.yaml
* docs(azure): update note of file upload behavior in Azure OpenAI Assistants
* chore: update docs and add descriptive message about assistant errors
* fix: prevent message submission with invalid assistant or if files loading
* style: update Landing icon & text when assistant is not selected
* chore: bump librechat-data-provider to 0.4.8
* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
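
The headline change above is the `assistants` flag on the azure config. As a minimal sketch of the kind of librechat.yaml entry this enables (the group name, instance name, and env variable below are hypothetical; `instanceName` is assumed from the azure group config format, while the top-level and per-group `assistants` flags follow the commit notes):

    endpoints:
      azureOpenAI:
        assistants: true                      # enable the assistants endpoint for azure
        groups:
          - group: 'example-group'            # hypothetical group name
            apiKey: '${EXAMPLE_AZURE_KEY}'    # hypothetical env variable
            instanceName: 'example-instance'  # assumed field, not from the commit notes
            assistants: true                  # per-group flag from the commit notes
            models:
              gpt-4-turbo: true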
99 lines · 3 KiB · JavaScript
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { fetchModels } = require('~/server/services/ModelService');
const { isUserProvided } = require('~/server/utils');
const getCustomConfig = require('./getCustomConfig');

/**
 * Load config models from the cached configuration object.
 * @function loadConfigModels
 * @param {Express.Request} req - The Express request object.
 * @returns {Promise<Object.<string, string[]>>} A map of endpoint names to their available model lists.
 */
async function loadConfigModels(req) {
  const customConfig = await getCustomConfig();

  if (!customConfig) {
    return {};
  }

  const { endpoints = {} } = customConfig ?? {};
  const modelsConfig = {};
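
  /**
   * The azure settings in `req.app.locals` are populated at startup by
   * AppService, which (per the commit notes) parses the azure group configs
   * and determines the available assistant models. From the fields read
   * below, that object is expected to carry `modelNames` and, when the
   * `assistants` flag is on, `assistantModels`.
   */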
  const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
  const { modelNames } = azureConfig ?? {};

  if (modelNames && azureEndpoint) {
    modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
  }

  if (modelNames && azureEndpoint && azureEndpoint.plugins) {
    modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
  }
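
  /**
   * Assistant models are only exposed when the azure `assistants` flag is
   * enabled, so plain azure model lists are never overwritten with assistant
   * models (see the `assistants` flag notes in the commit message above).
   */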
  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
    modelsConfig[EModelEndpoint.assistants] = azureConfig.assistantModels;
  }

  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
    return modelsConfig;
  }
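
  /**
   * A custom endpoint entry (defined in librechat.yaml) is only usable when
   * it provides `name`, `baseURL`, `apiKey`, and a `models` block containing
   * either `fetch: true` or a `default` list; anything else is filtered out.
   */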
  const customEndpoints = endpoints[EModelEndpoint.custom].filter(
    (endpoint) =>
      endpoint.baseURL &&
      endpoint.apiKey &&
      endpoint.name &&
      endpoint.models &&
      (endpoint.models.fetch || endpoint.models.default),
  );

  const fetchPromisesMap = {}; // Map for promises keyed by unique combination of baseURL and apiKey
  const uniqueKeyToNameMap = {}; // Map to associate unique keys with endpoint names
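
  /**
   * Endpoints sharing the same baseURL and apiKey share a single fetch, so a
   * model list is requested only once per unique credential pair. When either
   * value is marked user-provided (supplied by the user at runtime), models
   * cannot be fetched at startup and the `default` list is used instead.
   */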
  for (let i = 0; i < customEndpoints.length; i++) {
    const endpoint = customEndpoints[i];
    const { models, name, baseURL, apiKey } = endpoint;

    const API_KEY = extractEnvVariable(apiKey);
    const BASE_URL = extractEnvVariable(baseURL);

    const uniqueKey = `${BASE_URL}__${API_KEY}`;

    modelsConfig[name] = [];

    if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) {
      fetchPromisesMap[uniqueKey] =
        fetchPromisesMap[uniqueKey] ||
        fetchModels({
          user: req.user.id,
          baseURL: BASE_URL,
          apiKey: API_KEY,
          name,
          userIdQuery: models.userIdQuery,
        });
      uniqueKeyToNameMap[uniqueKey] = uniqueKeyToNameMap[uniqueKey] || [];
      uniqueKeyToNameMap[uniqueKey].push(name);
      continue;
    }

    if (Array.isArray(models.default)) {
      modelsConfig[name] = models.default;
    }
  }
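
  /**
   * `Object.values` and `Object.keys` iterate in the same insertion order, so
   * each resolved model list lines up with its unique key; the list is then
   * copied to every endpoint name registered under that key.
   */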
  const fetchedData = await Promise.all(Object.values(fetchPromisesMap));
  const uniqueKeys = Object.keys(fetchPromisesMap);

  for (let i = 0; i < fetchedData.length; i++) {
    const currentKey = uniqueKeys[i];
    const modelData = fetchedData[i];
    const associatedNames = uniqueKeyToNameMap[currentKey];

    for (const name of associatedNames) {
      modelsConfig[name] = modelData;
    }
  }

  return modelsConfig;
}

module.exports = loadConfigModels;