mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00

* chore: rename dir from `assistant` to plural * feat: `assistants` field for azure config, spread options in AppService * refactor: rename constructAzureURL param for azure as `azureOptions` * chore: bump openai and bun * chore(loadDefaultModels): change naming of assistant -> assistants * feat: load azure settings with correct baseURL for assistants' initializeClient * refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig * feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled * feat(AppService): determine assistant models on startup, throw Error if none * refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations * feat: support listing and deleting assistants with azure * feat: add model query to assistant avatar upload * feat: add azure support for retrieveRun method * refactor: update OpenAIClient initialization * chore: update README * fix(ci): tests passing * refactor(uploadOpenAIFile): improve logging and use more efficient REST API method * refactor(useFileHandling): add model to metadata to target Azure region compatible with current model * chore(files): add azure naming pattern for valid file id recognition * fix(assistants): initialize openai with first available assistant model if none provided * refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options * refactor(sleep): move sleep function out of Runs and into `~/server/utils` * fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled * refactor(uploadOpenAIFile): revert to old method * chore(uploadOpenAIFile): use enum for file purpose * docs: azureOpenAI update guide with more info, examples * feat: enable/disable assistant capabilities and specify retrieval models * refactor: optional chain conditional statement in loadConfigModels.js * docs: add assistants examples * chore: update 
librechat.example.yaml * docs(azure): update note of file upload behavior in Azure OpenAI Assistants * chore: update docs and add descriptive message about assistant errors * fix: prevent message submission with invalid assistant or if files loading * style: update Landing icon & text when assistant is not selected * chore: bump librechat-data-provider to 0.4.8 * fix(assistants/azure): assign req.body.model for proper azure init to abort runs
64 lines
1.9 KiB
JavaScript
64 lines
1.9 KiB
JavaScript
const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
|
|
const { isUserProvided } = require('~/server/utils');
|
|
const getCustomConfig = require('./getCustomConfig');
|
|
|
|
/**
|
|
* Load config endpoints from the cached configuration object
|
|
* @param {Express.Request} req - The request object
|
|
* @returns {Promise<TEndpointsConfig>} A promise that resolves to an object containing the endpoints configuration
|
|
*/
|
|
async function loadConfigEndpoints(req) {
|
|
const customConfig = await getCustomConfig();
|
|
|
|
if (!customConfig) {
|
|
return {};
|
|
}
|
|
|
|
const { endpoints = {} } = customConfig ?? {};
|
|
const endpointsConfig = {};
|
|
|
|
if (Array.isArray(endpoints[EModelEndpoint.custom])) {
|
|
const customEndpoints = endpoints[EModelEndpoint.custom].filter(
|
|
(endpoint) =>
|
|
endpoint.baseURL &&
|
|
endpoint.apiKey &&
|
|
endpoint.name &&
|
|
endpoint.models &&
|
|
(endpoint.models.fetch || endpoint.models.default),
|
|
);
|
|
|
|
for (let i = 0; i < customEndpoints.length; i++) {
|
|
const endpoint = customEndpoints[i];
|
|
const { baseURL, apiKey, name, iconURL, modelDisplayLabel } = endpoint;
|
|
|
|
const resolvedApiKey = extractEnvVariable(apiKey);
|
|
const resolvedBaseURL = extractEnvVariable(baseURL);
|
|
|
|
endpointsConfig[name] = {
|
|
type: EModelEndpoint.custom,
|
|
userProvide: isUserProvided(resolvedApiKey),
|
|
userProvideURL: isUserProvided(resolvedBaseURL),
|
|
modelDisplayLabel,
|
|
iconURL,
|
|
};
|
|
}
|
|
}
|
|
|
|
if (req.app.locals[EModelEndpoint.azureOpenAI]) {
|
|
/** @type {Omit<TConfig, 'order'>} */
|
|
endpointsConfig[EModelEndpoint.azureOpenAI] = {
|
|
userProvide: false,
|
|
};
|
|
}
|
|
|
|
if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
|
|
/** @type {Omit<TConfig, 'order'>} */
|
|
endpointsConfig[EModelEndpoint.assistants] = {
|
|
userProvide: false,
|
|
};
|
|
}
|
|
|
|
return endpointsConfig;
|
|
}
|
|
|
|
module.exports = loadConfigEndpoints;
|