LibreChat/api/server/services/Config/loadConfigModels.js
Danny Avila 1a452121fa
🤖 feat: OpenAI Assistants v2 (initial support) (#2781)
* 🤖 Assistants V2 Support: Part 1

- Separated Azure Assistants to its own endpoint
- File Search / Vector Store integration is incomplete, but it can be toggled and storage can be used from the playground
- Code Interpreter resource files can be added but not deleted
- GPT-4o is supported
- Many improvements to the Assistants Endpoint overall

data-provider v2 changes

copy existing route as v1

chore: rename new endpoint to reduce comparison operations and add new azure filesource

api: add azureAssistants part 1

force use of version for assistants/assistantsAzure

chore: switch name back to azureAssistants

refactor type version: string | number

Ensure assistants endpoints have version set

fix: isArchived type issue in ConversationListParams

refactor: update assistants mutations/queries with endpoint/version definitions, update Assistants Map structure

chore: FilePreview component ExtendedFile type assertion

feat: isAssistantsEndpoint helper

chore: remove unused useGenerations

chore(buildTree): type issue

chore(Advanced): type issue (unused component, maybe in future)

first pass for multi-assistant endpoint rewrite

fix(listAssistants): pass params correctly

feat: list separate assistants by endpoint

fix(useTextarea): access assistantMap correctly

fix: assistant endpoint switching, resetting ID

fix: broken during rewrite, selecting assistant mention

fix: set/invalidate assistants endpoint query data correctly

feat: Fix issue with assistant ID not being reset correctly

getOpenAIClient helper function

feat: add toast for assistant deletion

fix: assistants delete right after create issue for azure

fix: assistant patching

refactor: actions to use getOpenAIClient

refactor: consolidate logic into helpers file

fix: issue where conversation data was not initially available

v1 chat support

refactor(spendTokens): only early return if completionTokens isNaN

fix(OpenAIClient): ensure spendTokens has all necessary params

refactor: route/controller logic

fix(assistants/initializeClient): use defaultHeaders field

fix: sanitize default operation id

chore: bump openai package

first pass v2 action service

feat: retroactive domain parsing for actions added via v1

feat: delete db records of actions/assistants on openai assistant deletion

chore: remove vision tools from v2 assistants

feat: v2 upload and delete assistant vision images

WIP first pass, thread attachments

fix: show assistant vision files (save local/firebase copy)

v2 image continue

fix: annotations

fix: refine annotations

show analyze as error if no longer submitting before progress reaches 1, and show file_search as retrieval tool

fix: abort run, undefined endpoint issue

refactor: consolidate capabilities logic and anticipate versioning

frontend version 2 changes

fix: query selection and filter

add endpoint to unknown filepath

add file ids to resource, deleting in progress

enable/disable file search

remove version log

* 🤖 Assistants V2 Support: Part 2

🎹 fix: Autocompletion Chrome Bug on Action API Key Input

chore: remove `useOriginNavigate`

chore: set correct OpenAI Storage Source

fix: azure file deletions, instantiate clients by source for deletion

update code interpreter files info

feat: deleteResourceFileId

chore: increase poll interval as azure easily rate limits

fix: openai file deletions, TODO: evaluate rejected deletion settled promises to determine which to delete from db records

file source icons

update table file filters

chore: file search info and versioning

fix: retrieval update with necessary tool_resources if specified

fix(useMentions): add optional chaining in case listMap value is undefined

fix: force assistant avatar roundedness

fix: azure assistants, check correct flag

chore: bump data-provider

* fix: merge conflict

* ci: fix backend tests due to new updates

* chore: update .env.example

* meilisearch improvements

* localization updates

* chore: update comparisons

* feat: add additional metadata: endpoint, author ID

* chore: azureAssistants ENDPOINTS exclusion warning
2024-05-19 12:56:55 -04:00

const { EModelEndpoint, extractEnvVariable } = require('librechat-data-provider');
const { fetchModels } = require('~/server/services/ModelService');
const { isUserProvided } = require('~/server/utils');
const getCustomConfig = require('./getCustomConfig');
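
/**
 * Example (illustrative only; the endpoint name and values below are hypothetical, not part
 * of this module): given a custom endpoint entry from librechat.yaml such as
 *
 *   { name: 'Mistral', apiKey: '${MISTRAL_API_KEY}', baseURL: 'https://api.mistral.ai/v1',
 *     models: { default: ['mistral-small'], fetch: true } }
 *
 * this function resolves a map like `{ Mistral: [...fetched model IDs] }`, falling back to
 * `['mistral-small']` if the fetch yields nothing.
 */
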
/**
 * Load config endpoints from the cached configuration object
 * @function loadConfigModels
 * @param {Express.Request} req - The Express request object.
 */
async function loadConfigModels(req) {
  const customConfig = await getCustomConfig();

  if (!customConfig) {
    return {};
  }

  const { endpoints = {} } = customConfig ?? {};
  const modelsConfig = {};
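
  /** Azure model names are parsed into `req.app.locals` at startup; they are exposed here
   * only when the azureOpenAI endpoint is enabled in the config. The same list backs
   * gptPlugins when `plugins` is set, and `assistantModels` backs azureAssistants. */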
  const azureEndpoint = endpoints[EModelEndpoint.azureOpenAI];
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
  const { modelNames } = azureConfig ?? {};

  if (modelNames && azureEndpoint) {
    modelsConfig[EModelEndpoint.azureOpenAI] = modelNames;
  }

  if (modelNames && azureEndpoint && azureEndpoint.plugins) {
    modelsConfig[EModelEndpoint.gptPlugins] = modelNames;
  }

  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
    modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
  }

  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {
    return modelsConfig;
  }
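
  /** Consider only custom endpoints that define a name, baseURL, apiKey, and a models
   * block with either fetching enabled or a default list. */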
  const customEndpoints = endpoints[EModelEndpoint.custom].filter(
    (endpoint) =>
      endpoint.baseURL &&
      endpoint.apiKey &&
      endpoint.name &&
      endpoint.models &&
      (endpoint.models.fetch || endpoint.models.default),
  );

  /**
   * @type {Record<string, Promise<string[]>>}
   * Map for promises keyed by unique combination of baseURL and apiKey */
  const fetchPromisesMap = {};
  /**
   * @type {Record<string, string[]>}
   * Map to associate unique keys with endpoint names; note: one key may correspond to multiple endpoints */
  const uniqueKeyToEndpointsMap = {};
  /**
   * @type {Record<string, Partial<TEndpoint>>}
   * Map to associate endpoint names to their configurations */
  const endpointsMap = {};
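
  /** Build each endpoint's model list. Fetches are deduplicated: endpoints that resolve to
   * the same baseURL/apiKey pair share a single fetch promise. */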
  for (let i = 0; i < customEndpoints.length; i++) {
    const endpoint = customEndpoints[i];
    const { models, name, baseURL, apiKey } = endpoint;
    endpointsMap[name] = endpoint;

    const API_KEY = extractEnvVariable(apiKey);
    const BASE_URL = extractEnvVariable(baseURL);
    const uniqueKey = `${BASE_URL}__${API_KEY}`;

    modelsConfig[name] = [];

    if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) {
      fetchPromisesMap[uniqueKey] =
        fetchPromisesMap[uniqueKey] ||
        fetchModels({
          user: req.user.id,
          baseURL: BASE_URL,
          apiKey: API_KEY,
          name,
          userIdQuery: models.userIdQuery,
        });
      uniqueKeyToEndpointsMap[uniqueKey] = uniqueKeyToEndpointsMap[uniqueKey] || [];
      uniqueKeyToEndpointsMap[uniqueKey].push(name);
      continue;
    }
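
    /** When fetching is disabled or the key/URL are user-provided, fall back to the
     * statically configured default model list. */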
    if (Array.isArray(models.default)) {
      modelsConfig[name] = models.default;
    }
  }
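
  /** Resolve the deduplicated fetches, then assign each fetched list to every endpoint that
   * shares the same baseURL/apiKey key, falling back to that endpoint's `models.default`. */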
  const fetchedData = await Promise.all(Object.values(fetchPromisesMap));
  const uniqueKeys = Object.keys(fetchPromisesMap);

  for (let i = 0; i < fetchedData.length; i++) {
    const currentKey = uniqueKeys[i];
    const modelData = fetchedData[i];
    const associatedNames = uniqueKeyToEndpointsMap[currentKey];

    for (const name of associatedNames) {
      const endpoint = endpointsMap[name];
      modelsConfig[name] = !modelData?.length ? endpoint.models.default ?? [] : modelData;
    }
  }

  return modelsConfig;
}

module.exports = loadConfigModels;