mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 08:50:15 +01:00
* 🤖 Assistants V2 Support: Part 1 - Separated Azure Assistants to its own endpoint - File Search / Vector Store integration is incomplete, but can toggle and use storage from playground - Code Interpreter resource files can be added but not deleted - GPT-4o is supported - Many improvements to the Assistants Endpoint overall data-provider v2 changes copy existing route as v1 chore: rename new endpoint to reduce comparison operations and add new azure filesource api: add azureAssistants part 1 force use of version for assistants/assistantsAzure chore: switch name back to azureAssistants refactor type version: string | number Ensure assistants endpoints have version set fix: isArchived type issue in ConversationListParams refactor: update assistants mutations/queries with endpoint/version definitions, update Assistants Map structure chore: FilePreview component ExtendedFile type assertion feat: isAssistantsEndpoint helper chore: remove unused useGenerations chore(buildTree): type issue chore(Advanced): type issue (unused component, maybe in future) first pass for multi-assistant endpoint rewrite fix(listAssistants): pass params correctly feat: list separate assistants by endpoint fix(useTextarea): access assistantMap correctly fix: assistant endpoint switching, resetting ID fix: broken during rewrite, selecting assistant mention fix: set/invalidate assistants endpoint query data correctly feat: Fix issue with assistant ID not being reset correctly getOpenAIClient helper function feat: add toast for assistant deletion fix: assistants delete right after create issue for azure fix: assistant patching refactor: actions to use getOpenAIClient refactor: consolidate logic into helpers file fix: issue where conversation data was not initially available v1 chat support refactor(spendTokens): only early return if completionTokens isNaN fix(OpenAIClient): ensure spendTokens has all necessary params refactor: route/controller logic fix(assistants/initializeClient): use 
defaultHeaders field fix: sanitize default operation id chore: bump openai package first pass v2 action service feat: retroactive domain parsing for actions added via v1 feat: delete db records of actions/assistants on openai assistant deletion chore: remove vision tools from v2 assistants feat: v2 upload and delete assistant vision images WIP first pass, thread attachments fix: show assistant vision files (save local/firebase copy) v2 image continue fix: annotations fix: refine annotations show analyze as error if is no longer submitting before progress reaches 1 and show file_search as retrieval tool fix: abort run, undefined endpoint issue refactor: consolidate capabilities logic and anticipate versioning frontend version 2 changes fix: query selection and filter add endpoint to unknown filepath add file ids to resource, deleting in progress enable/disable file search remove version log * 🤖 Assistants V2 Support: Part 2 🎹 fix: Autocompletion Chrome Bug on Action API Key Input chore: remove `useOriginNavigate` chore: set correct OpenAI Storage Source fix: azure file deletions, instantiate clients by source for deletion update code interpret files info feat: deleteResourceFileId chore: increase poll interval as azure easily rate limits fix: openai file deletions, TODO: evaluate rejected deletion settled promises to determine which to delete from db records file source icons update table file filters chore: file search info and versioning fix: retrieval update with necessary tool_resources if specified fix(useMentions): add optional chaining in case listMap value is undefined fix: force assistant avatar roundedness fix: azure assistants, check correct flag chore: bump data-provider * fix: merge conflict * ci: fix backend tests due to new updates * chore: update .env.example * meilisearch improvements * localization updates * chore: update comparisons * feat: add additional metadata: endpoint, author ID * chore: azureAssistants ENDPOINTS exclusion warning
78 lines
2.8 KiB
JavaScript
78 lines
2.8 KiB
JavaScript
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
|
|
const { getModelsConfig } = require('~/server/controllers/ModelController');
|
|
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
|
|
const assistants = require('~/server/services/Endpoints/assistants');
|
|
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
|
|
const { processFiles } = require('~/server/services/Files/process');
|
|
const anthropic = require('~/server/services/Endpoints/anthropic');
|
|
const openAI = require('~/server/services/Endpoints/openAI');
|
|
const custom = require('~/server/services/Endpoints/custom');
|
|
const google = require('~/server/services/Endpoints/google');
|
|
const enforceModelSpec = require('./enforceModelSpec');
|
|
const { handleError } = require('~/server/utils');
|
|
|
|
/**
 * Dispatch table mapping each supported endpoint to the service-layer
 * `buildOptions` function that constructs its `endpointOption` payload.
 * Keyed by `EModelEndpoint` values; looked up via `endpointType ?? endpoint`.
 */
const buildFunction = {
  // OpenAI-compatible chat endpoints (Azure OpenAI reuses the OpenAI builder).
  [EModelEndpoint.openAI]: openAI.buildOptions,
  [EModelEndpoint.azureOpenAI]: openAI.buildOptions,
  [EModelEndpoint.custom]: custom.buildOptions,
  // Other first-party providers.
  [EModelEndpoint.google]: google.buildOptions,
  [EModelEndpoint.anthropic]: anthropic.buildOptions,
  // Plugin + assistants endpoints.
  [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
  [EModelEndpoint.assistants]: assistants.buildOptions,
  [EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
};
|
|
|
|
/**
 * Express middleware that prepares `req.body.endpointOption` for chat routes.
 *
 * Flow:
 *  1. Parses the conversation payload for the requested endpoint.
 *  2. If model-spec enforcement is enabled (`app.locals.modelSpecs.enforce`),
 *     validates the request's `spec` against the configured list and rejects
 *     mismatches via `handleError` (ends the response; `next` is not called).
 *  3. Builds the endpoint options with the endpoint-specific builder, attaches
 *     the models config, and kicks off file processing when files are present.
 *
 * @param {Object} req - Express request; reads `req.body` and `req.app.locals`.
 * @param {Object} res - Express response; written to only on validation errors.
 * @param {Function} next - Express `next` callback, invoked on success.
 * @returns {Promise<void>}
 */
async function buildEndpointOption(req, res, next) {
  const { endpoint, endpointType } = req.body;
  const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });

  if (req.app.locals.modelSpecs?.list && req.app.locals.modelSpecs?.enforce) {
    /** @type {{ list: TModelSpec[] }}*/
    const { list } = req.app.locals.modelSpecs;
    const { spec } = parsedBody;

    if (!spec) {
      return handleError(res, { text: 'No model spec selected' });
    }

    const currentModelSpec = list.find((s) => s.name === spec);
    if (!currentModelSpec) {
      return handleError(res, { text: 'Invalid model spec' });
    }

    if (endpoint !== currentModelSpec.preset.endpoint) {
      return handleError(res, { text: 'Model spec mismatch' });
    }

    // Tools in a preset are only meaningful for the plugins endpoint.
    if (
      currentModelSpec.preset.endpoint !== EModelEndpoint.gptPlugins &&
      currentModelSpec.preset.tools
    ) {
      return handleError(res, {
        text: `Only the "${EModelEndpoint.gptPlugins}" endpoint can have tools defined in the preset`,
      });
    }

    const isValidModelSpec = enforceModelSpec(currentModelSpec, parsedBody);
    if (!isValidModelSpec) {
      return handleError(res, { text: 'Model spec mismatch' });
    }
  }

  // Guard against unrecognized endpoints: without this check, indexing
  // `buildFunction` with an unknown key yields `undefined` and the call
  // below would throw an unhandled TypeError instead of a clean error response.
  const buildOptions = buildFunction[endpointType ?? endpoint];
  if (typeof buildOptions !== 'function') {
    return handleError(res, { text: `Unknown endpoint: ${endpointType ?? endpoint}` });
  }

  req.body.endpointOption = buildOptions(endpoint, parsedBody, endpointType);

  const modelsConfig = await getModelsConfig(req);
  req.body.endpointOption.modelsConfig = modelsConfig;

  if (req.body.files) {
    // Intentionally not awaited: hold the promise so downstream handlers
    // can await `attachments` only when they actually need the files.
    req.body.endpointOption.attachments = processFiles(req.body.files);
  }

  next();
}
|
|
|
|
// Export the middleware directly; mounted on chat routes ahead of the controllers.
module.exports = buildEndpointOption;
|