feat: Assistants API, General File Support, Side Panel, File Explorer (#1696)

* feat: assistant name/icon in Landing & Header

* feat: assistant name in textarea placeholder, and use `Assistant` as default name

* feat: display non-image files in user messages

* fix: only render files if files.length is > 0

* refactor(config -> file-config): move file related configuration values to separate module, add excel types

* chore: spreadsheet file rendering

* fix(Landing): dark mode style for Assistant Name

* refactor: move progress incrementing to own hook, start smaller, cap near limit (1)
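  A minimal sketch of the idea (hook name and numbers are illustrative, not the actual implementation): progress starts small and creeps toward, but never reaches, the limit of 1 until the real work completes.

  import { useState, useEffect } from 'react';

  // Hypothetical progress hook: ramps toward `limit` by a fraction of the remaining distance.
  function useProgress(initial = 0.01, limit = 0.99, intervalMs = 200) {
    const [progress, setProgress] = useState(initial);
    useEffect(() => {
      const id = setInterval(() => {
        setProgress((prev) => Math.min(limit, prev + (limit - prev) * 0.05));
      }, intervalMs);
      return () => clearInterval(id); // stop ticking on unmount
    }, [limit, intervalMs]);
    return progress;
  }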

* refactor(useContentHandler): add empty Text part if last part was completed tool or image

* chore: add accordion trigger border styling for dark mode

* feat: Assistant Builder model selection

* chore: use Spinner when Assistant is mutating

* fix(get/assistants): return correct response object `AssistantListResponse`

* refactor(Spinner): pass size as prop

* refactor: make assistant crud mutations optimistic, add types for options

* chore: remove assistants route and view

* chore: move assistant builder components to separate directory

* feat(ContextButton): delete Assistant via context button/dialog, add localization

* refactor: conditionally show use and context menu buttons, add localization for create assistant

* feat: save side panel states to localStorage

* style(SidePanel): improve avatar menu and assistant select styling for dark mode

* refactor: make NavToggle reusable for either side (left or right), add SidePanel Toggle with ability to close it completely

* fix: resize handle and navToggle behavior

* fix(/avatar/:assistant_id): await `deleteFile` and assign unique name to uploaded image

* WIP: file UI components from PR #576

* refactor(OpenAIMinimalIcon): pass className

* feat: formatDate helper fn

* feat: DataTableColumnHeader

* feat: add row selection, formatted row values, number of rows selected

* WIP: add files to Side panel temporarily

* feat: `LB_QueueAsyncCall`: Leaky Bucket queue for external APIs, use in `processDeleteRequest`
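  A rough sketch of the leaky-bucket idea (the real `LB_QueueAsyncCall` API may differ): calls are queued and drained at a fixed rate so bursts of external API requests, such as batch file deletions, are smoothed out.

  // Illustrative leaky-bucket queue: drains one queued async call per interval.
  function createLeakyBucketQueue(intervalMs = 200) {
    const queue = [];
    setInterval(() => {
      const next = queue.shift();
      if (next) {
        next.fn(...next.args).then(next.resolve).catch(next.reject);
      }
    }, intervalMs);
    return (fn, args = []) =>
      new Promise((resolve, reject) => queue.push({ fn, args, resolve, reject }));
  }

  // usage sketch: enqueue deletions instead of firing them all at once
  // const enqueue = createLeakyBucketQueue(250);
  // await Promise.allSettled(files.map((file) => enqueue(openai.files.del, [file.file_id])));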

* fix(TFile): correct `source` type with `FileSources`

* fix(useFileHandling): use `continue` instead of return when iterating multiple files, add file type to extendedFile

* chore: add generic setter type

* refactor(processDeleteRequest): settle promises to prevent rejections from processing deletions, log errors

* feat: `useFileDeletion` to reuse file deletion logic

* refactor(useFileDeletion): make `setFiles` an optional param and use object as param

* feat: useDeleteFilesFromTable

* feat: use real `files` data and add deletion action to data table

* fix(Table): make headers sticky

* feat: add dynamic filtering for columns; only show the Host or OpenAI storage type to the user

* style(DropdownMenu): replace `slate` with `gray`

* style(DataTable): apply dark mode themes and other misc styling

* style(Columns): add color to OpenAI Storage option

* refactor(FileContainer): make file preview reusable

* refactor(Images): make image preview reusable

* refactor(FilePreview): make file prop optional for FileIcon and FilePreview, fix relative style

* feat(Columns): add file/image previews, set a minimum size to show for file size in bytes

* WIP: File Panel with real files and formatted

* feat: open files dialog from panel

* style: file data table mobile and general column styling fixes

* refactor(api/files): return files sorted by the most recently updated

* refactor: provide fileMap through context to prevent re-selecting files to map in different areas; remove unused imports commented out in PanelColumns

* refactor(ExtendFile): make File type optional, add `attached` to prevent attached files from being deleted on remove, make Message.files a partial TFile type

* feat: attach files through file panel

* refactor(useFileHandling): move files to the start of cache list when uploaded

* refactor(useDeleteFilesMutation): delete files from cache when successfully deleted from server

* fix(FileRow): handle possible edge case of duplication due to attaching recently uploaded file

* style(SidePanel): make resize grip border transparent, remove unnecessary styling on close sidepanel button

* feat: action utilities and tests

* refactor(actions): add `ValidationResult` type and change wording for no server URL found

* refactor(actions): check for empty server URL

* fix(data-provider): revert tsconfig to fix type issue resolution

* feat(client): first pass of actions input for assistants

* refactor(FunctionSignature): change method to output object instead of string

* refactor(models/Assistant): add actions field to schema, use searchParams object for methods, and add `getAssistant`

* feat: post actions input first pass
- create new Action document
- add actions to Assistant DB document
- create /action/:assistant_id POST route
- pass more props down from PanelSwitcher, derive assistant_id from switcher
- move privacy policy to ActionInput
- reset data on input change/validation
- add `useUpdateAction`
- conform FunctionSignature type to FunctionTool
- add action, assistant doc, update hook related types

* refactor: optimize assistant/actions relationship
- pass domain in metadata as hostname and not a URL
- include domain in tool name
- add `getActions` for actions retrieval by user
- add `getAssistants` for assistant docs retrieval by user
- add `assistant_id` to Action schema
- move actions to own module as a subroute to `api/assistants`
- add `useGetActionsQuery` and `useGetAssistantDocsQuery` hooks
- fix Action type def

* feat: show assistant actions in assistant builder

* feat: switch to actions on action click, editing action styling

* fix: add Assistant state for builder panel to allow immediate selection of newly created assistants and to retain the current assistant when switching to a different panel within the builder

* refactor(SidePanel/NavToggle): offset less from right when SidePanel is completely collapsed

* chore: rename `processActions` -> `processRequiredActions`

* chore: rename Assistant API Action to RequiredAction

* refactor(actions): avoid nesting actual API params under generic `requestBody` to optimize LLM token usage
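  Roughly, the change flattens the generated function parameters (shapes below are illustrative, not the actual schemas):

  // Before: every call makes the model reproduce a generic wrapper key
  const nested = {
    name: 'createTicket',
    parameters: {
      type: 'object',
      properties: {
        requestBody: {
          type: 'object',
          properties: { title: { type: 'string' }, body: { type: 'string' } },
        },
      },
    },
  };

  // After: the spec's own fields become the top-level parameters, costing fewer tokens per call
  const flattened = {
    name: 'createTicket',
    parameters: {
      type: 'object',
      properties: { title: { type: 'string' }, body: { type: 'string' } },
    },
  };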

* fix(handleTools): avoid calling `validTool` if not defined, add optional param to skip the loading of specs, which throws an error in the context of assistants

* WIP: working first pass of toolCalls generated from openapi specs

* WIP: first pass ToolCall styling

* feat: programmatic iv encryption/decryption helpers

* fix: correct ActionAuth types/enums, and define type for AuthForm

* feat: encryption/decryption helpers for Action AuthMetadata
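  A minimal sketch of what IV-based encryption helpers can look like (cipher choice, key source, and function names are assumptions, not the actual ActionService helpers):

  const crypto = require('crypto');

  // Assumes a 32-byte key supplied as hex in an env var (shown here as CREDS_KEY).
  const key = Buffer.from(process.env.CREDS_KEY ?? '', 'hex');

  function encryptValue(value) {
    const iv = crypto.randomBytes(16);
    const cipher = crypto.createCipheriv('aes-256-ctr', key, iv);
    const encrypted = Buffer.concat([cipher.update(value, 'utf8'), cipher.final()]);
    return `${iv.toString('hex')}:${encrypted.toString('hex')}`; // prepend IV so decryption is self-contained
  }

  function decryptValue(value) {
    const [ivHex, dataHex] = value.split(':');
    const decipher = crypto.createDecipheriv('aes-256-ctr', key, Buffer.from(ivHex, 'hex'));
    return Buffer.concat([decipher.update(Buffer.from(dataHex, 'hex')), decipher.final()]).toString('utf8');
  }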

* refactor(getActions): remove sensitive fields from query response

* refactor(POST/actions): encrypt and remove sensitive fields from mutation response

* fix(ActionService): change ESM import to CJS

* feat: frontend auth handling for actions + optimistic update on action update/creation

* refactor(actions): use the correct variables and types for setAuth method

* refactor: POST /:assistant_id action can now handle updating an existing action, add `saved_auth_fields` to determine when the user explicitly saves new auth creds; only send auth metadata if the user explicitly saved fields

* refactor(createActionTool): catch errors and send back meaningful error message, add flag to `getActions` to determine whether to retrieve sensitive values or not

* refactor(ToolService): add `action` property to ToolCall PartMetadata to determine if the tool call was an action, fix parsing function name issue with actionDelimiter

* fix(ActionRequest): use URL class to correctly join endpoint parts for `execute` call
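  The gist of the fix (URLs are illustrative): naive string concatenation can drop or double slashes, while the WHATWG URL class resolves the path against the base correctly.

  const base = 'https://api.example.com/v1/'; // hypothetical action server URL
  const path = 'tickets/123';

  // `base + path` is brittle when the base is missing its trailing slash
  const url = new URL(path, base).toString();
  // -> 'https://api.example.com/v1/tickets/123'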

* feat: delete assistant actions

* refactor: conditionally show Available actions

* refactor: show `retrieval` and `code_interpreter` as Capabilities, swap `Switch` for `Checkbox`

* chore: remove shadow-stroke from messages

* WIP: first pass of Assistants Knowledge attachments

* refactor: remove AssistantsProvider in favor of FormProvider, fix selectedAssistant re-render bug, map Assistant file_ids to files via fileMap, initialize Knowledge component with mapped files if any exist

* fix: prevent deleting files on assistant file upload

* chore: remove console.log

* refactor(useUploadFileMutation): update files and assistants cache on upload

* chore: disable oauth option as not supported yet

* feat: cancel assistant runs

* refactor: initialize OpenAI client with helper function, resolve all related circular dependencies

* fix(DALL-E): initialization

* fix(process): openai client initialization

* fix: select an existing Assistant when the active one is deleted

* chore: allow attaching files for assistant endpoint, send back relevant OpenAI error message when uploading, destructure OpenAI initialization correctly, add `message_file` to formData when a file is attached to the message but not the assistant

* fix: add assistant_id on newConvo

* fix(initializeClient): import fix

* chore: swap setAssistant for setOption in useEffect

* fix(DALL-E): add processFileURL to loadTools call

* chore: add customConfig to debug logs

* feat: delete threads on convo delete

* chore: replace Assistants icon

* chore: remove console.dir() in `abortRun`

* feat(AssistantService): accumulate text values from run in openai.responseText

* feat: titling for assistants endpoint

* chore: move panel file components to appropriate directory, add file checks for attaching files, change icon for Attach Files

* refactor: add localizations to tools, plugins, add condition for adding/removing user plugins so tool selections don't affect this value

* chore: disable `import from url` action for now

* chore: remove textMimeTypes from default fileConfig for now

* fix: catch tool errors and send as outputs with error messages

* fix: React warning about button as descendant of button

* style: retrieval and cancelled icon

* WIP: pass isSubmitting to Parts, use InProgressCall to display cancelled tool calls correctly, show domain/function name

* fix(meilisearch): fix `postSaveHook` issue where indexing expects a mongo document, and join all text content parts for meili indexing
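  Roughly, only the text parts of the content array are kept and joined for indexing (field names follow the Assistants API message format):

  // Join an assistant message's text segments into a single string for Meilisearch.
  function joinTextParts(content = []) {
    return content
      .filter((part) => part.type === 'text' && part.text?.value)
      .map((part) => part.text.value)
      .join(' ');
  }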

* ci: fix dall-e tests

* ci: fix client tests

* fix: button types in actions panel

* fix: plugin auth form persisting across tool selections

* fix(ci): update AppService spec with `loadAndFormatTools`

* fix(clearConvos): add id check earlier on

* refactor(AssistantAvatar): set previewURL dynamically when metadata.avatar changes

* feat(assistants): addTitle cache setting

* fix(useSSE): resolve rebase conflicts

* fix: delete mutation

* style(SidePanel): make grip visible on active and hover, invisible otherwise

* ci: add data-provider tests to workflow, also update eslint/tsconfig to recognize specs, and add `text/csv` to fileConfig

* fix: handle edge case where auth object is undefined, and log errors

* refactor(actions): resolve schemas, add tests for resolving refs, import specs from separate file for tests

* chore: remove comment

* fix(ActionsInput): re-render bug when initializing states with action fields

* fix(patch/assistant): filter undefined tools

* chore: add logging for errors in assistants routes

* fix(updateAssistant): map actions to functions to avoid overwriting

* fix(actions): properly handle GET paths

* fix(convos): unhandled delete thread exception

* refactor(AssistantService): pass both thread_id and conversationId when sending intermediate assistant messages, remove `mapMessagesToSteps` from AssistantService

* refactor(useSSE): replace all messages with runMessages and pass latestMessageId to abortRun; fix(checkMessageGaps): include tool calls when syncing messages

* refactor(assistants/chat): invoke `createOnTextProgress` after thread creation

* chore: add typing

* style: sidepanel styling

* style: action tool call domain styling

* feat(assistants): default models, limit retrieval to certain models, add env variables to env.example

* feat: assistants api key in EndpointService

* refactor: set assistant model to conversation on assistant switch

* refactor: set assistant model to conversation on assistant select from panel

* fix(retrieveAndProcessFile): catch attempt to download file with `assistant` purpose which is not allowed; add logging

* feat: retrieval styling, handling, and logging

* chore: rename ASSISTANTS_REVERSE_PROXY to ASSISTANTS_BASE_URL

* feat: FileContext for file metadata

* feat: context file mgmt and filtering

* style(Select): hover/rounded changes

* refactor: explicit conversation switch, endpoint dependent, through `useSelectAssistant`, which does not create new chat if current endpoint is assistant endpoint

* fix(AssistantAvatar): make empty previewURL if no avatar present

* refactor: side panel mobile styling

* style: merge tool and action section, optimize mobile styling for action/tool buttons

* fix: localStorage issues

* fix(useSelectAssistant): invoke react query hook directly in select hook as Map was not being updated in time

* style: light mode fixes

* fix: prevent sidepanel nav styling from shifting layout up

* refactor: change default layout (collapsed by default)

* style: mobile optimization of DataTable

* style: datatable

* feat: client-side hide right-side panel

* chore(useNewConvo): add partial typing for preset

* fix(useSelectAssistant): pass correct model name by using template as preset

* WIP: assistant presets

* refactor(ToolService): add native solution for `TavilySearchResults` and log tool output errors

* refactor: organize imports and use native TavilySearchResults

* fix(TavilySearchResults): stringify result

* fix(ToolCall): show tool call outputs when not an action

* chore: rename Prompt Prefix to custom instructions (in user facing text only)

* refactor(EditPresetDialog): Optimize setting title by debouncing, reset preset on dialog close to avoid state mixture

* feat: add `presetOverride` to overwrite active conversation settings when saving a Preset (relevant for client side updates only)

* feat: Assistant preset settings (client-side)

* fix(Switcher): only set assistant_id and model if current endpoint is Assistants

* feat: use `useDebouncedInput` for updating conversation settings, starting with EditPresetDialog title setting and Assistant instructions setting
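  A sketch of what a debounced-input hook can look like (signature and delay are assumptions): local state updates immediately for a responsive input, while the conversation/preset setting is committed only after the user pauses typing.

  import { useState, useMemo } from 'react';
  import debounce from 'lodash/debounce';

  function useDebouncedInput(setOption, optionKey, initialValue, delay = 450) {
    const [value, setValue] = useState(initialValue);
    // commit to conversation/preset state only after `delay` ms of inactivity
    const commit = useMemo(
      () => debounce((next) => setOption(optionKey, next), delay),
      [setOption, optionKey, delay],
    );
    const onChange = (e) => {
      const next = typeof e === 'string' ? e : e.target.value;
      setValue(next);
      commit(next);
    };
    return [onChange, value, setValue];
  }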

* feat(Assistants): add instructions field to settings

* feat(chat/assistants): pass conversation settings to run body

* wip: begin localization and only allow actions if the assistant is created

* refactor(AssistantsPanel): knowledge localization, allow tools on creation

* feat: experimental: allow 'priming' values, which would normally require an assistant_id to be defined, before the assistant is created

* chore: trim console logs and make more meaningful

* chore: toast messages

* fix(ci): date test

* feat: create file when uploading Assistant Avatar

* feat: file upload rate limiting from custom config with dynamic file route initialization
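  A sketch of what the upload limiters might look like with express-rate-limit (window sizes, limits, and defaults here are assumptions; the real values come from the custom config/env):

  const rateLimit = require('express-rate-limit');

  const createFileLimiters = ({ windowMs = 15 * 60 * 1000, ipMax = 100, userMax = 50 } = {}) => {
    const fileUploadIpLimiter = rateLimit({
      windowMs,
      max: ipMax, // per-IP ceiling within the window
      message: { message: 'Too many file upload requests from this IP, please try again later' },
    });
    const fileUploadUserLimiter = rateLimit({
      windowMs,
      max: userMax,
      keyGenerator: (req) => req.user?.id ?? req.ip, // rate-limit per authenticated user
    });
    return { fileUploadIpLimiter, fileUploadUserLimiter };
  };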

* refactor: use file upload limiters on post routes only

* refactor(fileConfig): add endpoints field for endpoint specific fileconfigs, add mergeConfig function, add tests
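  A rough sketch of the merge semantics (the real `mergeFileConfig` in librechat-data-provider merges the custom `fileConfig` from librechat.yaml over the library's built-in defaults, as the multer changes below show):

  // Placeholder defaults; per-endpoint entries override the global values.
  function mergeFileConfig(custom = {}, defaults = { serverFileSizeLimit: 20, endpoints: {} }) {
    const merged = { ...defaults, ...custom, endpoints: { ...defaults.endpoints } };
    for (const [endpoint, overrides] of Object.entries(custom.endpoints ?? {})) {
      merged.endpoints[endpoint] = { ...defaults.endpoints[endpoint], ...overrides };
    }
    return merged;
  }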

* refactor: fileConfig route, dynamic multer instances used on all '/' and '/images' POST routes, data service and query hook

* feat: supportedMimeTypesSchema, test for array of regex
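  A hedged sketch of such a schema with zod (the exact definition may differ): supported MIME types are expressed as an array of RegExp so configs can match whole families, e.g. /^image\/.*$/.

  const { z } = require('zod');

  const supportedMimeTypesSchema = z
    .array(z.custom((value) => value instanceof RegExp))
    .optional();

  // usage sketch
  supportedMimeTypesSchema.parse([/^image\/(jpeg|png|webp)$/, /^text\/csv$/]);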

* feat: configurable file config limits

* chore: clarify assistants file knowledge prereq.

* chore(useTextarea): default to localized 'Assistant' if assistant name is empty

* feat: configurable file limits and toggle file upload per endpoint

* fix(useUploadFileMutation): prevent updating assistant.files cache if file upload is a message_file attachment

* fix(AssistantSelect): set last selected assistant only when timeout successfully runs

* refactor(queries): disable assistant queries if assistants endpoint is not enabled

* chore(Switcher): add localization

* chore: pluralize `assistant` for `EModelEndpoint` key and value

* feat: show/hide assistant UI components based on endpoint availability; librechat.yaml config for disabling builder section and setting polling/timeout intervals

* fix(compactEndpointSchemas): use EModelEndpoint for schema access

* feat(runAssistant): use configured values from `librechat.yaml` for `pollIntervalMs` and `timeout`
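  A minimal sketch of how the configured values could drive the polling loop (default numbers are placeholders, not the project's actual defaults; required-action handling is omitted):

  async function pollRun(openai, thread_id, run_id, { pollIntervalMs = 750, timeout = 180000 } = {}) {
    const started = Date.now();
    while (Date.now() - started < timeout) {
      const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
      if (['completed', 'failed', 'cancelled', 'expired'].includes(run.status)) {
        return run; // terminal status reached
      }
      await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
    }
    throw new Error('Run polling timed out');
  }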

* fix: naming issue

* wip: revert landing

* 🎉 happy birthday LibreChat (#1768)

* happy birthday LibreChat

* Refactor endpoint condition in Landing component

* Update birthday message in Eng.tsx

* fix(/config): avoid nesting ternaries

* refactor(/config): check birthday

---------

Co-authored-by: Danny Avila <messagedaniel@protonmail.com>

* fix: landing

* fix: landing

* fix(useMessageHelpers): hardcoded check to use EModelEndpoint instead

* fix(ci): convo test revert to main

* fix(assistants/chat): fix issue where assistant_id was being saved as model for convo

* chore: add logging, race promises to prevent longer timeouts, explicitly set maxRetries and timeouts, robustly catch invalid abortRun params

* refactor: use recoil state for `showStopButton` and only show for assistants endpoint after syncing conversation data

* refactor: optimize abortRun strategy using localStorage, refactor `abortConversation` to use async/await and await the result, refactor how the abortKey cache is set for runs

* fix(checkMessageGaps): assign `assistant_id` to synced messages if defined; prevents UI from showing blank assistant for cancelled messages

* refactor: re-order sequence of chat route, only allow aborting messages after run is created, cancel abortRun if there was a cancelling error (likely because the run was already cancelled in the chat route), and add extra logging

* chore(typedefs): add httpAgent type to OpenAIClient

* refactor: use custom implementation of retrieving run with axios to allow for timing out run query
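  An illustrative version of the idea (header and base URL details are assumptions): fetch the run over plain HTTP so the request itself can time out instead of hanging on the SDK call.

  const axios = require('axios');

  async function retrieveRunWithTimeout({ apiKey, thread_id, run_id, timeout = 2000 }) {
    const url = `https://api.openai.com/v1/threads/${thread_id}/runs/${run_id}`;
    const { data } = await axios.get(url, {
      timeout, // abort if OpenAI does not respond in time
      headers: {
        Authorization: `Bearer ${apiKey}`,
        'OpenAI-Beta': 'assistants=v1',
      },
    });
    return data;
  }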

* fix(waitForRun): handle timed out run retrieval query

* refactor: update preset conditions:
- presets will retain settings when a different endpoint is selected; for existing convos, this applies either when the endpoint is modular or when switching to an assistant
- no longer use `navigateToConvo` on preset select

* fix: temporary calculator hack as it expects string input when invoked

* fix: cancel abortRun only when cancelling error is a result of the run already being cancelled

* chore: remove use of `fileMaxSizeMB` and total counterpart (redundant)

* docs: custom config documentation update

* docs: assistants api setup and dotenv, new custom config fields

* refactor(Switcher): make Assistant switcher sticky in SidePanel

* chore(useSSE): remove console log of data and message index

* refactor(AssistantPanel): button styling and add secondary select button to bottom of panel

* refactor(OpenAIClient): allow passing conversationId to RunManager through titleConvo and initializeLLM to properly record title context tokens used in cases where conversationId was not defined by the client

* feat(assistants): token tracking for assistant runs

* chore(spendTokens): improve logging

* feat: support/exclude specific assistant Ids

* chore: update `librechat.example.yaml`, optimize `AppService` handling, add new tests for `AppService`, improve missing/outdated config logging

* chore: mount docker logs to root of project

* chore: condense axios errors

* chore: bump vite

* chore: vite hot reload fix using latest version

* chore(getOpenAIModels): sort instruct models to the end of models list

* fix(assistants): user provided key

* fix(assistants): user provided key, invalidate more queries on revoke

---------

Co-authored-by: Marco Beretta <81851188+Berry-13@users.noreply.github.com>
Danny Avila 2024-02-13 20:42:27 -05:00 committed by GitHub
parent cd2786441a
commit ecd63eb9f1
316 changed files with 21873 additions and 6315 deletions

View file

@@ -1,5 +1,6 @@
const crypto = require('crypto');
const express = require('express');
const { Constants } = require('librechat-data-provider');
const { saveMessage, getConvoTitle, saveConvo, getConvo } = require('~/models');
const { handleError, sendMessage, createOnProgress, handleText } = require('~/server/utils');
const { setHeaders } = require('~/server/middleware');
@@ -27,7 +28,7 @@ router.post('/', setHeaders, async (req, res) => {
const conversationId = oldConversationId || crypto.randomUUID();
const isNewConversation = !oldConversationId;
const userMessageId = crypto.randomUUID();
const userParentMessageId = parentMessageId || '00000000-0000-0000-0000-000000000000';
const userParentMessageId = parentMessageId || Constants.NO_PARENT;
const userMessage = {
messageId: userMessageId,
sender: 'User',
@@ -209,7 +210,7 @@ const ask = async ({
});
res.end();
if (userParentMessageId == '00000000-0000-0000-0000-000000000000') {
if (userParentMessageId == Constants.NO_PARENT) {
// const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
const title = await response.details.title;
await saveConvo(user, {

View file

@@ -1,5 +1,6 @@
const express = require('express');
const crypto = require('crypto');
const express = require('express');
const { Constants } = require('librechat-data-provider');
const { handleError, sendMessage, createOnProgress, handleText } = require('~/server/utils');
const { saveMessage, getConvoTitle, saveConvo, getConvo } = require('~/models');
const { setHeaders } = require('~/server/middleware');
@@ -28,7 +29,7 @@ router.post('/', setHeaders, async (req, res) => {
const conversationId = oldConversationId || crypto.randomUUID();
const isNewConversation = !oldConversationId;
const userMessageId = messageId;
const userParentMessageId = parentMessageId || '00000000-0000-0000-0000-000000000000';
const userParentMessageId = parentMessageId || Constants.NO_PARENT;
let userMessage = {
messageId: userMessageId,
sender: 'User',
@@ -238,7 +239,7 @@ const ask = async ({
});
res.end();
if (userParentMessageId == '00000000-0000-0000-0000-000000000000') {
if (userParentMessageId == Constants.NO_PARENT) {
const title = await titleConvoBing({
text,
response: responseMessage,

View file

@@ -1,6 +1,6 @@
const express = require('express');
const router = express.Router();
const { getResponseSender } = require('librechat-data-provider');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { validateTools } = require('~/app');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
@@ -204,7 +204,7 @@ router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req,
});
res.end();
if (parentMessageId === '00000000-0000-0000-0000-000000000000' && newConvo) {
if (parentMessageId === Constants.NO_PARENT && newConvo) {
addTitle(req, {
text,
response,

View file

@@ -0,0 +1,201 @@
const { v4 } = require('uuid');
const express = require('express');
const { actionDelimiter } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { updateAssistant, getAssistant } = require('~/models/Assistant');
const { encryptMetadata } = require('~/server/services/ActionService');
const { logger } = require('~/config');
const router = express.Router();
/**
* Retrieves all user's actions
* @route GET /actions/
* @param {string} req.params.id - Assistant identifier.
* @returns {Action[]} 200 - success response - application/json
*/
router.get('/', async (req, res) => {
try {
res.json(await getActions({ user: req.user.id }));
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* Adds or updates actions for a specific assistant.
* @route POST /actions/:assistant_id
* @param {string} req.params.assistant_id - The ID of the assistant.
* @param {FunctionTool[]} req.body.functions - The functions to be added or updated.
* @param {string} [req.body.action_id] - Optional ID for the action.
* @param {ActionMetadata} req.body.metadata - Metadata for the action.
* @returns {Object} 200 - success response - application/json
*/
router.post('/:assistant_id', async (req, res) => {
try {
const { assistant_id } = req.params;
/** @type {{ functions: FunctionTool[], action_id: string, metadata: ActionMetadata }} */
const { functions, action_id: _action_id, metadata: _metadata } = req.body;
if (!functions.length) {
return res.status(400).json({ message: 'No functions provided' });
}
let metadata = encryptMetadata(_metadata);
const { domain } = metadata;
if (!domain) {
return res.status(400).json({ message: 'No domain provided' });
}
const action_id = _action_id ?? v4();
const initialPromises = [];
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
initialPromises.push(getAssistant({ assistant_id, user: req.user.id }));
initialPromises.push(openai.beta.assistants.retrieve(assistant_id));
!!_action_id && initialPromises.push(getActions({ user: req.user.id, action_id }, true));
/** @type {[AssistantDocument, Assistant, [Action|undefined]]} */
const [assistant_data, assistant, actions_result] = await Promise.all(initialPromises);
if (actions_result && actions_result.length) {
const action = actions_result[0];
metadata = { ...action.metadata, ...metadata };
}
if (!assistant) {
return res.status(404).json({ message: 'Assistant not found' });
}
const { actions: _actions = [] } = assistant_data ?? {};
const actions = [];
for (const action of _actions) {
const [action_domain, current_action_id] = action.split(actionDelimiter);
if (action_domain === domain && !_action_id) {
// TODO: dupe check on the frontend
return res.status(400).json({
message: `Action sets cannot have duplicate domains - ${domain} already exists on another action`,
});
}
if (current_action_id === action_id) {
continue;
}
actions.push(action);
}
actions.push(`${domain}${actionDelimiter}${action_id}`);
/** @type {{ tools: FunctionTool[] | { type: 'code_interpreter'|'retrieval'}[]}} */
const { tools: _tools = [] } = assistant;
const tools = _tools
.filter(
(tool) =>
!(
tool.function &&
(tool.function.name.includes(domain) || tool.function.name.includes(action_id))
),
)
.concat(
functions.map((tool) => ({
...tool,
function: {
...tool.function,
name: `${tool.function.name}${actionDelimiter}${domain}`,
},
})),
);
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
actions,
},
),
);
promises.push(openai.beta.assistants.update(assistant_id, { tools }));
promises.push(updateAction({ action_id, user: req.user.id }, { metadata, assistant_id }));
/** @type {[AssistantDocument, Assistant, Action]} */
const resolved = await Promise.all(promises);
const sensitiveFields = ['api_key', 'oauth_client_id', 'oauth_client_secret'];
for (let field of sensitiveFields) {
if (resolved[2].metadata[field]) {
delete resolved[2].metadata[field];
}
}
res.json(resolved);
} catch (error) {
const message = 'Trouble updating the Assistant Action';
logger.error(message, error);
res.status(500).json({ message });
}
});
/**
* Deletes an action for a specific assistant.
* @route DELETE /actions/:assistant_id/:action_id
* @param {string} req.params.assistant_id - The ID of the assistant.
* @param {string} req.params.action_id - The ID of the action to delete.
* @returns {Object} 200 - success response - application/json
*/
router.delete('/:assistant_id/:action_id', async (req, res) => {
try {
const { assistant_id, action_id } = req.params;
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const initialPromises = [];
initialPromises.push(getAssistant({ assistant_id, user: req.user.id }));
initialPromises.push(openai.beta.assistants.retrieve(assistant_id));
/** @type {[AssistantDocument, Assistant]} */
const [assistant_data, assistant] = await Promise.all(initialPromises);
const { actions } = assistant_data ?? {};
const { tools = [] } = assistant ?? {};
let domain = '';
const updatedActions = actions.filter((action) => {
if (action.includes(action_id)) {
[domain] = action.split(actionDelimiter);
return false;
}
return true;
});
const updatedTools = tools.filter(
(tool) => !(tool.function && tool.function.name.includes(domain)),
);
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
actions: updatedActions,
},
),
);
promises.push(openai.beta.assistants.update(assistant_id, { tools: updatedTools }));
promises.push(deleteAction({ action_id, user: req.user.id }));
await Promise.all(promises);
res.status(200).json({ message: 'Action deleted successfully' });
} catch (error) {
const message = 'Trouble deleting the Assistant Action';
logger.error(message, error);
res.status(500).json({ message });
}
});
module.exports = router;

View file

@@ -1,9 +1,31 @@
const OpenAI = require('openai');
const multer = require('multer');
const express = require('express');
const { FileContext, EModelEndpoint } = require('librechat-data-provider');
const { updateAssistant, getAssistants } = require('~/models/Assistant');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { uploadImageBuffer } = require('~/server/services/Files/process');
const { deleteFileByFilter } = require('~/models/File');
const { logger } = require('~/config');
const actions = require('./actions');
const tools = require('./tools');
const upload = multer();
const router = express.Router();
/**
* Assistant actions route.
* @route GET|POST /assistants/actions
*/
router.use('/actions', actions);
/**
* Create an assistant.
* @route GET /assistants/tools
* @returns {TPlugin[]} 200 - application/json
*/
router.use('/tools', tools);
/**
* Create an assistant.
* @route POST /assistants
@@ -12,12 +34,25 @@ const router = express.Router();
*/
router.post('/', async (req, res) => {
try {
const openai = new OpenAI(process.env.OPENAI_API_KEY);
const assistantData = req.body;
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const { tools = [], ...assistantData } = req.body;
assistantData.tools = tools
.map((tool) => {
if (typeof tool !== 'string') {
return tool;
}
return req.app.locals.availableTools[tool];
})
.filter((tool) => tool);
const assistant = await openai.beta.assistants.create(assistantData);
logger.debug('/assistants/', assistant);
res.status(201).json(assistant);
} catch (error) {
logger.error('[/assistants] Error creating assistant', error);
res.status(500).json({ error: error.message });
}
});
@@ -30,11 +65,14 @@ router.post('/', async (req, res) => {
*/
router.get('/:id', async (req, res) => {
try {
const openai = new OpenAI(process.env.OPENAI_API_KEY);
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const assistant_id = req.params.id;
const assistant = await openai.beta.assistants.retrieve(assistant_id);
res.json(assistant);
} catch (error) {
logger.error('[/assistants/:id] Error retrieving assistant', error);
res.status(500).json({ error: error.message });
}
});
@@ -48,12 +86,25 @@ router.get('/:id', async (req, res) => {
*/
router.patch('/:id', async (req, res) => {
try {
const openai = new OpenAI(process.env.OPENAI_API_KEY);
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const assistant_id = req.params.id;
const updateData = req.body;
updateData.tools = (updateData.tools ?? [])
.map((tool) => {
if (typeof tool !== 'string') {
return tool;
}
return req.app.locals.availableTools[tool];
})
.filter((tool) => tool);
const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData);
res.json(updatedAssistant);
} catch (error) {
logger.error('[/assistants/:id] Error updating assistant', error);
res.status(500).json({ error: error.message });
}
});
@@ -66,12 +117,15 @@ router.patch('/:id', async (req, res) => {
*/
router.delete('/:id', async (req, res) => {
try {
const openai = new OpenAI(process.env.OPENAI_API_KEY);
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const assistant_id = req.params.id;
const deletionStatus = await openai.beta.assistants.del(assistant_id);
res.json(deletionStatus);
} catch (error) {
res.status(500).json({ error: error.message });
logger.error('[/assistants/:id] Error deleting assistant', error);
res.status(500).json({ error: 'Error deleting assistant' });
}
});
@@ -79,22 +133,121 @@ router.delete('/:id', async (req, res) => {
* Returns a list of assistants.
* @route GET /assistants
* @param {AssistantListParams} req.query - The assistant list parameters for pagination and sorting.
* @returns {Array<Assistant>} 200 - success response - application/json
* @returns {AssistantListResponse} 200 - success response - application/json
*/
router.get('/', async (req, res) => {
try {
const openai = new OpenAI(process.env.OPENAI_API_KEY);
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const { limit, order, after, before } = req.query;
const assistants = await openai.beta.assistants.list({
const response = await openai.beta.assistants.list({
limit,
order,
after,
before,
});
res.json(assistants);
/** @type {AssistantListResponse} */
let body = response.body;
if (req.app.locals?.[EModelEndpoint.assistants]) {
/** @type {Partial<TAssistantEndpoint>} */
const assistantsConfig = req.app.locals[EModelEndpoint.assistants];
const { supportedIds, excludedIds } = assistantsConfig;
if (supportedIds?.length) {
body.data = body.data.filter((assistant) => supportedIds.includes(assistant.id));
} else if (excludedIds?.length) {
body.data = body.data.filter((assistant) => !excludedIds.includes(assistant.id));
}
}
res.json(body);
} catch (error) {
logger.error('[/assistants] Error listing assistants', error);
res.status(500).json({ error: error.message });
}
});
/**
* Returns a list of the user's assistant documents (metadata saved to database).
* @route GET /assistants/documents
* @returns {AssistantDocument[]} 200 - success response - application/json
*/
router.get('/documents', async (req, res) => {
try {
res.json(await getAssistants({ user: req.user.id }));
} catch (error) {
logger.error('[/assistants/documents] Error listing assistant documents', error);
res.status(500).json({ error: error.message });
}
});
/**
* Uploads and updates an avatar for a specific assistant.
* @route POST /avatar/:assistant_id
* @param {string} req.params.assistant_id - The ID of the assistant.
* @param {Express.Multer.File} req.file - The avatar image file.
* @param {string} [req.body.metadata] - Optional metadata for the assistant's avatar.
* @returns {Object} 200 - success response - application/json
*/
router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) => {
try {
const { assistant_id } = req.params;
if (!assistant_id) {
return res.status(400).json({ message: 'Assistant ID is required' });
}
let { metadata: _metadata = '{}' } = req.body;
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const image = await uploadImageBuffer({ req, context: FileContext.avatar });
try {
_metadata = JSON.parse(_metadata);
} catch (error) {
logger.error('[/avatar/:assistant_id] Error parsing metadata', error);
_metadata = {};
}
if (_metadata.avatar && _metadata.avatar_source) {
const { deleteFile } = getStrategyFunctions(_metadata.avatar_source);
try {
await deleteFile(req, { filepath: _metadata.avatar });
await deleteFileByFilter({ filepath: _metadata.avatar });
} catch (error) {
logger.error('[/avatar/:assistant_id] Error deleting old avatar', error);
}
}
const metadata = {
..._metadata,
avatar: image.filepath,
avatar_source: req.app.locals.fileStrategy,
};
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
avatar: {
filepath: image.filepath,
source: req.app.locals.fileStrategy,
},
},
),
);
promises.push(openai.beta.assistants.update(assistant_id, { metadata }));
const resolved = await Promise.all(promises);
res.status(201).json(resolved[1]);
} catch (error) {
const message = 'An error occurred while updating the Assistant Avatar';
logger.error(message, error);
res.status(500).json({ message });
}
});
module.exports = router;

View file

@@ -1,64 +1,217 @@
const crypto = require('crypto');
const OpenAI = require('openai');
const { logger } = require('~/config');
const { sendMessage } = require('../../utils');
const { initThread, createRun, handleRun } = require('../../services/AssistantService');
const { v4 } = require('uuid');
const express = require('express');
const { EModelEndpoint, Constants, RunStatus, CacheKeys } = require('librechat-data-provider');
const {
initThread,
recordUsage,
saveUserMessage,
checkMessageGaps,
addThreadMetadata,
saveAssistantMessage,
} = require('~/server/services/Threads');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistant');
const { createRun, sleep } = require('~/server/services/Runs');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { sendMessage } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();
const {
setHeaders,
// handleAbort,
// handleAbortError,
handleAbort,
handleAbortError,
// validateEndpoint,
// buildEndpointOption,
// createAbortController,
} = require('../../middleware');
buildEndpointOption,
} = require('~/server/middleware');
// const thread = {
// id: 'thread_LexzJUVugYFqfslS7c7iL3Zo',
// "thread_nZoiCbPauU60LqY1Q0ME1elg"
// };
router.post('/abort', handleAbort());
/**
* Chat with an assistant.
* @route POST /
* @desc Chat with an assistant
* @access Public
* @param {express.Request} req - The request object, containing the request data.
* @param {express.Response} res - The response object, used to send back a response.
* @returns {void}
*/
router.post('/', setHeaders, async (req, res) => {
try {
logger.debug('[/assistants/chat/] req.body', req.body);
// test message:
// How many polls of 500 ms intervals are there in 18 seconds?
router.post('/', buildEndpointOption, setHeaders, async (req, res) => {
logger.debug('[/assistants/chat/] req.body', req.body);
const {
text,
model,
files = [],
promptPrefix,
assistant_id,
instructions,
thread_id: _thread_id,
messageId: _messageId,
conversationId: convoId,
parentMessageId: _parentId = Constants.NO_PARENT,
} = req.body;
const { assistant_id, messages, text: userMessage, messageId } = req.body;
const conversationId = req.body.conversationId || crypto.randomUUID();
// let thread_id = req.body.thread_id ?? 'thread_nZoiCbPauU60LqY1Q0ME1elg'; // for testing
let thread_id = req.body.thread_id;
/** @type {Partial<TAssistantEndpoint>} */
const assistantsConfig = req.app.locals?.[EModelEndpoint.assistants];
if (assistantsConfig) {
const { supportedIds, excludedIds } = assistantsConfig;
const error = { message: 'Assistant not supported' };
if (supportedIds?.length && !supportedIds.includes(assistant_id)) {
return await handleAbortError(res, req, error, {
sender: 'System',
conversationId: convoId,
messageId: v4(),
parentMessageId: _messageId,
error,
});
} else if (excludedIds?.length && excludedIds.includes(assistant_id)) {
return await handleAbortError(res, req, error, {
sender: 'System',
conversationId: convoId,
messageId: v4(),
parentMessageId: _messageId,
});
}
}
/** @type {OpenAIClient} */
let openai;
/** @type {string|undefined} - the current thread id */
let thread_id = _thread_id;
/** @type {string|undefined} - the current run id */
let run_id;
/** @type {string|undefined} - the parent messageId */
let parentMessageId = _parentId;
/** @type {TMessage[]} */
let previousMessages = [];
const userMessageId = v4();
const responseMessageId = v4();
/** @type {string} - The conversation UUID - created if undefined */
const conversationId = convoId ?? v4();
const cache = getLogStores(CacheKeys.ABORT_KEYS);
const cacheKey = `${req.user.id}:${conversationId}`;
try {
if (convoId && !_thread_id) {
throw new Error('Missing thread_id for existing conversation');
}
if (!assistant_id) {
throw new Error('Missing assistant_id');
}
const openai = new OpenAI(process.env.OPENAI_API_KEY);
console.log(messages);
/** @type {{ openai: OpenAIClient }} */
const { openai: _openai, client } = await initializeClient({
req,
res,
endpointOption: req.body.endpointOption,
initAppClient: true,
});
const initThreadBody = {
messages: [
{
role: 'user',
content: userMessage,
metadata: {
messageId,
},
},
],
openai = _openai;
// if (thread_id) {
// previousMessages = await checkMessageGaps({ openai, thread_id, conversationId });
// }
if (previousMessages.length) {
parentMessageId = previousMessages[previousMessages.length - 1].messageId;
}
const userMessage = {
role: 'user',
content: text,
metadata: {
messageId: userMessageId,
},
};
let thread_file_ids = [];
if (convoId) {
const convo = await getConvo(req.user.id, convoId);
if (convo && convo.file_ids) {
thread_file_ids = convo.file_ids;
}
}
const file_ids = files.map(({ file_id }) => file_id);
if (file_ids.length || thread_file_ids.length) {
userMessage.file_ids = file_ids;
openai.attachedFileIds = new Set([...file_ids, ...thread_file_ids]);
}
// TODO: may allow multiple messages to be created beforehand in a future update
const initThreadBody = {
messages: [userMessage],
metadata: {
user: req.user.id,
conversationId,
},
};
const result = await initThread({ openai, body: initThreadBody, thread_id });
// const { messages: _messages } = result;
thread_id = result.thread_id;
createOnTextProgress({
openai,
conversationId,
userMessageId,
messageId: responseMessageId,
thread_id,
});
const requestMessage = {
user: req.user.id,
text,
messageId: userMessageId,
parentMessageId,
// TODO: make sure client sends correct format for `files`, use zod
files,
file_ids,
conversationId,
isCreatedByUser: true,
assistant_id,
thread_id,
model: assistant_id,
};
previousMessages.push(requestMessage);
await saveUserMessage({ ...requestMessage, model });
const conversation = {
conversationId,
// TODO: title feature
title: 'New Chat',
endpoint: EModelEndpoint.assistants,
promptPrefix: promptPrefix,
instructions: instructions,
assistant_id,
// model,
};
if (file_ids.length) {
conversation.file_ids = file_ids;
}
/** @type {CreateRunBody} */
const body = {
assistant_id,
model,
};
if (promptPrefix) {
body.additional_instructions = promptPrefix;
}
if (instructions) {
body.instructions = instructions;
}
/* NOTE:
* By default, a Run will use the model and tools configuration specified in Assistant object,
* but you can override most of these when creating the Run for added flexibility:
@@ -66,43 +219,160 @@ router.post('/', setHeaders, async (req, res) => {
const run = await createRun({
openai,
thread_id,
body: { assistant_id, model: 'gpt-3.5-turbo-1106' },
body,
});
const response = await handleRun({ openai, thread_id, run_id: run.id });
run_id = run.id;
await cache.set(cacheKey, `${thread_id}:${run_id}`);
sendMessage(res, {
sync: true,
conversationId,
// messages: previousMessages,
requestMessage,
responseMessage: {
user: req.user.id,
messageId: openai.responseMessage.messageId,
parentMessageId: userMessageId,
conversationId,
assistant_id,
thread_id,
model: assistant_id,
},
});
// todo: retry logic
let response = await runAssistant({ openai, thread_id, run_id });
logger.debug('[/assistants/chat/] response', response);
if (response.run.status === RunStatus.IN_PROGRESS) {
response = await runAssistant({
openai,
thread_id,
run_id,
in_progress: openai.in_progress,
});
}
/** @type {ResponseMessage} */
const responseMessage = {
...openai.responseMessage,
parentMessageId: userMessageId,
conversationId,
user: req.user.id,
assistant_id,
thread_id,
model: assistant_id,
};
// TODO: token count from usage returned in run
// TODO: parse responses, save to db, send to user
sendMessage(res, {
title: 'New Chat',
final: true,
conversation: {
conversationId: 'fake-convo-id',
title: 'New Chat',
},
conversation,
requestMessage: {
messageId: 'fake-user-message-id',
parentMessageId: '00000000-0000-0000-0000-000000000000',
conversationId: 'fake-convo-id',
sender: 'User',
text: req.body.text,
isCreatedByUser: true,
},
responseMessage: {
messageId: 'fake-response-id',
conversationId: 'fake-convo-id',
parentMessageId: 'fake-user-message-id',
isCreatedByUser: false,
isEdited: false,
model: 'gpt-3.5-turbo-1106',
sender: 'Assistant',
text: response.choices[0].text,
parentMessageId,
thread_id,
},
});
res.end();
await saveAssistantMessage({ ...responseMessage, model });
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
addTitle(req, {
text,
responseText: openai.responseText,
conversationId,
client,
});
}
await addThreadMetadata({
openai,
thread_id,
messageId: responseMessage.messageId,
messages: response.messages,
});
if (!response.run.usage) {
await sleep(3000);
const completedRun = await openai.beta.threads.runs.retrieve(thread_id, run.id);
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,
user: req.user.id,
model: completedRun.model ?? model,
conversationId,
});
}
} else {
await recordUsage({
...response.run.usage,
user: req.user.id,
model: response.run.model ?? model,
conversationId,
});
}
} catch (error) {
// res.status(500).json({ error: error.message });
if (error.message === 'Run cancelled') {
return res.end();
}
logger.error('[/assistants/chat/]', error);
res.end();
if (!openai || !thread_id || !run_id) {
return res.status(500).json({ error: 'The Assistant run failed to initialize' });
}
try {
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
}
await sleep(2000);
try {
const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
} catch (error) {
logger.error('[/assistants/chat/] Error fetching or processing run', error);
}
try {
const runMessages = await checkMessageGaps({
openai,
run_id,
thread_id,
conversationId,
latestMessageId: responseMessageId,
});
const finalEvent = {
title: 'New Chat',
final: true,
conversation: await getConvo(req.user.id, conversationId),
runMessages,
};
if (res.headersSent && finalEvent) {
return sendMessage(res, finalEvent);
}
res.json(finalEvent);
} catch (error) {
logger.error('[/assistants/chat/] Error finalizing error process', error);
return res.status(500).json({ error: 'The Assistant run failed' });
}
}
});

View file

@@ -0,0 +1,8 @@
const express = require('express');
const { getAvailableTools } = require('~/server/controllers/PluginController');
const router = express.Router();
router.get('/', getAvailableTools);
module.exports = router;

View file

@@ -1,10 +1,10 @@
const express = require('express');
const { CacheKeys } = require('librechat-data-provider');
const { getConvosByPage, deleteConvos } = require('~/models/Conversation');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { sleep } = require('~/server/services/AssistantService');
const { sleep } = require('~/server/services/Runs/handle');
const getLogStores = require('~/cache/getLogStores');
const { getConvo, saveConvo } = require('~/models');
const { logger } = require('~/config');
const router = express.Router();
@@ -47,28 +47,37 @@ router.post('/gen_title', async (req, res) => {
await titleCache.delete(key);
res.status(200).json({ title });
} else {
res
.status(404)
.json({
message: 'Title not found or method not implemented for the conversation\'s endpoint',
});
res.status(404).json({
message: 'Title not found or method not implemented for the conversation\'s endpoint',
});
}
});
router.post('/clear', async (req, res) => {
let filter = {};
const { conversationId, source } = req.body.arg;
const { conversationId, source, thread_id } = req.body.arg;
if (conversationId) {
filter = { conversationId };
}
// for debugging deletion source
// logger.debug('source:', source);
if (source === 'button' && !conversationId) {
return res.status(200).send('No conversationId provided');
}
if (thread_id) {
/** @type {{ openai: OpenAI}} */
const { openai } = await initializeClient({ req, res });
try {
const response = await openai.beta.threads.del(thread_id);
logger.debug('Deleted OpenAI thread:', response);
} catch (error) {
logger.error('Error deleting OpenAI thread:', error);
}
}
// for debugging deletion source
// logger.debug('source:', source);
try {
const dbResponse = await deleteConvos(req.user.id, filter);
res.status(201).json(dbResponse);

View file

@@ -1,38 +1,36 @@
const express = require('express');
const multer = require('multer');
const uploadAvatar = require('~/server/services/Files/images/avatar');
const { requireJwtAuth } = require('~/server/middleware/');
const User = require('~/models/User');
const express = require('express');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
const { logger } = require('~/config');
const upload = multer();
const router = express.Router();
router.post('/', requireJwtAuth, upload.single('input'), async (req, res) => {
router.post('/', upload.single('input'), async (req, res) => {
try {
const userId = req.user.id;
const { manual } = req.body;
const input = req.file.buffer;
if (!userId) {
throw new Error('User ID is undefined');
}
// TODO: do not use Model directly, instead use a service method that uses the model
const user = await User.findById(userId).lean();
if (!user) {
throw new Error('User not found');
}
const url = await uploadAvatar({
input,
const fileStrategy = req.app.locals.fileStrategy;
const webPBuffer = await resizeAvatar({
userId,
manual,
fileStrategy: req.app.locals.fileStrategy,
input,
});
const { processAvatar } = getStrategyFunctions(fileStrategy);
const url = await processAvatar({ buffer: webPBuffer, userId, manual });
res.json({ url });
} catch (error) {
res.status(500).json({ message: 'An error occurred while uploading the profile picture' });
const message = 'An error occurred while uploading the profile picture';
logger.error(message, error);
res.status(500).json({ message });
}
});

View file

@@ -1,14 +1,17 @@
const { z } = require('zod');
const axios = require('axios');
const fs = require('fs').promises;
const express = require('express');
const { FileSources } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { deleteFiles, getFiles } = require('~/models');
const { isUUID } = require('librechat-data-provider');
const {
filterFile,
processFileUpload,
processDeleteRequest,
} = require('~/server/services/Files/process');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
const router = express.Router();
const isUUID = z.string().uuid();
router.get('/', async (req, res) => {
try {
const files = await getFiles({ user: req.user.id });
@@ -19,6 +22,15 @@ router.get('/', async (req, res) => {
}
});
router.get('/config', async (req, res) => {
try {
res.status(200).json(req.app.locals.fileConfig);
} catch (error) {
logger.error('[/files] Error getting fileConfig', error);
res.status(400).json({ message: 'Error in request', error: error.message });
}
});
router.delete('/', async (req, res) => {
try {
const { files: _files } = req.body;
@@ -31,6 +43,11 @@ router.delete('/', async (req, res) => {
if (!file.filepath) {
return false;
}
if (/^file-/.test(file.file_id)) {
return true;
}
return isUUID.safeParse(file.file_id).success;
});
@@ -39,29 +56,8 @@ router.delete('/', async (req, res) => {
return;
}
const file_ids = files.map((file) => file.file_id);
const deletionMethods = {};
const promises = [];
promises.push(await deleteFiles(file_ids));
await processDeleteRequest({ req, files });
for (const file of files) {
const source = file.source ?? FileSources.local;
if (deletionMethods[source]) {
promises.push(deletionMethods[source](req, file));
continue;
}
const { deleteFile } = getStrategyFunctions(source);
if (!deleteFile) {
throw new Error(`Delete function not implemented for ${source}`);
}
deletionMethods[source] = deleteFile;
promises.push(deleteFile(req, file));
}
await Promise.all(promises);
res.status(200).json({ message: 'Files deleted successfully' });
} catch (error) {
logger.error('[/files] Error deleting files:', error);
@@ -69,4 +65,69 @@ router.delete('/', async (req, res) => {
}
});
router.get('/download/:fileId', async (req, res) => {
try {
const { fileId } = req.params;
const options = {
headers: {
// TODO: Client initialization for OpenAI API Authentication
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
},
responseType: 'stream',
};
const fileResponse = await axios.get(`https://api.openai.com/v1/files/${fileId}`, {
headers: options.headers,
});
const { filename } = fileResponse.data;
const response = await axios.get(`https://api.openai.com/v1/files/${fileId}/content`, options);
res.setHeader('Content-Disposition', `attachment; filename="${filename}"`);
response.data.pipe(res);
} catch (error) {
console.error('Error downloading file:', error);
res.status(500).send('Error downloading file');
}
});
router.post('/', async (req, res) => {
const file = req.file;
const metadata = req.body;
let cleanup = true;
try {
filterFile({ req, file });
metadata.temp_file_id = metadata.file_id;
metadata.file_id = req.file_id;
await processFileUpload({ req, res, file, metadata });
} catch (error) {
let message = 'Error processing file';
logger.error('[/files] Error processing file:', error);
cleanup = false;
if (error.message?.includes('file_ids')) {
message += ': ' + error.message;
}
// TODO: delete remote file if it exists
try {
await fs.unlink(file.path);
} catch (error) {
logger.error('[/files] Error deleting file:', error);
}
res.status(500).json({ message });
}
if (cleanup) {
try {
await fs.unlink(file.path);
} catch (error) {
logger.error('[/files/images] Error deleting file after file processing:', error);
}
}
});
module.exports = router;

View file

@@ -1,49 +1,29 @@
const { z } = require('zod');
const path = require('path');
const fs = require('fs').promises;
const express = require('express');
const upload = require('./multer');
const { processImageUpload } = require('~/server/services/Files/process');
const { filterFile, processImageFile } = require('~/server/services/Files/process');
const { logger } = require('~/config');
const router = express.Router();
router.post('/', upload.single('file'), async (req, res) => {
const file = req.file;
router.post('/', async (req, res) => {
const metadata = req.body;
// TODO: add file size/type validation
const uuidSchema = z.string().uuid();
try {
if (!file) {
throw new Error('No file provided');
}
filterFile({ req, file: req.file, image: true });
if (!metadata.file_id) {
throw new Error('No file_id provided');
}
if (!metadata.width) {
throw new Error('No width provided');
}
if (!metadata.height) {
throw new Error('No height provided');
}
/* parse to validate api call */
uuidSchema.parse(metadata.file_id);
metadata.temp_file_id = metadata.file_id;
metadata.file_id = req.file_id;
await processImageUpload({ req, res, file, metadata });
await processImageFile({ req, res, file: req.file, metadata });
} catch (error) {
// TODO: delete remote file if it exists
logger.error('[/files/images] Error processing file:', error);
try {
const filepath = path.join(
req.app.locals.paths.imageOutput,
req.user.id,
path.basename(file.filename),
path.basename(req.file.filename),
);
await fs.unlink(filepath);
} catch (error) {
@@ -51,16 +31,6 @@ router.post('/', upload.single('file'), async (req, res) => {
}
res.status(500).json({ message: 'Error processing file' });
}
// do this if strategy is not local
// finally {
// try {
// // await fs.unlink(file.path);
// } catch (error) {
// logger.error('[/files/images] Error deleting file:', error);
// }
// }
});
module.exports = router;

View file

@@ -1,24 +1,27 @@
const express = require('express');
const router = express.Router();
const {
uaParser,
checkBan,
requireJwtAuth,
// concurrentLimiter,
// messageIpLimiter,
// messageUserLimiter,
} = require('../../middleware');
const createMulterInstance = require('./multer');
const { uaParser, checkBan, requireJwtAuth, createFileLimiters } = require('~/server/middleware');
const files = require('./files');
const images = require('./images');
const avatar = require('./avatar');
router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);
const initialize = async () => {
const router = express.Router();
router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);
router.use('/', files);
router.use('/images', images);
router.use('/images/avatar', avatar);
const upload = await createMulterInstance();
const { fileUploadIpLimiter, fileUploadUserLimiter } = createFileLimiters();
router.post('*', fileUploadIpLimiter, fileUploadUserLimiter);
router.post('/', upload.single('file'));
router.post('/images', upload.single('file'));
module.exports = router;
router.use('/', files);
router.use('/images', images);
router.use('/images/avatar', avatar);
return router;
};
module.exports = { initialize };

View file

@@ -2,13 +2,12 @@ const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const multer = require('multer');
const supportedTypes = ['image/jpeg', 'image/jpg', 'image/png', 'image/webp'];
const sizeLimit = 20 * 1024 * 1024; // 20 MB
const { fileConfig: defaultFileConfig, mergeFileConfig } = require('librechat-data-provider');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const storage = multer.diskStorage({
destination: function (req, file, cb) {
const outputPath = path.join(req.app.locals.paths.imageOutput, 'temp');
const outputPath = path.join(req.app.locals.paths.uploads, 'temp', req.user.id);
if (!fs.existsSync(outputPath)) {
fs.mkdirSync(outputPath, { recursive: true });
}
@@ -16,22 +15,30 @@ const storage = multer.diskStorage({
},
filename: function (req, file, cb) {
req.file_id = crypto.randomUUID();
const fileExt = path.extname(file.originalname);
cb(null, `img-${req.file_id}${fileExt}`);
cb(null, `${file.originalname}`);
},
});
const fileFilter = (req, file, cb) => {
if (!supportedTypes.includes(file.mimetype)) {
return cb(
new Error('Unsupported file type. Only JPEG, JPG, PNG, and WEBP files are allowed.'),
false,
);
if (!file) {
return cb(new Error('No file provided'), false);
}
if (!defaultFileConfig.checkType(file.mimetype)) {
return cb(new Error('Unsupported file type: ' + file.mimetype), false);
}
cb(null, true);
};
const upload = multer({ storage, fileFilter, limits: { fileSize: sizeLimit } });
const createMulterInstance = async () => {
const customConfig = await getCustomConfig();
const fileConfig = mergeFileConfig(customConfig?.fileConfig);
return multer({
storage,
fileFilter,
limits: { fileSize: fileConfig.serverFileSizeLimit },
});
};
module.exports = upload;
module.exports = createMulterInstance;