feat: Assistants API, General File Support, Side Panel, File Explorer (#1696)

* feat: assistant name/icon in Landing & Header

* feat: assistant name in textarea placeholder, and use `Assistant` as default name

* feat: display non-image files in user messages

* fix: only render files if files.length is > 0

* refactor(config -> file-config): move file related configuration values to separate module, add excel types

* chore: spreadsheet file rendering

* fix(Landing): dark mode style for Assistant Name

* refactor: move progress incrementing to own hook, start smaller, cap near limit (1)

* refactor(useContentHandler): add empty Text part if last part was completed tool or image

* chore: add accordion trigger border styling for dark mode

* feat: Assistant Builder model selection

* chore: use Spinner when Assistant is mutating

* fix(get/assistants): return correct response object `AssistantListResponse`

* refactor(Spinner): pass size as prop

* refactor: make assistant crud mutations optimistic, add types for options

* chore: remove assistants route and view

* chore: move assistant builder components to separate directory

* feat(ContextButton): delete Assistant via context button/dialog, add localization

* refactor: conditionally show use and context menu buttons, add localization for create assistant

* feat: save side panel states to localStorage

* style(SidePanel): improve avatar menu and assistant select styling for dark mode

* refactor: make NavToggle reusable for either side (left or right), add SidePanel Toggle with ability to close it completely

* fix: resize handle and navToggle behavior

* fix(/avatar/:assistant_id): await `deleteFile` and assign unique name to uploaded image

* WIP: file UI components from PR #576

* refactor(OpenAIMinimalIcon): pass className

* feat: formatDate helper fn

* feat: DataTableColumnHeader

* feat: add row selection, formatted row values, number of rows selected

* WIP: add files to Side panel temporarily

* feat: `LB_QueueAsyncCall`: Leaky Bucket queue for external APIs, use in `processDeleteRequest`
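
A minimal sketch of the leaky-bucket idea (illustrative only; the actual `LB_QueueAsyncCall` API may differ, and `deleteRemoteFile` below is hypothetical): queued async calls drain at a fixed rate so bursts of deletions do not hammer the external API.

```js
// Illustrative leaky-bucket queue: one queued call "leaks" out per interval.
function createLeakyBucketQueue({ intervalMs = 200 } = {}) {
  const queue = [];
  let draining = false;

  const drain = () => {
    if (queue.length === 0) {
      draining = false;
      return;
    }
    const { fn, args, resolve, reject } = queue.shift();
    fn(...args).then(resolve).catch(reject);
    setTimeout(drain, intervalMs);
  };

  // Enqueue an async call; it runs no sooner than one call per intervalMs.
  return function enqueue(fn, ...args) {
    return new Promise((resolve, reject) => {
      queue.push({ fn, args, resolve, reject });
      if (!draining) {
        draining = true;
        drain();
      }
    });
  };
}

// Hypothetical usage when deleting many files against an external API:
// const enqueue = createLeakyBucketQueue({ intervalMs: 250 });
// await Promise.allSettled(files.map((f) => enqueue(deleteRemoteFile, f.file_id)));
```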

* fix(TFile): correct `source` type with `FileSources`

* fix(useFileHandling): use `continue` instead of return when iterating multiple files, add file type to extendedFile

* chore: add generic setter type

* refactor(processDeleteRequest): settle promises to prevent rejections from processing deletions, log errors
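
The pattern, sketched with `Promise.allSettled` (the real `processDeleteRequest` does more than this):

```js
// Settle every deletion so a single failure no longer rejects the whole batch.
async function settleDeletions(deletionPromises, logger = console) {
  const results = await Promise.allSettled(deletionPromises);
  for (const result of results) {
    if (result.status === 'rejected') {
      logger.error('[processDeleteRequest] deletion failed:', result.reason);
    }
  }
  return results;
}
```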

* feat: `useFileDeletion` to reuse file deletion logic

* refactor(useFileDeletion): make `setFiles` an optional param and use object as param

* feat: useDeleteFilesFromTable

* feat: use real `files` data and add deletion action to data table

* fix(Table): make headers sticky

* feat: add dynamic filtering for columns; only show Host or OpenAI storage types to the user

* style(DropdownMenu): replace `slate` with `gray`

* style(DataTable): apply dark mode themes and other misc styling

* style(Columns): add color to OpenAI Storage option

* refactor(FileContainer): make file preview reusable

* refactor(Images): make image preview reusable

* refactor(FilePreview): make file prop optional for FileIcon and FilePreview, fix relative style

* feat(Columns): add file/image previews, set a minimum size to show for file size in bytes

* WIP: File Panel with real files and formatted

* feat: open files dialog from panel

* style: file data table mobile and general column styling fixes

* refactor(api/files): return files sorted by the most recently updated

* refactor: provide fileMap through context to prevent re-selecting files to map in different areas; remove unused imports commented out in PanelColumns

* refactor(ExtendFile): make File type optional, add `attached` to prevent attached files from being deleted on remove, make Message.files a partial TFile type

* feat: attach files through file panel

* refactor(useFileHandling): move files to the start of cache list when uploaded

* refactor(useDeleteFilesMutation): delete files from cache when successfully deleted from server

* fix(FileRow): handle possible edge case of duplication due to attaching recently uploaded file

* style(SidePanel): make resize grip border transparent, remove unnecessary styling on close sidepanel button

* feat: action utilities and tests

* refactor(actions): add `ValidationResult` type and change wording for no server URL found

* refactor(actions): check for empty server URL

* fix(data-provider): revert tsconfig to fix type issue resolution

* feat(client): first pass of actions input for assistants

* refactor(FunctionSignature): change method to output object instead of string

* refactor(models/Assistant): add actions field to schema, use searchParams object for methods, and add `getAssistant`

* feat: post actions input first pass
- create new Action document
- add actions to Assistant DB document
- create /action/:assistant_id POST route
- pass more props down from PanelSwitcher, derive assistant_id from switcher
- move privacy policy to ActionInput
- reset data on input change/validation
- add `useUpdateAction`
- conform FunctionSignature type to FunctionTool
- add action, assistant doc, update hook related types

* refactor: optimize assistant/actions relationship
- pass domain in metadata as hostname and not a URL
- include domain in tool name
- add `getActions` for actions retrieval by user
- add `getAssistants` for assistant docs retrieval by user
- add `assistant_id` to Action schema
- move actions to own module as a subroute to `api/assistants`
- add `useGetActionsQuery` and `useGetAssistantDocsQuery` hooks
- fix Action type def

* feat: show assistant actions in assistant builder

* feat: switch to actions on action click, editing action styling

* fix: add Assistant state for builder panel to allow immediate selection of newly created assistants and to retain the current assistant when switching to a different panel within the builder

* refactor(SidePanel/NavToggle): offset less from right when SidePanel is completely collapsed

* chore: rename `processActions` -> `processRequiredActions`

* chore: rename Assistant API Action to RequiredAction

* refactor(actions): avoid nesting actual API params under generic `requestBody` to optimize LLM token usage

* fix(handleTools): avoid calling `validTool` if not defined, add optional param to skip the loading of specs, which throws an error in the context of assistants

* WIP: working first pass of toolCalls generated from openapi specs

* WIP: first pass ToolCall styling

* feat: programmatic iv encryption/decryption helpers
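
A generic sketch of IV-based helpers using Node's `crypto` module; the algorithm, key derivation, and storage format here are assumptions rather than the PR's exact implementation:

```js
const crypto = require('crypto');

const ALGORITHM = 'aes-256-cbc';
// Assumed key source for this sketch: derive a 32-byte key from an env secret.
const key = crypto.createHash('sha256').update(process.env.CREDS_KEY ?? 'dev-only-secret').digest();

function encryptWithIV(plaintext) {
  const iv = crypto.randomBytes(16); // fresh IV for every value
  const cipher = crypto.createCipheriv(ALGORITHM, key, iv);
  const encrypted = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);
  // Keep the IV next to the ciphertext so the value can be decrypted later.
  return `${iv.toString('hex')}:${encrypted.toString('hex')}`;
}

function decryptWithIV(payload) {
  const [ivHex, dataHex] = payload.split(':');
  const decipher = crypto.createDecipheriv(ALGORITHM, key, Buffer.from(ivHex, 'hex'));
  return Buffer.concat([decipher.update(Buffer.from(dataHex, 'hex')), decipher.final()]).toString('utf8');
}
```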

* fix: correct ActionAuth types/enums, and define type for AuthForm

* feat: encryption/decryption helpers for Action AuthMetadata

* refactor(getActions): remove sensitive fields from query response

* refactor(POST/actions): encrypt and remove sensitive fields from mutation response

* fix(ActionService): change ESM import to CJS

* feat: frontend auth handling for actions + optimistic update on action update/creation

* refactor(actions): use the correct variables and types for setAuth method

* refactor: POST /:assistant_id action can now handle updating an existing action; add `saved_auth_fields` to determine when the user explicitly saves new auth creds; only send auth metadata if the user explicitly saved fields

* refactor(createActionTool): catch errors and send back meaningful error message, add flag to `getActions` to determine whether to retrieve sensitive values or not

* refactor(ToolService): add `action` property to ToolCall PartMetadata to determine if the tool call was an action, fix parsing function name issue with actionDelimiter

* fix(ActionRequest): use URL class to correctly join endpoint parts for `execute` call
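
Roughly the idea, with illustrative names rather than the actual `ActionRequest` code:

```js
// Join a domain and an OpenAPI path without duplicated or missing slashes.
function buildActionURL(domain, path) {
  const base = domain.startsWith('http') ? domain : `https://${domain}`;
  // new URL() resolves the path against the base and normalizes the result.
  return new URL(path, base).toString();
}

// buildActionURL('example.com/', '/repos/search') === 'https://example.com/repos/search'
```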

* feat: delete assistant actions

* refactor: conditionally show Available actions

* refactor: show `retrieval` and `code_interpreter` as Capabilities, swap `Switch` for `Checkbox`

* chore: remove shadow-stroke from messages

* WIP: first pass of Assistants Knowledge attachments

* refactor: remove AssistantsProvider in favor of FormProvider, fix selectedAssistant re-render bug, map Assistant file_ids to files via fileMap, initialize Knowledge component with mapped files if any exist

* fix: prevent deleting files on assistant file upload

* chore: remove console.log

* refactor(useUploadFileMutation): update files and assistants cache on upload

* chore: disable oauth option as not supported yet

* feat: cancel assistant runs

* refactor: initialize OpenAI client with helper function, resolve all related circular dependencies

* fix(DALL-E): initialization

* fix(process): openai client initialization

* fix: select an existing Assistant when the active one is deleted

* chore: allow attaching files for assistant endpoint, send back relevant OpenAI error message when uploading, destructure OpenAI initialization correctly, add `message_file` to formData when a file is attached to the message but not the assistant

* fix: add assistant_id on newConvo

* fix(initializeClient): import fix

* chore: swap setAssistant for setOption in useEffect

* fix(DALL-E): add processFileURL to loadTools call

* chore: add customConfig to debug logs

* feat: delete threads on convo delete

* chore: replace Assistants icon

* chore: remove console.dir() in `abortRun`

* feat(AssistantService): accumulate text values from run in openai.responseText

* feat: titling for assistants endpoint

* chore: move panel file components to appropriate directory, add file checks for attaching files, change icon for Attach Files

* refactor: add localizations to tools, plugins, add condition for adding/removing user plugins so tool selections don't affect this value

* chore: disable `import from url` action for now

* chore: remove textMimeTypes from default fileConfig for now

* fix: catch tool errors and send as outputs with error messages

* fix: React warning about button as descendant of button

* style: retrieval and cancelled icon

* WIP: pass isSubmitting to Parts, use InProgressCall to display cancelled tool calls correctly, show domain/function name

* fix(meilisearch): fix `postSaveHook` issue where indexing expects a mongo document, and join all text content parts for meili indexing
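
The joining step, assuming the OpenAI-style content part shape (`{ type: 'text', text: { value } }`); names are illustrative:

```js
// Flatten assistant message content parts into a single string for Meilisearch.
function joinTextParts(content = []) {
  return content
    .filter((part) => part.type === 'text' && part.text?.value)
    .map((part) => part.text.value)
    .join(' ');
}
```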

* ci: fix dall-e tests

* ci: fix client tests

* fix: button types in actions panel

* fix: plugin auth form persisting across tool selections

* fix(ci): update AppService spec with `loadAndFormatTools`

* fix(clearConvos): add id check earlier on

* refactor(AssistantAvatar): set previewURL dynamically when metadata.avatar changes

* feat(assistants): addTitle cache setting

* fix(useSSE): resolve rebase conflicts

* fix: delete mutation

* style(SidePanel): make grip visible on active and hover, invisible otherwise

* ci: add data-provider tests to workflow, also update eslint/tsconfig to recognize specs, and add `text/csv` to fileConfig

* fix: handle edge case where auth object is undefined, and log errors

* refactor(actions): resolve schemas, add tests for resolving refs, import specs from separate file for tests

* chore: remove comment

* fix(ActionsInput): re-render bug when initializing states with action fields

* fix(patch/assistant): filter undefined tools

* chore: add logging for errors in assistants routes

* fix(updateAssistant): map actions to functions to avoid overwriting

* fix(actions): properly handle GET paths

* fix(convos): unhandled delete thread exception

* refactor(AssistantService): pass both thread_id and conversationId when sending intermediate assistant messages, remove `mapMessagesToSteps` from AssistantService

* refactor(useSSE): replace all messages with runMessages and pass latestMessageId to abortRun; fix(checkMessageGaps): include tool calls when syncing messages

* refactor(assistants/chat): invoke `createOnTextProgress` after thread creation

* chore: add typing

* style: sidepanel styling

* style: action tool call domain styling

* feat(assistants): default models, limit retrieval to certain models, add env variables to env.example

* feat: assistants api key in EndpointService

* refactor: set assistant model to conversation on assistant switch

* refactor: set assistant model to conversation on assistant select from panel

* fix(retrieveAndProcessFile): catch attempt to download file with `assistant` purpose which is not allowed; add logging

* feat: retrieval styling, handling, and logging

* chore: rename ASSISTANTS_REVERSE_PROXY to ASSISTANTS_BASE_URL

* feat: FileContext for file metadata

* feat: context file mgmt and filtering

* style(Select): hover/rounded changes

* refactor: explicit, endpoint-dependent conversation switch through `useSelectAssistant`, which does not create a new chat if the current endpoint is the assistant endpoint

* fix(AssistantAvatar): make empty previewURL if no avatar present

* refactor: side panel mobile styling

* style: merge tool and action section, optimize mobile styling for action/tool buttons

* fix: localStorage issues

* fix(useSelectAssistant): invoke react query hook directly in select hook as Map was not being updated in time

* style: light mode fixes

* fix: prevent sidepanel nav styling from shifting layout up

* refactor: change default layout (collapsed by default)

* style: mobile optimization of DataTable

* style: datatable

* feat: client-side hide right-side panel

* chore(useNewConvo): add partial typing for preset

* fix(useSelectAssistant): pass correct model name by using template as preset

* WIP: assistant presets

* refactor(ToolService): add native solution for `TavilySearchResults` and log tool output errors

* refactor: organize imports and use native TavilySearchResults

* fix(TavilySearchResults): stringify result

* fix(ToolCall): show tool call outputs when not an action

* chore: rename Prompt Prefix to custom instructions (in user facing text only)

* refactor(EditPresetDialog): Optimize setting title by debouncing, reset preset on dialog close to avoid state mixture

* feat: add `presetOverride` to overwrite active conversation settings when saving a Preset (relevant for client side updates only)

* feat: Assistant preset settings (client-side)

* fix(Switcher): only set assistant_id and model if current endpoint is Assistants

* feat: use `useDebouncedInput` for updating conversation settings, starting with EditPresetDialog title setting and Assistant instructions setting
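
The underlying idea, framework-agnostic (the real `useDebouncedInput` wraps this pattern in a React hook; `updateConversation` is a hypothetical consumer):

```js
// Delay a setter until the user stops typing for `delayMs`.
function debounce(fn, delayMs = 450) {
  let timer;
  return (...args) => {
    clearTimeout(timer);
    timer = setTimeout(() => fn(...args), delayMs);
  };
}

// const setTitleDebounced = debounce((title) => updateConversation({ title }), 450);
```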

* feat(Assistants): add instructions field to settings

* feat(chat/assistants): pass conversation settings to run body

* wip: begin localization and only allow actions if the assistant is created

* refactor(AssistantsPanel): knowledge localization, allow tools on creation

* feat: experimental: allow 'priming' values that would normally require an assistant_id to be defined before the assistant is created

* chore: trim console logs and make more meaningful

* chore: toast messages

* fix(ci): date test

* feat: create file when uploading Assistant Avatar

* feat: file upload rate limiting from custom config with dynamic file route initialization

* refactor: use file upload limiters on post routes only

* refactor(fileConfig): add endpoints field for endpoint-specific file configs, add mergeConfig function, add tests
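
A sketch of the merge behavior, assuming endpoint overrides are shallow-merged over the defaults (the real merge function may handle more fields):

```js
// Merge a custom file config (e.g. from librechat.yaml) over the default config.
function mergeFileConfig(defaults, custom = {}) {
  const merged = { ...defaults, ...custom, endpoints: { ...defaults.endpoints } };
  for (const [endpoint, overrides] of Object.entries(custom.endpoints ?? {})) {
    merged.endpoints[endpoint] = { ...defaults.endpoints?.[endpoint], ...overrides };
  }
  return merged;
}
```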

* refactor: fileConfig route, dynamic multer instances used on all '/' and '/images' POST routes, data service and query hook

* feat: supportedMimeTypesSchema, test for array of regex
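
One way to express such a schema with zod; the actual schema in `librechat-data-provider` may differ:

```js
const { z } = require('zod');

// Validate an optional array whose members must be RegExp instances.
const supportedMimeTypesSchema = z
  .array(z.custom((value) => value instanceof RegExp))
  .optional();

// supportedMimeTypesSchema.parse([/^image\/(jpeg|png|webp)$/]); // passes
// supportedMimeTypesSchema.parse(['image/png']);                // throws: not a RegExp
```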

* feat: configurable file config limits

* chore: clarify assistants file knowledge prereq.

* chore(useTextarea): default to localized 'Assistant' if assistant name is empty

* feat: configurable file limits and toggle file upload per endpoint

* fix(useUploadFileMutation): prevent updating assistant.files cache if file upload is a message_file attachment

* fix(AssistantSelect): set last selected assistant only when timeout successfully runs

* refactor(queries): disable assistant queries if assistants endpoint is not enabled

* chore(Switcher): add localization

* chore: pluralize `assistant` for `EModelEndpoint` key and value

* feat: show/hide assistant UI components based on endpoint availability; librechat.yaml config for disabling builder section and setting polling/timeout intervals

* fix(compactEndpointSchemas): use EModelEndpoint for schema access

* feat(runAssistant): use configured values from `librechat.yaml` for `pollIntervalMs` and `timeout`
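
Roughly the polling loop (a sketch; status names follow the public Assistants API and the helper name is illustrative):

```js
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

// Poll a run until it reaches a terminal status or the configured timeout elapses.
async function pollRun({ openai, thread_id, run_id, pollIntervalMs = 750, timeout = 180000 }) {
  const started = Date.now();
  while (Date.now() - started < timeout) {
    const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
    if (!['queued', 'in_progress', 'cancelling'].includes(run.status)) {
      return run; // completed, requires_action, failed, cancelled, or expired
    }
    await sleep(pollIntervalMs);
  }
  throw new Error(`Run ${run_id} did not finish within ${timeout} ms`);
}
```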

* fix: naming issue

* wip: revert landing

* 🎉 happy birthday LibreChat (#1768)

* happy birthday LibreChat

* Refactor endpoint condition in Landing component

* Update birthday message in Eng.tsx

* fix(/config): avoid nesting ternaries

* refactor(/config): check birthday

---------

Co-authored-by: Danny Avila <messagedaniel@protonmail.com>

* fix: landing

* fix: landing

* fix(useMessageHelpers): hardcoded check to use EModelEndpoint instead

* fix(ci): convo test revert to main

* fix(assistants/chat): fix issue where assistant_id was being saved as model for convo

* chore: add logging, race promises to prevent longer timeouts, explicitly set maxRetries and timeouts, and robustly catch invalid abortRun params

* refactor: use recoil state for `showStopButton` and only show for assistants endpoint after syncing conversation data

* refactor: optimize abortRun strategy using localStorage, refactor `abortConversation` to use async/await and await the result, refactor how the abortKey cache is set for runs

* fix(checkMessageGaps): assign `assistant_id` to synced messages if defined; prevents UI from showing blank assistant for cancelled messages

* refactor: re-order sequence of chat route, only allow aborting messages after run is created, cancel abortRun if there was a cancelling error (likely because the run was already cancelled in the chat route), and add extra logging

* chore(typedefs): add httpAgent type to OpenAIClient

* refactor: use custom implementation of retrieving run with axios to allow for timing out run query
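
Approximately this shape, with the URL, header, and timeout values assumed from the public Assistants API rather than taken from the PR:

```js
const axios = require('axios');

// Retrieve a run with axios so the query itself can time out quickly.
async function retrieveRunWithTimeout({ apiKey, thread_id, run_id, timeoutMs = 2000 }) {
  const { data } = await axios.get(
    `https://api.openai.com/v1/threads/${thread_id}/runs/${run_id}`,
    {
      headers: {
        Authorization: `Bearer ${apiKey}`,
        'OpenAI-Beta': 'assistants=v1',
      },
      timeout: timeoutMs, // axios aborts the request if it takes longer than this
    },
  );
  return data;
}
```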

* fix(waitForRun): handle timed out run retrieval query

* refactor: update preset conditions:
- presets will retain settings when a different endpoint is selected for existing convos, whether via a modular switch or an assistant switch
- no longer use `navigateToConvo` on preset select

* fix: temporary calculator hack, as it expects string input when invoked

* fix: cancel abortRun only when cancelling error is a result of the run already being cancelled

* chore: remove use of `fileMaxSizeMB` and total counterpart (redundant)

* docs: custom config documentation update

* docs: assistants api setup and dotenv, new custom config fields

* refactor(Switcher): make Assistant switcher sticky in SidePanel

* chore(useSSE): remove console log of data and message index

* refactor(AssistantPanel): button styling and add secondary select button to bottom of panel

* refactor(OpenAIClient): allow passing conversationId to RunManager through titleConvo and initializeLLM to properly record title context tokens used in cases where conversationId was not defined by the client

* feat(assistants): token tracking for assistant runs

* chore(spendTokens): improve logging

* feat: support/exclude specific assistant Ids

* chore: update `librechat.example.yaml`, optimize `AppService` handling, add new tests for `AppService`, optimize missing/outdated config logging

* chore: mount docker logs to root of project

* chore: condense axios errors

* chore: bump vite

* chore: vite hot reload fix using latest version

* chore(getOpenAIModels): sort instruct models to the end of models list

* fix(assistants): user provided key

* fix(assistants): user provided key, invalidate more queries on revoke

---------

Co-authored-by: Marco Beretta <81851188+Berry-13@users.noreply.github.com>
Danny Avila authored on 2024-02-13 20:42:27 -05:00, committed by GitHub
parent cd2786441a
commit ecd63eb9f1
316 changed files with 21873 additions and 6315 deletions


@ -1,5 +1,6 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { Constants } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
@ -48,7 +49,7 @@ const browserClient = async ({
options = { ...options, parentMessageId, conversationId };
}
if (parentMessageId === '00000000-0000-0000-0000-000000000000') {
if (parentMessageId === Constants.NO_PARENT) {
delete options.conversationId;
}


@ -1,5 +1,5 @@
const crypto = require('crypto');
const { supportsBalanceCheck } = require('librechat-data-provider');
const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
@ -77,7 +77,7 @@ class BaseClient {
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = opts.conversationId ?? crypto.randomUUID();
const parentMessageId = opts.parentMessageId ?? '00000000-0000-0000-0000-000000000000';
const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
@ -552,7 +552,7 @@ class BaseClient {
*
* Each message object should have an 'id' or 'messageId' property and may have a 'parentMessageId' property.
* The 'parentMessageId' is the ID of the message that the current message is a reply to.
* If 'parentMessageId' is not present, null, or is '00000000-0000-0000-0000-000000000000',
* If 'parentMessageId' is not present, null, or is Constants.NO_PARENT,
* the message is considered a root message.
*
* @param {Object} options - The options for the function.
@ -607,9 +607,7 @@ class BaseClient {
}
currentMessageId =
message.parentMessageId === '00000000-0000-0000-0000-000000000000'
? null
: message.parentMessageId;
message.parentMessageId === Constants.NO_PARENT ? null : message.parentMessageId;
}
orderedMessages.reverse();


@ -4,12 +4,13 @@ const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
validateVisionModel,
getResponseSender,
EModelEndpoint,
endpointSettings,
EModelEndpoint,
AuthKeys,
} = require('librechat-data-provider');
const { getModelMaxTokens } = require('~/utils');


@ -1,14 +1,19 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { getResponseSender, ImageDetailCost, ImageDetail } = require('librechat-data-provider');
const {
getResponseSender,
validateVisionModel,
ImageDetailCost,
ImageDetail,
} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
getModelMaxTokens,
genAzureChatCompletion,
extractBaseURL,
constructAzureURL,
getModelMaxTokens,
genAzureChatCompletion,
} = require('~/utils');
const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
@ -630,6 +635,7 @@ class OpenAIClient extends BaseClient {
context,
tokenBuffer,
initialMessageCount,
conversationId,
}) {
const modelOptions = {
modelName: modelName ?? model,
@ -677,7 +683,7 @@ class OpenAIClient extends BaseClient {
callbacks: runManager.createCallbacks({
context,
tokenBuffer,
conversationId: this.conversationId,
conversationId: this.conversationId ?? conversationId,
initialMessageCount,
}),
});
@ -693,12 +699,13 @@ class OpenAIClient extends BaseClient {
*
* @param {Object} params - The parameters for the conversation title generation.
* @param {string} params.text - The user's input.
* @param {string} [params.conversationId] - The current conversationId, if not already defined on client initialization.
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
*
* @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
* In case of failure, it will return the default title, "New Chat".
*/
async titleConvo({ text, responseText = '' }) {
async titleConvo({ text, conversationId, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
@ -758,7 +765,12 @@ ${convo}
try {
this.abortController = new AbortController();
const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
const llm = this.initializeLLM({
...modelOptions,
conversationId,
context: 'title',
tokenBuffer: 150,
});
title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
} catch (e) {
if (e?.message?.toLowerCase()?.includes('abort')) {


@ -3,6 +3,7 @@ const { CallbackManager } = require('langchain/callbacks');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { formatLangChainMessages } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
@ -113,6 +114,7 @@ class PluginsClient extends OpenAIClient {
openAIApiKey: this.openAIApiKey,
conversationId: this.conversationId,
fileStrategy: this.options.req.app.locals.fileStrategy,
processFileURL,
message,
},
});


@ -1,5 +1,6 @@
const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
describe('formatMessage', () => {
it('formats user message', () => {
@ -61,7 +62,7 @@ describe('formatMessage', () => {
isCreatedByUser: true,
isEdited: false,
model: null,
parentMessageId: '00000000-0000-0000-0000-000000000000',
parentMessageId: Constants.NO_PARENT,
sender: 'User',
text: 'hi',
tokenCount: 5,


@ -1,3 +1,4 @@
const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');
jest.mock('../../../lib/db/connectDb');
@ -307,7 +308,7 @@ describe('BaseClient', () => {
const unorderedMessages = [
{ id: '3', parentMessageId: '2', text: 'Message 3' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
{ id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
{ id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
];
it('should return ordered messages based on parentMessageId', () => {
@ -316,7 +317,7 @@ describe('BaseClient', () => {
parentMessageId: '3',
});
expect(result).toEqual([
{ id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
{ id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
{ id: '3', parentMessageId: '2', text: 'Message 3' },
]);


@ -1,6 +1,7 @@
const crypto = require('crypto');
const { Constants } = require('librechat-data-provider');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const PluginsClient = require('../PluginsClient');
const crypto = require('crypto');
jest.mock('~/lib/db/connectDb');
jest.mock('~/models/Conversation', () => {
@ -66,7 +67,7 @@ describe('PluginsClient', () => {
TestAgent.setOptions(opts);
}
const conversationId = opts.conversationId || crypto.randomUUID();
const parentMessageId = opts.parentMessageId || '00000000-0000-0000-0000-000000000000';
const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
this.pastMessages = await TestAgent.loadHistory(
conversationId,


@ -3,8 +3,8 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
@ -14,6 +14,9 @@ class OpenAICreateImage extends Tool {
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
if (fields.processFileURL) {
this.processFileURL = fields.processFileURL.bind(this);
}
let apiKey = fields.DALLE2_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
@ -80,13 +83,21 @@ Guidelines:
}
async _call(input) {
const resp = await this.openai.images.generate({
prompt: this.replaceUnwantedChars(input),
// TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
n: 1,
// size: '1024x1024'
size: '512x512',
});
let resp;
try {
resp = await this.openai.images.generate({
prompt: this.replaceUnwantedChars(input),
// TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
n: 1,
// size: '1024x1024'
size: '512x512',
});
} catch (error) {
logger.error('[DALL-E] Problem generating the image:', error);
return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`;
}
const theImageUrl = resp.data[0].url;
@ -110,15 +121,16 @@ Guidelines:
});
try {
const result = await processFileURL({
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
this.result = this.wrapInMarkdown(result);
this.result = this.wrapInMarkdown(result.filepath);
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;


@ -1,35 +1,42 @@
const availableTools = require('./manifest.json');
// Basic Tools
const CodeBrew = require('./CodeBrew');
const GoogleSearchAPI = require('./GoogleSearch');
const OpenAICreateImage = require('./DALL-E');
const DALLE3 = require('./structured/DALLE3');
const StructuredSD = require('./structured/StableDiffusion');
const StableDiffusionAPI = require('./StableDiffusion');
const WolframAlphaAPI = require('./Wolfram');
const StructuredWolfram = require('./structured/Wolfram');
const SelfReflectionTool = require('./SelfReflection');
const AzureAiSearch = require('./AzureAiSearch');
const StructuredACS = require('./structured/AzureAISearch');
const OpenAICreateImage = require('./DALL-E');
const StableDiffusionAPI = require('./StableDiffusion');
const SelfReflectionTool = require('./SelfReflection');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
const ChatTool = require('./structured/ChatTool');
const E2BTools = require('./structured/E2BTools');
const CodeSherpa = require('./structured/CodeSherpa');
const StructuredSD = require('./structured/StableDiffusion');
const StructuredACS = require('./structured/AzureAISearch');
const CodeSherpaTools = require('./structured/CodeSherpaTools');
const availableTools = require('./manifest.json');
const CodeBrew = require('./CodeBrew');
const StructuredWolfram = require('./structured/Wolfram');
const TavilySearchResults = require('./structured/TavilySearchResults');
module.exports = {
availableTools,
GoogleSearchAPI,
OpenAICreateImage,
DALLE3,
StableDiffusionAPI,
StructuredSD,
WolframAlphaAPI,
StructuredWolfram,
SelfReflectionTool,
AzureAiSearch,
StructuredACS,
E2BTools,
ChatTool,
CodeSherpa,
CodeSherpaTools,
// Basic Tools
CodeBrew,
AzureAiSearch,
GoogleSearchAPI,
WolframAlphaAPI,
OpenAICreateImage,
StableDiffusionAPI,
SelfReflectionTool,
// Structured Tools
DALLE3,
ChatTool,
E2BTools,
CodeSherpa,
StructuredSD,
StructuredACS,
CodeSherpaTools,
StructuredWolfram,
TavilySearchResults,
};


@ -108,6 +108,19 @@
}
]
},
{
"name": "Tavily Search",
"pluginKey": "tavily_search_results_json",
"description": "Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.",
"icon": "https://tavily.com/favicon.ico",
"authConfig": [
{
"authField": "TAVILY_API_KEY",
"label": "Tavily API Key",
"description": "Get your API key here: https://app.tavily.com/"
}
]
},
{
"name": "Calculator",
"pluginKey": "calculator",


@ -19,6 +19,13 @@ class AzureAISearch extends StructuredTool {
this.name = 'azure-ai-search';
this.description =
'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
// Define schema
this.schema = z.object({
query: z.string().describe('Search word or phrase to Azure AI Search'),
});
// Initialize properties using helper function
this.serviceEndpoint = this._initializeField(
@ -51,12 +58,16 @@ class AzureAISearch extends StructuredTool {
);
// Check for required fields
if (!this.serviceEndpoint || !this.indexName || !this.apiKey) {
if (!this.override && (!this.serviceEndpoint || !this.indexName || !this.apiKey)) {
throw new Error(
'Missing AZURE_AI_SEARCH_SERVICE_ENDPOINT, AZURE_AI_SEARCH_INDEX_NAME, or AZURE_AI_SEARCH_API_KEY environment variable.',
);
}
if (this.override) {
return;
}
// Create SearchClient
this.client = new SearchClient(
this.serviceEndpoint,
@ -64,11 +75,6 @@ class AzureAISearch extends StructuredTool {
new AzureKeyCredential(this.apiKey),
{ apiVersion: this.apiVersion },
);
// Define schema
this.schema = z.object({
query: z.string().describe('Search word or phrase to Azure AI Search'),
});
}
// Improved error handling and logging


@ -4,17 +4,25 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
/* Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
if (fields.processFileURL) {
this.processFileURL = fields.processFileURL.bind(this);
}
let apiKey = fields.DALLE3_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
if (process.env.DALLE_REVERSE_PROXY) {
@ -81,7 +89,7 @@ class DALLE3 extends Tool {
getApiKey() {
const apiKey = process.env.DALLE3_API_KEY ?? process.env.DALLE_API_KEY ?? '';
if (!apiKey) {
if (!apiKey && !this.override) {
throw new Error('Missing DALLE_API_KEY environment variable.');
}
return apiKey;
@ -115,6 +123,7 @@ class DALLE3 extends Tool {
n: 1,
});
} catch (error) {
logger.error('[DALL-E-3] Problem generating the image:', error);
return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`;
}
@ -145,15 +154,26 @@ Error Message: ${error.message}`;
});
try {
const result = await processFileURL({
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
this.result = this.wrapInMarkdown(result);
if (this.returnMetadata) {
this.result = {
file_id: result.file_id,
filename: result.filename,
filepath: result.filepath,
height: result.height,
width: result.width,
};
} else {
this.result = this.wrapInMarkdown(result.filepath);
}
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;


@ -10,6 +10,9 @@ const { logger } = require('~/config');
class StableDiffusionAPI extends StructuredTool {
constructor(fields) {
super();
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.name = 'stable-diffusion';
this.url = fields.SD_WEBUI_URL || this.getServerURL();
this.description_for_model = `// Generate images and visuals using text.
@ -52,7 +55,7 @@ class StableDiffusionAPI extends StructuredTool {
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
if (!url) {
if (!url && !this.override) {
throw new Error('Missing SD_WEBUI_URL environment variable.');
}
return url;


@ -0,0 +1,92 @@
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
class TavilySearchResults extends Tool {
static lc_name() {
return 'TavilySearchResults';
}
constructor(fields = {}) {
super(fields);
this.envVar = 'TAVILY_API_KEY';
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.apiKey = fields.apiKey ?? this.getApiKey();
this.kwargs = fields?.kwargs ?? {};
this.name = 'tavily_search_results_json';
this.description =
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
this.schema = z.object({
query: z.string().min(1).describe('The search query string.'),
max_results: z
.number()
.min(1)
.max(10)
.optional()
.describe('The maximum number of search results to return. Defaults to 5.'),
search_depth: z
.enum(['basic', 'advanced'])
.optional()
.describe(
'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.',
),
include_images: z
.boolean()
.optional()
.describe(
'Whether to include a list of query-related images in the response. Default is False.',
),
include_answer: z
.boolean()
.optional()
.describe('Whether to include answers in the search results. Default is False.'),
// include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'),
// include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'),
// exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'),
});
}
getApiKey() {
const apiKey = getEnvironmentVariable(this.envVar);
if (!apiKey && !this.override) {
throw new Error(`Missing ${this.envVar} environment variable.`);
}
return apiKey;
}
async _call(input) {
const validationResult = this.schema.safeParse(input);
if (!validationResult.success) {
throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
}
const { query, ...rest } = validationResult.data;
const requestBody = {
api_key: this.apiKey,
query,
...rest,
...this.kwargs,
};
const response = await fetch('https://api.tavily.com/search', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody),
});
const json = await response.json();
if (!response.ok) {
throw new Error(`Request failed with status ${response.status}: ${json.error}`);
}
return JSON.stringify(json);
}
}
module.exports = TavilySearchResults;


@ -7,6 +7,9 @@ const { logger } = require('~/config');
class WolframAlphaAPI extends StructuredTool {
constructor(fields) {
super();
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.name = 'wolfram';
this.apiKey = fields.WOLFRAM_APP_ID || this.getAppId();
this.description_for_model = `// Access dynamic computation and curated data from WolframAlpha and Wolfram Cloud.
@ -55,7 +58,7 @@ class WolframAlphaAPI extends StructuredTool {
getAppId() {
const appId = process.env.WOLFRAM_APP_ID || '';
if (!appId) {
if (!appId && !this.override) {
throw new Error('Missing WOLFRAM_APP_ID environment variable.');
}
return appId;


@ -1,14 +1,11 @@
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
const { processFileURL } = require('~/server/services/Files/process');
const { logger } = require('~/config');
jest.mock('openai');
jest.mock('~/server/services/Files/process', () => ({
processFileURL: jest.fn(),
}));
const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
getImageBasename: jest.fn().mockImplementation((url) => {
@ -69,7 +66,7 @@ describe('DALLE3', () => {
jest.resetModules();
process.env = { ...originalEnv, DALLE_API_KEY: mockApiKey };
// Instantiate DALLE3 for tests that do not depend on DALLE3_SYSTEM_PROMPT
dalle = new DALLE3();
dalle = new DALLE3({ processFileURL });
});
afterEach(() => {
@ -78,7 +75,8 @@ describe('DALLE3', () => {
process.env = originalEnv;
});
it('should throw an error if DALLE_API_KEY is missing', () => {
it('should throw an error if all potential API keys are missing', () => {
delete process.env.DALLE3_API_KEY;
delete process.env.DALLE_API_KEY;
expect(() => new DALLE3()).toThrow('Missing DALLE_API_KEY environment variable.');
});
@ -112,7 +110,9 @@ describe('DALLE3', () => {
};
generate.mockResolvedValue(mockResponse);
processFileURL.mockResolvedValue('http://example.com/img-test.png');
processFileURL.mockResolvedValue({
filepath: 'http://example.com/img-test.png',
});
const result = await dalle._call(mockData);


@ -6,19 +6,22 @@ const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
// Basic Tools
CodeBrew,
AzureAISearch,
GoogleSearchAPI,
WolframAlphaAPI,
StructuredWolfram,
OpenAICreateImage,
StableDiffusionAPI,
// Structured Tools
DALLE3,
StructuredSD,
AzureAISearch,
StructuredACS,
E2BTools,
CodeSherpa,
StructuredSD,
StructuredACS,
CodeSherpaTools,
CodeBrew,
StructuredWolfram,
TavilySearchResults,
} = require('../');
const { loadToolSuite } = require('./loadToolSuite');
const { loadSpecs } = require('./loadSpecs');
@ -151,8 +154,10 @@ const loadTools = async ({
returnMap = false,
tools = [],
options = {},
skipSpecs = false,
}) => {
const toolConstructors = {
tavily_search_results_json: TavilySearchResults,
calculator: Calculator,
google: GoogleSearchAPI,
wolfram: functions ? StructuredWolfram : WolframAlphaAPI,
@ -229,10 +234,17 @@ const loadTools = async ({
toolConstructors.codesherpa = CodeSherpa;
}
const imageGenOptions = {
fileStrategy: options.fileStrategy,
processFileURL: options.processFileURL,
returnMetadata: options.returnMetadata,
};
const toolOptions = {
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
dalle: { fileStrategy: options.fileStrategy },
'dall-e': { fileStrategy: options.fileStrategy },
dalle: imageGenOptions,
'dall-e': imageGenOptions,
'stable-diffusion': imageGenOptions,
};
const toolAuthFields = {};
@ -271,7 +283,7 @@ const loadTools = async ({
}
let specs = null;
if (functions && remainingTools.length > 0) {
if (functions && remainingTools.length > 0 && skipSpecs !== true) {
specs = await loadSpecs({
llm: model,
user,
@ -298,6 +310,9 @@ const loadTools = async ({
let result = [];
for (const tool of tools) {
const validTool = requestedTools[tool];
if (!validTool) {
continue;
}
const plugin = await validTool();
if (Array.isArray(plugin)) {