From 656e1abaea387ee330a8c9736e05508bf8d8b0c3 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 25 Nov 2025 15:20:07 -0500
Subject: [PATCH] =?UTF-8?q?=F0=9F=AA=A6=20refactor:=20Remove=20Legacy=20Co?=
 =?UTF-8?q?de=20(#10533)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* 🗑️ chore: Remove unused Legacy Provider clients and related helpers
* Deleted OpenAIClient and GoogleClient files along with their associated tests.
* Removed references to these clients in the clients index file.
* Cleaned up typedefs by removing the OpenAISpecClient export.
* Updated chat controllers to use the OpenAI SDK directly instead of the removed client classes.
* chore/remove-openapi-specs
* 🗑️ chore: Remove unused mergeSort and misc utility functions
* Deleted mergeSort.js and misc.js files as they are no longer needed.
* Removed references to cleanUpPrimaryKeyValue in messages.js and adjusted related logic.
* Updated mongoMeili.ts to eliminate local implementations of removed functions.
* chore: remove legacy endpoints
* chore: remove all plugins endpoint related code
* chore: remove unused prompt handling code and clean up imports
* Deleted handleInputs.js and instructions.js files as they are no longer needed.
* Removed references to these files in the prompts index.js.
* Updated docker-compose.yml to simplify reverse proxy configuration.
* chore: remove unused LightningIcon import from Icons.tsx
* chore: clean up translation.json by removing deprecated and unused keys
* chore: update Jest configuration and remove unused mock file
* Simplified the setupFiles array in jest.config.js by removing the fetchEventSource mock.
* Deleted the fetchEventSource.js mock file as it is no longer needed.
* fix: simplify endpoint type check in Landing and ConversationStarters components
* Updated the endpoint type check to use strict equality for better clarity and performance.
* Ensured consistency in the handling of the azureOpenAI endpoint across both components.
* chore: remove unused dependencies from package.json and package-lock.json
* chore: remove legacy EditController, associated routes and imports
* chore: update banResponse logic to refine request handling for banned users
* chore: remove unused validateEndpoint middleware and its references
* chore: remove unused 'res' parameter from initializeClient in multiple endpoint files
* chore: remove unused 'isSmallScreen' prop from BookmarkNav and NewChat components; clean up imports in ArchivedChatsTable and useSetIndexOptions hooks; enhance localization in PromptVersions
* chore: remove unused import of Constants and TMessage from MobileNav; retain only necessary QueryKeys import
* chore: remove unused TResPlugin type and related references; clean up imports in types and schemas
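
For reviewers, a minimal sketch of the direct-SDK pattern the updated chat
controllers follow. This is illustrative only: the helper name
streamChatCompletion and its option wiring are hypothetical, not code from
this PR; only the openai package's documented streaming API is assumed.

    const OpenAI = require('openai');

    /**
     * Sketch of streaming a chat completion straight from the OpenAI SDK,
     * in place of the removed OpenAIClient wrapper class.
     */
    async function streamChatCompletion({ apiKey, baseURL, model, messages, onToken }) {
      const client = new OpenAI({ apiKey, baseURL });
      // With stream: true the SDK returns an async iterable of chunks,
      // replacing the wrapper's hand-rolled SSE handling.
      const stream = await client.chat.completions.create({ model, messages, stream: true });
      let text = '';
      for await (const chunk of stream) {
        const token = chunk.choices[0]?.delta?.content ?? '';
        if (token) {
          text += token;
          onToken(token);
        }
      }
      return text;
    }

A controller would await this helper inside the request handler and forward
each token to the response; the per-endpoint key and proxy resolution differ
and are handled by the endpoint initializers.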
---
 .devcontainer/docker-compose.yml              |    3 +-
 .env.example                                  |    9 -
 api/app/clients/AnthropicClient.js            |  991 --------------
 api/app/clients/GoogleClient.js               |  994 --------------
 api/app/clients/OpenAIClient.js               | 1207 -----------------
 api/app/clients/document/index.js             |    5 -
 api/app/clients/document/tokenSplit.js        |   51 -
 api/app/clients/document/tokenSplit.spec.js   |   56 -
 api/app/clients/index.js                      |    6 -
 api/app/clients/llm/createCoherePayload.js    |   85 --
 api/app/clients/llm/index.js                  |    5 -
 api/app/clients/output_parsers/addImages.js   |   90 --
 .../clients/output_parsers/addImages.spec.js  |  246 ----
 .../clients/output_parsers/handleOutputs.js   |   88 --
 api/app/clients/output_parsers/index.js       |    7 -
 api/app/clients/prompts/handleInputs.js       |   38 -
 api/app/clients/prompts/index.js              |    4 -
 api/app/clients/prompts/instructions.js       |   10 -
 api/app/clients/specs/AnthropicClient.test.js | 1043 --------------
 api/app/clients/specs/OpenAIClient.test.js    |  630 ---------
 api/app/clients/specs/OpenAIClient.tokens.js  |  130 --
 api/app/clients/tools/.well-known/Ai_PDF.json |   18 -
 .../clients/tools/.well-known/BrowserOp.json  |   17 -
 .../tools/.well-known/Dr_Thoths_Tarot.json    |   89 --
 .../tools/.well-known/DreamInterpreter.json   |   18 -
 .../clients/tools/.well-known/VoxScript.json  |   22 -
 .../clients/tools/.well-known/askyourpdf.json |   18 -
 .../tools/.well-known/drink_maestro.json      |   18 -
 .../earthImagesAndVisualizations.json         |   18 -
 .../has-issues/scholarly_graph_link.json      |   18 -
 .../.well-known/has-issues/web_pilot.json     |   24 -
 .../.well-known/image_prompt_enhancer.json    |   18 -
 .../tools/.well-known/openapi/askyourpdf.yaml |  157 ---
 .../tools/.well-known/openapi/scholarai.yaml  |  185 ---
 .../clients/tools/.well-known/qrCodes.json    |   17 -
 .../clients/tools/.well-known/scholarai.json  |   22 -
 .../clients/tools/.well-known/uberchord.json  |   18 -
 .../clients/tools/.well-known/web_search.json |   18 -
 .../clients/tools/util/handleOpenAIErrors.js  |   33 -
 api/app/clients/tools/util/index.js           |    2 -
 api/jest.config.js                            |    6 +-
 api/lib/utils/mergeSort.js                    |   29 -
 api/lib/utils/misc.js                         |    8 -
 api/package.json                              |    7 -
 api/server/controllers/EditController.js      |  247 ----
 api/server/controllers/assistants/chatV1.js   |   18 +-
 api/server/controllers/assistants/chatV2.js   |    6 +-
 api/server/controllers/assistants/helpers.js  |    4 +-
 api/server/experimental.js                    |    1 -
 api/server/index.js                           |    2 -
 api/server/middleware/checkBan.js             |    6 +-
 api/server/middleware/index.js                |    2 -
 api/server/middleware/validateEndpoint.js     |   20 -
 api/server/routes/assistants/chatV1.js        |    1 -
 api/server/routes/assistants/chatV2.js        |    1 -
 api/server/routes/edit/anthropic.js           |   24 -
 api/server/routes/edit/custom.js              |   26 -
 api/server/routes/edit/google.js              |   24 -
 api/server/routes/edit/index.js               |   45 -
 api/server/routes/edit/openAI.js              |   26 -
 api/server/routes/index.js                    |    4 -
 api/server/routes/messages.js                 |    4 -
 api/server/routes/plugins.js                  |    9 -
 api/server/services/AssistantService.js       |    8 +-
 api/server/services/Config/EndpointService.js |   12 +-
 .../services/Config/loadAsyncEndpoints.js     |   26 +-
 .../services/Config/loadConfigModels.js       |    4 -
 .../services/Config/loadDefaultEConfig.js     |    6 +-
 .../Endpoints/anthropic/initialize.js         |   43 +-
 .../Endpoints/assistants/initalize.js         |   20 +-
 .../Endpoints/assistants/initialize.spec.js   |  113 --
 .../services/Endpoints/assistants/title.js    |   84 +-
 .../Endpoints/azureAssistants/initialize.js   |   10 -
 .../azureAssistants/initialize.spec.js        |  134 --
 .../services/Endpoints/custom/initialize.js   |   46 +-
 .../Endpoints/custom/initialize.spec.js       |  106 --
 .../services/Endpoints/google/initialize.js   |   32 +-
 .../Endpoints/google/initialize.spec.js       |  101 --
 .../services/Endpoints/openAI/initialize.js   |   43 +-
 .../Endpoints/openAI/initialize.spec.js       |  431 ------
 api/server/services/ModelService.js           |   35 +-
 api/server/services/ModelService.spec.js      |    6 -
 api/server/utils/handleText.js                |   13 +-
 api/test/__mocks__/fetchEventSource.js        |   27 -
 api/typedefs.js                               |   19 -
 client/src/common/types.ts                    |    7 +-
 .../Chat/Input/ConversationStarters.tsx       |    8 +-
 .../components/Chat/Input/PopoverButtons.tsx  |   25 +-
 client/src/components/Chat/Landing.tsx        |    8 +-
 .../Endpoints/components/EndpointItem.tsx     |   19 +-
 .../Chat/Menus/Presets/EditPresetDialog.tsx   |   17 +-
 .../Chat/Messages/ui/MessageRender.tsx        |    2 -
 .../Endpoints/MessageEndpointIcon.tsx         |   16 +-
 .../src/components/Endpoints/MinimalIcon.tsx  |   10 +-
 .../Endpoints/Settings/AgentSettings.tsx      |  248 ----
 .../Settings/MultiView/PluginSettings.tsx     |   26 -
 .../Endpoints/Settings/MultiView/index.ts     |    1 -
 .../Endpoints/Settings/OptionHover.tsx        |    5 -
 .../components/Endpoints/Settings/Plugins.tsx |  392 ------
 .../components/Endpoints/Settings/index.ts    |    2 -
 .../components/Endpoints/Settings/settings.ts |    3 +-
 .../Input/ModelSelect/PluginsByIndex.tsx      |  110 --
 .../components/Input/ModelSelect/options.ts   |    4 -
 .../Input/SetKeyDialog/SetKeyDialog.tsx       |   44 +-
 .../components/Messages/Content/Plugin.tsx    |  130 --
 .../src/components/Messages/Content/index.ts  |    1 -
 .../components/Nav/Bookmarks/BookmarkNav.tsx  |    3 +-
 client/src/components/Nav/MobileNav.tsx       |    3 +-
 client/src/components/Nav/NewChat.tsx         |    3 +-
 .../General/ArchivedChatsTable.tsx            |   22 +-
 .../Plugins/Store/PluginAuthForm.tsx          |    2 +-
 .../Plugins/Store/PluginStoreDialog.tsx       |  245 ----
 .../Plugins/Store/PluginStoreItem.tsx         |   76 --
 .../Plugins/Store/PluginStoreLinkButton.tsx   |   18 -
 .../Plugins/Store/PluginTooltip.tsx           |    7 +-
 .../__tests__/PluginStoreDialog.spec.tsx      |  223 ---
 .../Store/__tests__/PluginStoreItem.spec.tsx  |   60 -
 client/src/components/Plugins/Store/index.ts  |    3 -
 .../Plugins/Store/styles.module.css           |    4 -
 client/src/components/Plugins/index.ts        |    1 -
 .../src/components/Prompts/PromptVersions.tsx |    4 +-
 client/src/components/Share/Message.tsx       |    3 -
 .../SidePanel/Agents/AgentPanel.test.tsx      |    2 -
 .../SidePanel/Agents/AgentPanel.tsx           |    4 +-
 .../components/Tools/AssistantToolsDialog.tsx |    4 +-
 .../components/Tools/MCPToolSelectDialog.tsx  |    4 +-
 .../src/components/Tools/ToolSelectDialog.tsx |    4 +-
 client/src/hooks/Chat/useChatHelpers.ts       |    7 +-
 client/src/hooks/Config/useAppStartup.ts      |   58 +-
 client/src/hooks/Config/useClearStates.ts     |    1 -
 .../Conversations/usePresetIndexOptions.ts    |   71 +-
 .../hooks/Conversations/useSetIndexOptions.ts |   72 +-
 client/src/hooks/Endpoint/Icons.tsx           |   10 +-
 client/src/hooks/Input/useUserKey.ts          |    2 -
 client/src/hooks/Plugins/index.ts             |    1 -
 client/src/hooks/Plugins/usePluginInstall.ts  |   77 --
 client/src/hooks/SSE/useEventHandlers.ts      |   13 +-
 client/src/hooks/SSE/useSSE.ts                |    6 +-
 client/src/hooks/useGenerationsByLatest.ts    |    3 -
 client/src/locales/en/translation.json        |   20 +-
 client/src/store/endpoints.ts                 |   11 -
 client/src/store/families.ts                  |    6 -
 client/src/store/settings.ts                  |    2 -
 client/src/utils/buildDefaultConvo.ts         |   13 +-
 client/src/utils/convos.spec.ts               |   13 -
 client/src/utils/convos.ts                    |    9 +-
 e2e/specs/keys.spec.ts                        |    2 +-
 e2e/specs/messages.spec.ts                    |    2 +-
 helm/librechat/values.yaml                    |    2 -
 package-lock.json                             |   37 -
 packages/data-provider/src/config.ts          |   13 -
 packages/data-provider/src/parsers.ts         |   34 +-
 packages/data-provider/src/schemas.ts         |  162 ---
 packages/data-provider/src/types.ts           |   13 +-
 .../src/models/plugins/mongoMeili.ts          |   33 +-
 packages/data-schemas/src/schema/convo.ts     |    3 -
 packages/data-schemas/src/schema/defaults.ts  |    3 +-
 packages/data-schemas/src/schema/message.ts   |   19 -
 packages/data-schemas/src/schema/preset.ts    |    8 +-
 packages/data-schemas/src/types/app.ts        |    2 -
 packages/data-schemas/src/types/convo.ts      |    1 -
 161 files changed, 256 insertions(+), 10513 deletions(-)
 delete mode 100644 api/app/clients/AnthropicClient.js
 delete mode 100644 api/app/clients/GoogleClient.js
 delete mode 100644 api/app/clients/OpenAIClient.js
 delete mode 100644 api/app/clients/document/index.js
 delete mode 100644 api/app/clients/document/tokenSplit.js
 delete mode 100644 api/app/clients/document/tokenSplit.spec.js
 delete mode 100644 api/app/clients/llm/createCoherePayload.js
 delete mode 100644 api/app/clients/llm/index.js
 delete mode 100644 api/app/clients/output_parsers/addImages.js
 delete mode 100644 api/app/clients/output_parsers/addImages.spec.js
 delete mode 100644 api/app/clients/output_parsers/handleOutputs.js
 delete mode 100644 api/app/clients/output_parsers/index.js
 delete mode 100644 api/app/clients/prompts/handleInputs.js
 delete mode 100644 api/app/clients/prompts/instructions.js
 delete mode 100644 api/app/clients/specs/AnthropicClient.test.js
 delete mode 100644 api/app/clients/specs/OpenAIClient.test.js
 delete mode 100644 api/app/clients/specs/OpenAIClient.tokens.js
 delete mode 100644 api/app/clients/tools/.well-known/Ai_PDF.json
 delete mode 100644 api/app/clients/tools/.well-known/BrowserOp.json
 delete mode 100644 api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
 delete mode 100644 api/app/clients/tools/.well-known/DreamInterpreter.json
 delete mode 100644 api/app/clients/tools/.well-known/VoxScript.json
 delete mode 100644 api/app/clients/tools/.well-known/askyourpdf.json
 delete mode 100644 api/app/clients/tools/.well-known/drink_maestro.json
 delete mode 100644 api/app/clients/tools/.well-known/earthImagesAndVisualizations.json
 delete mode 100644 api/app/clients/tools/.well-known/has-issues/scholarly_graph_link.json
 delete mode 100644 api/app/clients/tools/.well-known/has-issues/web_pilot.json
 delete mode 100644 api/app/clients/tools/.well-known/image_prompt_enhancer.json
 delete mode 100644 api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
 delete mode 100644 api/app/clients/tools/.well-known/openapi/scholarai.yaml
 delete mode 100644 api/app/clients/tools/.well-known/qrCodes.json
 delete mode 100644 api/app/clients/tools/.well-known/scholarai.json
 delete mode 100644 api/app/clients/tools/.well-known/uberchord.json
 delete mode 100644 api/app/clients/tools/.well-known/web_search.json
 delete mode 100644 api/app/clients/tools/util/handleOpenAIErrors.js
 delete mode 100644 api/lib/utils/mergeSort.js
 delete mode 100644 api/lib/utils/misc.js
 delete mode 100644 api/server/controllers/EditController.js
 delete mode 100644 api/server/middleware/validateEndpoint.js
 delete mode 100644 api/server/routes/edit/anthropic.js
 delete mode 100644 api/server/routes/edit/custom.js
 delete mode 100644 api/server/routes/edit/google.js
 delete mode 100644 api/server/routes/edit/index.js
 delete mode 100644 api/server/routes/edit/openAI.js
 delete mode 100644 api/server/routes/plugins.js
 delete mode 100644 api/server/services/Endpoints/assistants/initialize.spec.js
 delete mode 100644 api/server/services/Endpoints/azureAssistants/initialize.spec.js
 delete mode 100644 api/server/services/Endpoints/custom/initialize.spec.js
 delete mode 100644 api/server/services/Endpoints/google/initialize.spec.js
 delete mode 100644 api/server/services/Endpoints/openAI/initialize.spec.js
 delete mode 100644 api/test/__mocks__/fetchEventSource.js
 delete mode 100644 client/src/components/Endpoints/Settings/AgentSettings.tsx
 delete mode 100644 client/src/components/Endpoints/Settings/MultiView/PluginSettings.tsx
 delete mode 100644 client/src/components/Endpoints/Settings/Plugins.tsx
 delete mode 100644 client/src/components/Input/ModelSelect/PluginsByIndex.tsx
 delete mode 100644 client/src/components/Messages/Content/Plugin.tsx
 delete mode 100644 client/src/components/Plugins/Store/PluginStoreDialog.tsx
 delete mode 100644 client/src/components/Plugins/Store/PluginStoreItem.tsx
 delete mode 100644 client/src/components/Plugins/Store/PluginStoreLinkButton.tsx
 delete mode 100644 client/src/components/Plugins/Store/__tests__/PluginStoreDialog.spec.tsx
 delete mode 100644 client/src/components/Plugins/Store/__tests__/PluginStoreItem.spec.tsx
 delete mode 100644 client/src/components/Plugins/Store/styles.module.css
 delete mode 100644 client/src/components/Plugins/index.ts
 delete mode 100644 client/src/hooks/Plugins/usePluginInstall.ts

diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml
index e7c36c5535..70ebf9b955 100644
--- a/.devcontainer/docker-compose.yml
+++ b/.devcontainer/docker-compose.yml
@@ -20,8 +20,7 @@ services:
     environment:
       - HOST=0.0.0.0
       - MONGO_URI=mongodb://mongodb:27017/LibreChat
-      # - CHATGPT_REVERSE_PROXY=http://host.docker.internal:8080/api/conversation # if you are hosting your own chatgpt reverse proxy with docker
-      # - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1/chat/completions # if you are hosting your own chatgpt reverse proxy with docker
+      # - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1
       - MEILI_HOST=http://meilisearch:7700
   # Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
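
A note on the URL shape above: the '/v1/chat/completions' suffix is gone
because the OpenAI SDK appends route paths to its base URL on its own. A
sketch under the assumption that this variable feeds the SDK's baseURL option
(the exact wiring lives in the endpoint initializers):

    const OpenAI = require('openai');

    // The SDK resolves routes like /chat/completions relative to baseURL
    // (default: https://api.openai.com/v1), so a reverse proxy should be
    // configured up to the API root only, not a full route.
    const client = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: process.env.OPENAI_REVERSE_PROXY || undefined,
    });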
diff --git a/.env.example b/.env.example index 90995be72f..799e44525b 100644 --- a/.env.example +++ b/.env.example @@ -129,7 +129,6 @@ ANTHROPIC_API_KEY=user_provided # AZURE_OPENAI_API_VERSION= # Deprecated # AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated # AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated -# PLUGINS_USE_AZURE="true" # Deprecated #=================# # AWS Bedrock # @@ -230,14 +229,6 @@ ASSISTANTS_API_KEY=user_provided # More info, including how to enable use of Assistants with Azure here: # https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure -#============# -# Plugins # -#============# - -# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613 - -DEBUG_PLUGINS=true - CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0 CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js deleted file mode 100644 index 16a79278f1..0000000000 --- a/api/app/clients/AnthropicClient.js +++ /dev/null @@ -1,991 +0,0 @@ -const Anthropic = require('@anthropic-ai/sdk'); -const { logger } = require('@librechat/data-schemas'); -const { HttpsProxyAgent } = require('https-proxy-agent'); -const { - Constants, - ErrorTypes, - EModelEndpoint, - parseTextParts, - anthropicSettings, - getResponseSender, - validateVisionModel, -} = require('librechat-data-provider'); -const { sleep, SplitStreamHandler: _Handler, addCacheControl } = require('@librechat/agents'); -const { - Tokenizer, - createFetch, - matchModelName, - getClaudeHeaders, - getModelMaxTokens, - configureReasoning, - checkPromptCacheSupport, - getModelMaxOutputTokens, - createStreamEventHandlers, -} = require('@librechat/api'); -const { - truncateText, - formatMessage, - titleFunctionPrompt, - parseParamFromPrompt, - createContextHandlers, -} = require('./prompts'); -const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens'); -const { encodeAndFormat } = require('~/server/services/Files/images/encode'); -const BaseClient = require('./BaseClient'); - -const HUMAN_PROMPT = '\n\nHuman:'; -const AI_PROMPT = '\n\nAssistant:'; - -class SplitStreamHandler extends _Handler { - getDeltaContent(chunk) { - return (chunk?.delta?.text ?? chunk?.completion) || ''; - } - getReasoningDelta(chunk) { - return chunk?.delta?.thinking || ''; - } -} - -/** Helper function to introduce a delay before retrying */ -function delayBeforeRetry(attempts, baseDelay = 1000) { - return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts)); -} - -const tokenEventTypes = new Set(['message_start', 'message_delta']); -const { legacy } = anthropicSettings; - -class AnthropicClient extends BaseClient { - constructor(apiKey, options = {}) { - super(apiKey, options); - this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY; - this.userLabel = HUMAN_PROMPT; - this.assistantLabel = AI_PROMPT; - this.contextStrategy = options.contextStrategy - ? 
options.contextStrategy.toLowerCase() - : 'discard'; - this.setOptions(options); - /** @type {string | undefined} */ - this.systemMessage; - /** @type {AnthropicMessageStartEvent| undefined} */ - this.message_start; - /** @type {AnthropicMessageDeltaEvent| undefined} */ - this.message_delta; - /** Whether the model is part of the Claude 3 Family - * @type {boolean} */ - this.isClaudeLatest; - /** Whether to use Messages API or Completions API - * @type {boolean} */ - this.useMessages; - /** Whether or not the model supports Prompt Caching - * @type {boolean} */ - this.supportsCacheControl; - /** The key for the usage object's input tokens - * @type {string} */ - this.inputTokensKey = 'input_tokens'; - /** The key for the usage object's output tokens - * @type {string} */ - this.outputTokensKey = 'output_tokens'; - /** @type {SplitStreamHandler | undefined} */ - this.streamHandler; - } - - setOptions(options) { - if (this.options && !this.options.replaceOptions) { - // nested options aren't spread properly, so we need to do this manually - this.options.modelOptions = { - ...this.options.modelOptions, - ...options.modelOptions, - }; - delete options.modelOptions; - // now we can merge options - this.options = { - ...this.options, - ...options, - }; - } else { - this.options = options; - } - - this.modelOptions = Object.assign( - { - model: anthropicSettings.model.default, - }, - this.modelOptions, - this.options.modelOptions, - ); - - const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic); - this.isClaudeLatest = - /claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch); - const isLegacyOutput = !( - /claude-3[-.]5-sonnet/.test(modelMatch) || - /claude-3[-.]7/.test(modelMatch) || - /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) || - /claude-[4-9]/.test(modelMatch) - ); - this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch); - - if ( - isLegacyOutput && - this.modelOptions.maxOutputTokens && - this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default - ) { - this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default; - } - - this.useMessages = this.isClaudeLatest || !!this.options.attachments; - - this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229'; - this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments)); - - this.maxContextTokens = - this.options.maxContextTokens ?? - getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? - 100000; - this.maxResponseTokens = - this.modelOptions.maxOutputTokens ?? - getModelMaxOutputTokens( - this.modelOptions.model, - this.options.endpointType ?? this.options.endpoint, - this.options.endpointTokenConfig, - ) ?? 
- anthropicSettings.maxOutputTokens.reset(this.modelOptions.model); - this.maxPromptTokens = - this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens; - - const reservedTokens = this.maxPromptTokens + this.maxResponseTokens; - if (reservedTokens > this.maxContextTokens) { - const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`; - const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; - logger.warn(info); - throw new Error(errorMessage); - } else if (this.maxResponseTokens === this.maxContextTokens) { - const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`; - const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; - logger.warn(info); - throw new Error(errorMessage); - } - - this.sender = - this.options.sender ?? - getResponseSender({ - model: this.modelOptions.model, - endpoint: EModelEndpoint.anthropic, - modelLabel: this.options.modelLabel, - }); - - this.startToken = '||>'; - this.endToken = ''; - - return this; - } - - /** - * Get the initialized Anthropic client. - * @param {Partial} requestOptions - The options for the client. - * @returns {Anthropic} The Anthropic client instance. - */ - getClient(requestOptions) { - /** @type {Anthropic.ClientOptions} */ - const options = { - fetch: createFetch({ - directEndpoint: this.options.directEndpoint, - reverseProxyUrl: this.options.reverseProxyUrl, - }), - apiKey: this.apiKey, - fetchOptions: {}, - }; - - if (this.options.proxy) { - options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy); - } - - if (this.options.reverseProxyUrl) { - options.baseURL = this.options.reverseProxyUrl; - } - - const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl); - if (headers) { - options.defaultHeaders = headers; - } - - return new Anthropic(options); - } - - /** - * Get stream usage as returned by this client's API response. - * @returns {AnthropicStreamUsage} The stream usage object. - */ - getStreamUsage() { - const inputUsage = this.message_start?.message?.usage ?? {}; - const outputUsage = this.message_delta?.usage ?? {}; - return Object.assign({}, inputUsage, outputUsage); - } - - /** - * Calculates the correct token count for the current user message based on the token count map and API usage. - * Edge case: If the calculation results in a negative value, it returns the original estimate. - * If revisiting a conversation with a chat history entirely composed of token estimates, - * the cumulative token count going forward should become more accurate as the conversation progresses. - * @param {Object} params - The parameters for the calculation. - * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. - * @param {string} params.currentMessageId - The ID of the current message to calculate. - * @param {AnthropicStreamUsage} params.usage - The usage object returned by the API. - * @returns {number} The correct token count for the current user message. 
- */ - calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { - const originalEstimate = tokenCountMap[currentMessageId] || 0; - - if (!usage || typeof usage.input_tokens !== 'number') { - return originalEstimate; - } - - tokenCountMap[currentMessageId] = 0; - const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { - const numCount = Number(count); - return sum + (isNaN(numCount) ? 0 : numCount); - }, 0); - const totalInputTokens = - (usage.input_tokens ?? 0) + - (usage.cache_creation_input_tokens ?? 0) + - (usage.cache_read_input_tokens ?? 0); - - const currentMessageTokens = totalInputTokens - totalTokensFromMap; - return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate; - } - - /** - * Get Token Count for LibreChat Message - * @param {TMessage} responseMessage - * @returns {number} - */ - getTokenCountForResponse(responseMessage) { - return this.getTokenCountForMessage({ - role: 'assistant', - content: responseMessage.text, - }); - } - - /** - * - * Checks if the model is a vision model based on request attachments and sets the appropriate options: - * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request. - * - Sets `this.isVisionModel` to `true` if vision request. - * - Deletes `this.modelOptions.stop` if vision request. - * @param {MongoFile[]} attachments - */ - checkVisionRequest(attachments) { - const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic]; - this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); - - const visionModelAvailable = availableModels?.includes(this.defaultVisionModel); - if ( - attachments && - attachments.some((file) => file?.type && file?.type?.includes('image')) && - visionModelAvailable && - !this.isVisionModel - ) { - this.modelOptions.model = this.defaultVisionModel; - this.isVisionModel = true; - } - } - - /** - * Calculate the token cost in tokens for an image based on its dimensions and detail level. - * - * For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs - * - * @param {Object} image - The image object. - * @param {number} image.width - The width of the image. - * @param {number} image.height - The height of the image. - * @returns {number} The calculated token cost measured by tokens. - * - */ - calculateImageTokenCost({ width, height }) { - return Math.ceil((width * height) / 750); - } - - async addImageURLs(message, attachments) { - const { files, image_urls } = await encodeAndFormat(this.options.req, attachments, { - endpoint: EModelEndpoint.anthropic, - }); - message.image_urls = image_urls.length ? image_urls : undefined; - return files; - } - - /** - * @param {object} params - * @param {number} params.promptTokens - * @param {number} params.completionTokens - * @param {AnthropicStreamUsage} [params.usage] - * @param {string} [params.model] - * @param {string} [params.context='message'] - * @returns {Promise} - */ - async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) { - if (usage != null && usage?.input_tokens != null) { - const input = usage.input_tokens ?? 0; - const write = usage.cache_creation_input_tokens ?? 0; - const read = usage.cache_read_input_tokens ?? 0; - - await spendStructuredTokens( - { - context, - user: this.user, - conversationId: this.conversationId, - model: model ?? 
this.modelOptions.model, - endpointTokenConfig: this.options.endpointTokenConfig, - }, - { - promptTokens: { input, write, read }, - completionTokens, - }, - ); - - return; - } - - await spendTokens( - { - context, - user: this.user, - conversationId: this.conversationId, - model: model ?? this.modelOptions.model, - endpointTokenConfig: this.options.endpointTokenConfig, - }, - { promptTokens, completionTokens }, - ); - } - - async buildMessages(messages, parentMessageId) { - const orderedMessages = this.constructor.getMessagesForConversation({ - messages, - parentMessageId, - }); - - logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId }); - - if (this.options.attachments) { - const attachments = await this.options.attachments; - const images = attachments.filter((file) => file.type.includes('image')); - - if (images.length && !this.isVisionModel) { - throw new Error('Images are only supported with the Claude 3 family of models'); - } - - const latestMessage = orderedMessages[orderedMessages.length - 1]; - - if (this.message_file_map) { - this.message_file_map[latestMessage.messageId] = attachments; - } else { - this.message_file_map = { - [latestMessage.messageId]: attachments, - }; - } - - const files = await this.addImageURLs(latestMessage, attachments); - - this.options.attachments = files; - } - - if (this.message_file_map) { - this.contextHandlers = createContextHandlers( - this.options.req, - orderedMessages[orderedMessages.length - 1].text, - ); - } - - const formattedMessages = orderedMessages.map((message, i) => { - const formattedMessage = this.useMessages - ? formatMessage({ - message, - endpoint: EModelEndpoint.anthropic, - }) - : { - author: message.isCreatedByUser ? this.userLabel : this.assistantLabel, - content: message?.content ?? message.text, - }; - - const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount; - /* If tokens were never counted, or, is a Vision request and the message has files, count again */ - if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) { - orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage); - } - - /* If message has files, calculate image token cost */ - if (this.message_file_map && this.message_file_map[message.messageId]) { - const attachments = this.message_file_map[message.messageId]; - for (const file of attachments) { - if (file.embedded) { - this.contextHandlers?.processFile(file); - continue; - } - if (file.metadata?.fileIdentifier) { - continue; - } - - orderedMessages[i].tokenCount += this.calculateImageTokenCost({ - width: file.width, - height: file.height, - }); - } - } - - formattedMessage.tokenCount = orderedMessages[i].tokenCount; - return formattedMessage; - }); - - if (this.contextHandlers) { - this.augmentedPrompt = await this.contextHandlers.createContext(); - this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? 
''); - } - - let { context: messagesInWindow, remainingContextTokens } = - await this.getMessagesWithinTokenLimit({ messages: formattedMessages }); - - const tokenCountMap = orderedMessages - .slice(orderedMessages.length - messagesInWindow.length) - .reduce((map, message, index) => { - const { messageId } = message; - if (!messageId) { - return map; - } - - map[messageId] = orderedMessages[index].tokenCount; - return map; - }, {}); - - logger.debug('[AnthropicClient]', { - messagesInWindow: messagesInWindow.length, - remainingContextTokens, - }); - - let lastAuthor = ''; - let groupedMessages = []; - - for (let i = 0; i < messagesInWindow.length; i++) { - const message = messagesInWindow[i]; - const author = message.role ?? message.author; - // If last author is not same as current author, add to new group - if (lastAuthor !== author) { - const newMessage = { - content: [message.content], - }; - - if (message.role) { - newMessage.role = message.role; - } else { - newMessage.author = message.author; - } - - groupedMessages.push(newMessage); - lastAuthor = author; - // If same author, append content to the last group - } else { - groupedMessages[groupedMessages.length - 1].content.push(message.content); - } - } - - groupedMessages = groupedMessages.map((msg, i) => { - const isLast = i === groupedMessages.length - 1; - if (msg.content.length === 1) { - const content = msg.content[0]; - return { - ...msg, - // reason: final assistant content cannot end with trailing whitespace - content: - isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string' - ? content?.trim() - : content, - }; - } - - if (!this.useMessages && msg.tokenCount) { - delete msg.tokenCount; - } - - return msg; - }); - - let identityPrefix = ''; - if (this.options.userLabel) { - identityPrefix = `\nHuman's name: ${this.options.userLabel}`; - } - - if (this.options.modelLabel) { - identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`; - } - - let promptPrefix = (this.options.promptPrefix ?? '').trim(); - if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { - promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim(); - } - if (promptPrefix) { - // If the prompt prefix doesn't end with the end token, add it. - if (!promptPrefix.endsWith(`${this.endToken}`)) { - promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`; - } - promptPrefix = `\nContext:\n${promptPrefix}`; - } - - if (identityPrefix) { - promptPrefix = `${identityPrefix}${promptPrefix}`; - } - - // Prompt AI to respond, empty if last message was from AI - let isEdited = lastAuthor === this.assistantLabel; - const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`; - let currentTokenCount = - isEdited || this.useMessages - ? this.getTokenCount(promptPrefix) - : this.getTokenCount(promptSuffix); - - let promptBody = ''; - const maxTokenCount = this.maxPromptTokens; - - const context = []; - - // Iterate backwards through the messages, adding them to the prompt until we reach the max token count. - // Do this within a recursive async function so that it doesn't block the event loop for too long. - // Also, remove the next message when the message that puts us over the token limit is created by the user. - // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:". 
- const nextMessage = { - remove: false, - tokenCount: 0, - messageString: '', - }; - - const buildPromptBody = async () => { - if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) { - const message = groupedMessages.pop(); - const isCreatedByUser = message.author === this.userLabel; - // Use promptPrefix if message is edited assistant' - const messagePrefix = - isCreatedByUser || !isEdited ? message.author : `${promptPrefix}${message.author}`; - const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`; - let newPromptBody = `${messageString}${promptBody}`; - - context.unshift(message); - - const tokenCountForMessage = this.getTokenCount(messageString); - const newTokenCount = currentTokenCount + tokenCountForMessage; - - if (!isCreatedByUser) { - nextMessage.messageString = messageString; - nextMessage.tokenCount = tokenCountForMessage; - } - - if (newTokenCount > maxTokenCount) { - if (!promptBody) { - // This is the first message, so we can't add it. Just throw an error. - throw new Error( - `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`, - ); - } - - // Otherwise, ths message would put us over the token limit, so don't add it. - // if created by user, remove next message, otherwise remove only this message - if (isCreatedByUser) { - nextMessage.remove = true; - } - - return false; - } - promptBody = newPromptBody; - currentTokenCount = newTokenCount; - - // Switch off isEdited after using it for the first time - if (isEdited) { - isEdited = false; - } - - // wait for next tick to avoid blocking the event loop - await new Promise((resolve) => setImmediate(resolve)); - return buildPromptBody(); - } - return true; - }; - - const messagesPayload = []; - const buildMessagesPayload = async () => { - let canContinue = true; - - if (promptPrefix) { - this.systemMessage = promptPrefix; - } - - while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) { - const message = groupedMessages.pop(); - - let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message); - - const newTokenCount = currentTokenCount + tokenCountForMessage; - const exceededMaxCount = newTokenCount > maxTokenCount; - - if (exceededMaxCount && messagesPayload.length === 0) { - throw new Error( - `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`, - ); - } else if (exceededMaxCount) { - canContinue = false; - break; - } - - delete message.tokenCount; - messagesPayload.unshift(message); - currentTokenCount = newTokenCount; - - // Switch off isEdited after using it once - if (isEdited && message.role === 'assistant') { - isEdited = false; - } - - // Wait for next tick to avoid blocking the event loop - await new Promise((resolve) => setImmediate(resolve)); - } - }; - - const processTokens = () => { - // Add 2 tokens for metadata after all messages have been counted. - currentTokenCount += 2; - - // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response. 
- this.modelOptions.maxOutputTokens = Math.min( - this.maxContextTokens - currentTokenCount, - this.maxResponseTokens, - ); - }; - - if ( - /claude-[3-9]/.test(this.modelOptions.model) || - /claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model) - ) { - await buildMessagesPayload(); - processTokens(); - return { - prompt: messagesPayload, - context: messagesInWindow, - promptTokens: currentTokenCount, - tokenCountMap, - }; - } else { - await buildPromptBody(); - processTokens(); - } - - if (nextMessage.remove) { - promptBody = promptBody.replace(nextMessage.messageString, ''); - currentTokenCount -= nextMessage.tokenCount; - context.shift(); - } - - let prompt = `${promptBody}${promptSuffix}`; - - return { prompt, context, promptTokens: currentTokenCount, tokenCountMap }; - } - - getCompletion() { - logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)"); - } - - /** - * Creates a message or completion response using the Anthropic client. - * @param {Anthropic} client - The Anthropic client instance. - * @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion. - * @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`. - * @returns {Promise} The response from the Anthropic client. - */ - async createResponse(client, options, useMessages) { - return (useMessages ?? this.useMessages) - ? await client.messages.create(options) - : await client.completions.create(options); - } - - getMessageMapMethod() { - /** - * @param {TMessage} msg - */ - return (msg) => { - if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) { - msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim(); - } else if (msg.content != null) { - msg.text = parseTextParts(msg.content, true); - delete msg.content; - } - - return msg; - }; - } - - /** - * @param {string[]} [intermediateReply] - * @returns {string} - */ - getStreamText(intermediateReply) { - if (!this.streamHandler) { - return intermediateReply?.join('') ?? ''; - } - - const reasoningText = this.streamHandler.reasoningTokens.join(''); - - const reasoningBlock = reasoningText.length > 0 ? 
`:::thinking\n${reasoningText}\n:::\n` : ''; - - return `${reasoningBlock}${this.streamHandler.tokens.join('')}`; - } - - async sendCompletion(payload, { onProgress, abortController }) { - if (!abortController) { - abortController = new AbortController(); - } - - const { signal } = abortController; - - const modelOptions = { ...this.modelOptions }; - if (typeof onProgress === 'function') { - modelOptions.stream = true; - } - - logger.debug('modelOptions', { modelOptions }); - const metadata = { - user_id: this.user, - }; - - const { - stream, - model, - temperature, - maxOutputTokens, - stop: stop_sequences, - topP: top_p, - topK: top_k, - } = this.modelOptions; - - let requestOptions = { - model, - stream: stream || true, - stop_sequences, - temperature, - metadata, - }; - - if (this.useMessages) { - requestOptions.messages = payload; - requestOptions.max_tokens = - maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model); - } else { - requestOptions.prompt = payload; - requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default; - } - - requestOptions = configureReasoning(requestOptions, { - thinking: this.options.thinking, - thinkingBudget: this.options.thinkingBudget, - }); - - if (!/claude-3[-.]7/.test(model)) { - requestOptions.top_p = top_p; - requestOptions.top_k = top_k; - } else if (requestOptions.thinking == null) { - requestOptions.topP = top_p; - requestOptions.topK = top_k; - } - - if (this.systemMessage && this.supportsCacheControl === true) { - requestOptions.system = [ - { - type: 'text', - text: this.systemMessage, - cache_control: { type: 'ephemeral' }, - }, - ]; - } else if (this.systemMessage) { - requestOptions.system = this.systemMessage; - } - - if (this.supportsCacheControl === true && this.useMessages) { - requestOptions.messages = addCacheControl(requestOptions.messages); - } - - logger.debug('[AnthropicClient]', { ...requestOptions }); - const handlers = createStreamEventHandlers(this.options.res); - this.streamHandler = new SplitStreamHandler({ - accumulate: true, - runId: this.responseMessageId, - handlers, - }); - - let intermediateReply = this.streamHandler.tokens; - - const maxRetries = 3; - const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE; - async function processResponse() { - let attempts = 0; - - while (attempts < maxRetries) { - let response; - try { - const client = this.getClient(requestOptions); - response = await this.createResponse(client, requestOptions); - - signal.addEventListener('abort', () => { - logger.debug('[AnthropicClient] message aborted!'); - if (response.controller?.abort) { - response.controller.abort(); - } - }); - - for await (const completion of response) { - const type = completion?.type ?? 
''; - if (tokenEventTypes.has(type)) { - logger.debug(`[AnthropicClient] ${type}`, completion); - this[type] = completion; - } - this.streamHandler.handle(completion); - await sleep(streamRate); - } - - break; - } catch (error) { - attempts += 1; - logger.warn( - `User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`, - ); - - if (attempts < maxRetries) { - await delayBeforeRetry(attempts, 350); - } else if (this.streamHandler && this.streamHandler.reasoningTokens.length) { - return this.getStreamText(); - } else if (intermediateReply.length > 0) { - return this.getStreamText(intermediateReply); - } else { - throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`); - } - } finally { - signal.removeEventListener('abort', () => { - logger.debug('[AnthropicClient] message aborted!'); - if (response.controller?.abort) { - response.controller.abort(); - } - }); - } - } - } - - await processResponse.bind(this)(); - return this.getStreamText(intermediateReply); - } - - getSaveOptions() { - return { - maxContextTokens: this.options.maxContextTokens, - artifacts: this.options.artifacts, - promptPrefix: this.options.promptPrefix, - modelLabel: this.options.modelLabel, - promptCache: this.options.promptCache, - thinking: this.options.thinking, - thinkingBudget: this.options.thinkingBudget, - resendFiles: this.options.resendFiles, - iconURL: this.options.iconURL, - greeting: this.options.greeting, - spec: this.options.spec, - ...this.modelOptions, - }; - } - - getBuildMessagesOptions() { - logger.debug("AnthropicClient doesn't use getBuildMessagesOptions"); - } - - getEncoding() { - return 'cl100k_base'; - } - - /** - * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. - * @param {string} text - The text to get the token count for. - * @returns {number} The token count of the given text. - */ - getTokenCount(text) { - const encoding = this.getEncoding(); - return Tokenizer.getTokenCount(text, encoding); - } - - /** - * Generates a concise title for a conversation based on the user's input text and response. - * Involves sending a chat completion request with specific instructions for title generation. - * - * This function capitlizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools). - * - * @param {Object} params - The parameters for the conversation title generation. - * @param {string} params.text - The user's input. - * @param {string} [params.responseText=''] - The AI's immediate response to the user. - * - * @returns {Promise} A promise that resolves to the generated conversation title. - * In case of failure, it will return the default title, "New Chat". - */ - async titleConvo({ text, responseText = '' }) { - let title = 'New Chat'; - this.message_delta = undefined; - this.message_start = undefined; - const convo = ` - ${truncateText(text)} - - - ${JSON.stringify(truncateText(responseText))} - `; - - const { ANTHROPIC_TITLE_MODEL } = process.env ?? {}; - const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 
'claude-3-haiku-20240307'; - const system = titleFunctionPrompt; - - const titleChatCompletion = async () => { - const content = ` - ${convo} - - - Please generate a title for this conversation.`; - - const titleMessage = { role: 'user', content }; - const requestOptions = { - model, - temperature: 0.3, - max_tokens: 1024, - system, - stop_sequences: ['\n\nHuman:', '\n\nAssistant', ''], - messages: [titleMessage], - }; - - try { - const response = await this.createResponse( - this.getClient(requestOptions), - requestOptions, - true, - ); - let promptTokens = response?.usage?.input_tokens; - let completionTokens = response?.usage?.output_tokens; - if (!promptTokens) { - promptTokens = this.getTokenCountForMessage(titleMessage); - promptTokens += this.getTokenCountForMessage({ role: 'system', content: system }); - } - if (!completionTokens) { - completionTokens = this.getTokenCountForMessage(response.content[0]); - } - await this.recordTokenUsage({ - model, - promptTokens, - completionTokens, - context: 'title', - }); - const text = response.content[0].text; - title = parseParamFromPrompt(text, 'title'); - } catch (e) { - logger.error('[AnthropicClient] There was an issue generating the title', e); - } - }; - - await titleChatCompletion(); - logger.debug('[AnthropicClient] Convo Title: ' + title); - return title; - } -} - -module.exports = AnthropicClient; diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js deleted file mode 100644 index 760889df8c..0000000000 --- a/api/app/clients/GoogleClient.js +++ /dev/null @@ -1,994 +0,0 @@ -const { google } = require('googleapis'); -const { sleep } = require('@librechat/agents'); -const { logger } = require('@librechat/data-schemas'); -const { getModelMaxTokens } = require('@librechat/api'); -const { concat } = require('@langchain/core/utils/stream'); -const { ChatVertexAI } = require('@langchain/google-vertexai'); -const { Tokenizer, getSafetySettings } = require('@librechat/api'); -const { ChatGoogleGenerativeAI } = require('@langchain/google-genai'); -const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai'); -const { HumanMessage, SystemMessage } = require('@langchain/core/messages'); -const { - googleGenConfigSchema, - validateVisionModel, - getResponseSender, - endpointSettings, - parseTextParts, - EModelEndpoint, - googleSettings, - ContentTypes, - VisionModes, - ErrorTypes, - Constants, - AuthKeys, -} = require('librechat-data-provider'); -const { encodeAndFormat } = require('~/server/services/Files/images'); -const { spendTokens } = require('~/models/spendTokens'); -const { - formatMessage, - createContextHandlers, - titleInstruction, - truncateText, -} = require('./prompts'); -const BaseClient = require('./BaseClient'); - -const loc = process.env.GOOGLE_LOC || 'us-central1'; -const publisher = 'google'; -const endpointPrefix = - loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`; - -const settings = endpointSettings[EModelEndpoint.google]; -const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/; - -class GoogleClient extends BaseClient { - constructor(credentials, options = {}) { - super('apiKey', options); - let creds = {}; - - if (typeof credentials === 'string') { - creds = JSON.parse(credentials); - } else if (credentials) { - creds = credentials; - } - - const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {}; - this.serviceKey = - serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? 
{}); - /** @type {string | null | undefined} */ - this.project_id = this.serviceKey.project_id; - this.client_email = this.serviceKey.client_email; - this.private_key = this.serviceKey.private_key; - this.access_token = null; - - this.apiKey = creds[AuthKeys.GOOGLE_API_KEY]; - - this.reverseProxyUrl = options.reverseProxyUrl; - - this.authHeader = options.authHeader; - - /** @type {UsageMetadata | undefined} */ - this.usage; - /** The key for the usage object's input tokens - * @type {string} */ - this.inputTokensKey = 'input_tokens'; - /** The key for the usage object's output tokens - * @type {string} */ - this.outputTokensKey = 'output_tokens'; - this.visionMode = VisionModes.generative; - /** @type {string} */ - this.systemMessage; - if (options.skipSetOptions) { - return; - } - this.setOptions(options); - } - - /* Google specific methods */ - constructUrl() { - return `https://${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`; - } - - async getClient() { - const scopes = ['https://www.googleapis.com/auth/cloud-platform']; - const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes); - - jwtClient.authorize((err) => { - if (err) { - logger.error('jwtClient failed to authorize', err); - throw err; - } - }); - - return jwtClient; - } - - async getAccessToken() { - const scopes = ['https://www.googleapis.com/auth/cloud-platform']; - const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes); - - return new Promise((resolve, reject) => { - jwtClient.authorize((err, tokens) => { - if (err) { - logger.error('jwtClient failed to authorize', err); - reject(err); - } else { - resolve(tokens.access_token); - } - }); - }); - } - - /* Required Client methods */ - setOptions(options) { - if (this.options && !this.options.replaceOptions) { - // nested options aren't spread properly, so we need to do this manually - this.options.modelOptions = { - ...this.options.modelOptions, - ...options.modelOptions, - }; - delete options.modelOptions; - // now we can merge options - this.options = { - ...this.options, - ...options, - }; - } else { - this.options = options; - } - - this.modelOptions = this.options.modelOptions || {}; - - this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments)); - - /** @type {boolean} Whether using a "GenerativeAI" Model */ - this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model); - - this.maxContextTokens = - this.options.maxContextTokens ?? - getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google); - - // The max prompt tokens is determined by the max context tokens minus the max response tokens. - // Earlier messages will be dropped until the prompt is within the limit. 
- this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default; - - if (this.maxContextTokens > 32000) { - this.maxContextTokens = this.maxContextTokens - this.maxResponseTokens; - } - - this.maxPromptTokens = - this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens; - - if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) { - throw new Error( - `maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${ - this.maxPromptTokens + this.maxResponseTokens - }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`, - ); - } - - // Add thinking configuration - this.modelOptions.thinkingConfig = { - thinkingBudget: - (this.modelOptions.thinking ?? googleSettings.thinking.default) - ? this.modelOptions.thinkingBudget - : 0, - }; - delete this.modelOptions.thinking; - delete this.modelOptions.thinkingBudget; - - this.sender = - this.options.sender ?? - getResponseSender({ - model: this.modelOptions.model, - endpoint: EModelEndpoint.google, - modelLabel: this.options.modelLabel, - }); - - this.userLabel = this.options.userLabel || 'User'; - this.modelLabel = this.options.modelLabel || 'Assistant'; - - if (this.options.reverseProxyUrl) { - this.completionsUrl = this.options.reverseProxyUrl; - } else { - this.completionsUrl = this.constructUrl(); - } - - let promptPrefix = (this.options.promptPrefix ?? '').trim(); - if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { - promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim(); - } - this.systemMessage = promptPrefix; - this.initializeClient(); - return this; - } - - /** - * - * Checks if the model is a vision model based on request attachments and sets the appropriate options: - * @param {MongoFile[]} attachments - */ - checkVisionRequest(attachments) { - /* Validation vision request */ - this.defaultVisionModel = - this.options.visionModel ?? - (!EXCLUDED_GENAI_MODELS.test(this.modelOptions.model) - ? this.modelOptions.model - : 'gemini-pro-vision'); - const availableModels = this.options.modelsConfig?.[EModelEndpoint.google]; - this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); - - if ( - attachments && - attachments.some((file) => file?.type && file?.type?.includes('image')) && - availableModels?.includes(this.defaultVisionModel) && - !this.isVisionModel - ) { - this.modelOptions.model = this.defaultVisionModel; - this.isVisionModel = true; - } - - if (this.isVisionModel && !attachments && this.modelOptions.model.includes('gemini-pro')) { - this.modelOptions.model = 'gemini-pro'; - this.isVisionModel = false; - } - } - - formatMessages() { - return ((message) => { - const msg = { - author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel), - content: message?.content ?? message.text, - }; - - if (!message.image_urls?.length) { - return msg; - } - - msg.content = ( - !Array.isArray(msg.content) - ? 
[ - { - type: ContentTypes.TEXT, - [ContentTypes.TEXT]: msg.content, - }, - ] - : msg.content - ).concat(message.image_urls); - - return msg; - }).bind(this); - } - - /** - * Formats messages for generative AI - * @param {TMessage[]} messages - * @returns - */ - async formatGenerativeMessages(messages) { - const formattedMessages = []; - const attachments = await this.options.attachments; - const latestMessage = { ...messages[messages.length - 1] }; - const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative); - this.options.attachments = files; - messages[messages.length - 1] = latestMessage; - - for (const _message of messages) { - const role = _message.isCreatedByUser ? this.userLabel : this.modelLabel; - const parts = []; - parts.push({ text: _message.text }); - if (!_message.image_urls?.length) { - formattedMessages.push({ role, parts }); - continue; - } - - for (const images of _message.image_urls) { - if (images.inlineData) { - parts.push({ inlineData: images.inlineData }); - } - } - - formattedMessages.push({ role, parts }); - } - - return formattedMessages; - } - - /** - * - * Adds image URLs to the message object and returns the files - * - * @param {TMessage[]} messages - * @param {MongoFile[]} files - * @returns {Promise} - */ - async addImageURLs(message, attachments, mode = '') { - const { files, image_urls } = await encodeAndFormat( - this.options.req, - attachments, - { - endpoint: EModelEndpoint.google, - }, - mode, - ); - message.image_urls = image_urls.length ? image_urls : undefined; - return files; - } - - /** - * Builds the augmented prompt for attachments - * TODO: Add File API Support - * @param {TMessage[]} messages - */ - async buildAugmentedPrompt(messages = []) { - const attachments = await this.options.attachments; - const latestMessage = { ...messages[messages.length - 1] }; - this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text); - - if (this.contextHandlers) { - for (const file of attachments) { - if (file.embedded) { - this.contextHandlers?.processFile(file); - continue; - } - if (file.metadata?.fileIdentifier) { - continue; - } - } - - this.augmentedPrompt = await this.contextHandlers.createContext(); - this.systemMessage = this.augmentedPrompt + this.systemMessage; - } - } - - async buildVisionMessages(messages = [], parentMessageId) { - const attachments = await this.options.attachments; - const latestMessage = { ...messages[messages.length - 1] }; - await this.buildAugmentedPrompt(messages); - - const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId); - - const files = await this.addImageURLs(latestMessage, attachments); - - this.options.attachments = files; - - latestMessage.text = prompt; - - const payload = { - instances: [ - { - messages: [new HumanMessage(formatMessage({ message: latestMessage }))], - }, - ], - }; - return { prompt: payload }; - } - - /** @param {TMessage[]} [messages=[]] */ - async buildGenerativeMessages(messages = []) { - this.userLabel = 'user'; - this.modelLabel = 'model'; - const promises = []; - promises.push(await this.formatGenerativeMessages(messages)); - promises.push(this.buildAugmentedPrompt(messages)); - const [formattedMessages] = await Promise.all(promises); - return { prompt: formattedMessages }; - } - - /** - * @param {TMessage[]} [messages=[]] - * @param {string} [parentMessageId] - */ - async buildMessages(_messages = [], parentMessageId) { - if (!this.isGenerativeModel && !this.project_id) { - throw new Error('[GoogleClient] PaLM 2 
and Codey models are no longer supported.'); - } - - if (this.systemMessage) { - const instructionsTokenCount = this.getTokenCount(this.systemMessage); - - this.maxContextTokens = this.maxContextTokens - instructionsTokenCount; - if (this.maxContextTokens < 0) { - const info = `${instructionsTokenCount} / ${this.maxContextTokens}`; - const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; - logger.warn(`Instructions token count exceeds max context (${info}).`); - throw new Error(errorMessage); - } - } - - for (let i = 0; i < _messages.length; i++) { - const message = _messages[i]; - if (!message.tokenCount) { - _messages[i].tokenCount = this.getTokenCountForMessage({ - role: message.isCreatedByUser ? 'user' : 'assistant', - content: message.content ?? message.text, - }); - } - } - - const { - payload: messages, - tokenCountMap, - promptTokens, - } = await this.handleContextStrategy({ - orderedMessages: _messages, - formattedMessages: _messages, - }); - - if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) { - const result = await this.buildGenerativeMessages(messages); - result.tokenCountMap = tokenCountMap; - result.promptTokens = promptTokens; - return result; - } - - if (this.options.attachments && this.isGenerativeModel) { - const result = await this.buildVisionMessages(messages, parentMessageId); - result.tokenCountMap = tokenCountMap; - result.promptTokens = promptTokens; - return result; - } - - let payload = { - instances: [ - { - messages: messages - .map(this.formatMessages()) - .map((msg) => ({ ...msg, role: msg.author === 'User' ? 'user' : 'assistant' })) - .map((message) => formatMessage({ message, langChain: true })), - }, - ], - }; - - if (this.systemMessage) { - payload.instances[0].context = this.systemMessage; - } - - logger.debug('[GoogleClient] buildMessages', payload); - return { prompt: payload, tokenCountMap, promptTokens }; - } - - async buildMessagesPrompt(messages, parentMessageId) { - const orderedMessages = this.constructor.getMessagesForConversation({ - messages, - parentMessageId, - }); - - logger.debug('[GoogleClient]', { - orderedMessages, - parentMessageId, - }); - - const formattedMessages = orderedMessages.map(this.formatMessages()); - - let lastAuthor = ''; - let groupedMessages = []; - - for (let message of formattedMessages) { - // If the last author differs from the current author, start a new group - if (lastAuthor !== message.author) { - groupedMessages.push({ - author: message.author, - content: [message.content], - }); - lastAuthor = message.author; - // If the author is the same, append the content to the last group - } else { - groupedMessages[groupedMessages.length - 1].content.push(message.content); - } - } - - let identityPrefix = ''; - if (this.options.userLabel) { - identityPrefix = `\nHuman's name: ${this.options.userLabel}`; - } - - if (this.options.modelLabel) { - identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`; - } - - let promptPrefix = (this.systemMessage ?? '').trim(); - - if (identityPrefix) { - promptPrefix = `${identityPrefix}${promptPrefix}`; - } - - // Prompt the AI to respond; empty if the last message was from the AI - let isEdited = lastAuthor === this.modelLabel; - const promptSuffix = isEdited ? '' : `${promptPrefix}\n\n${this.modelLabel}:\n`; - let currentTokenCount = isEdited - ? 
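The author-grouping pass above collapses consecutive turns from the same speaker into a single block. As a standalone sketch (the `{ author, content }` message shape is assumed for illustration):

    // Consecutive messages from one author become a single group whose
    // content array preserves the original message order.
    function groupByAuthor(messages) {
      const grouped = [];
      let lastAuthor = '';
      for (const { author, content } of messages) {
        if (author !== lastAuthor) {
          grouped.push({ author, content: [content] });
          lastAuthor = author;
        } else {
          grouped[grouped.length - 1].content.push(content);
        }
      }
      return grouped;
    }

    // Two consecutive 'Human' messages followed by one model reply yield
    // two groups: Human (two content parts), then the model (one part).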
this.getTokenCount(promptPrefix) - : this.getTokenCount(promptSuffix); - - let promptBody = ''; - const maxTokenCount = this.maxPromptTokens; - - const context = []; - - // Iterate backwards through the messages, adding them to the prompt until we reach the max token count. - // Do this within a recursive async function so that it doesn't block the event loop for too long. - // Also, remove the next message when the message that puts us over the token limit is created by the user. - // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:". - const nextMessage = { - remove: false, - tokenCount: 0, - messageString: '', - }; - - const buildPromptBody = async () => { - if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) { - const message = groupedMessages.pop(); - const isCreatedByUser = message.author === this.userLabel; - // Use promptPrefix if the message is an edited assistant message - const messagePrefix = - isCreatedByUser || !isEdited - ? `\n\n${message.author}:` - : `${promptPrefix}\n\n${message.author}:`; - const messageString = `${messagePrefix}\n${message.content}\n`; - let newPromptBody = `${messageString}${promptBody}`; - - context.unshift(message); - - const tokenCountForMessage = this.getTokenCount(messageString); - const newTokenCount = currentTokenCount + tokenCountForMessage; - - if (!isCreatedByUser) { - nextMessage.messageString = messageString; - nextMessage.tokenCount = tokenCountForMessage; - } - - if (newTokenCount > maxTokenCount) { - if (!promptBody) { - // This is the first message, so we can't add it. Just throw an error. - throw new Error( - `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`, - ); - } - - // Otherwise, this message would put us over the token limit, so don't add it. - // If created by the user, remove the next message; otherwise remove only this message. - if (isCreatedByUser) { - nextMessage.remove = true; - } - - return false; - } - promptBody = newPromptBody; - currentTokenCount = newTokenCount; - - // Switch off isEdited after using it for the first time - if (isEdited) { - isEdited = false; - } - - // wait for next tick to avoid blocking the event loop - await new Promise((resolve) => setImmediate(resolve)); - return buildPromptBody(); - } - return true; - }; - - await buildPromptBody(); - - if (nextMessage.remove) { - promptBody = promptBody.replace(nextMessage.messageString, ''); - currentTokenCount -= nextMessage.tokenCount; - context.shift(); - } - - let prompt = `${promptBody}${promptSuffix}`.trim(); - - // Add 2 tokens for metadata after all messages have been counted. - currentTokenCount += 2; - - // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response. - this.modelOptions.maxOutputTokens = Math.min( - this.maxContextTokens - currentTokenCount, - this.maxResponseTokens, - ); - - return { prompt, context }; - } - - createLLM(clientOptions) { - const model = clientOptions.modelName ?? 
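The recursion above drains messages from the newest end until the token budget is exhausted, yielding to the event loop between steps. A minimal sketch of that pattern, with a generic `getCost` standing in for `getTokenCount`:

    // Pops items until the budget would be exceeded; setImmediate between
    // iterations keeps long histories from blocking the event loop.
    async function accumulateWithinBudget(items, maxCost, getCost) {
      let total = 0;
      const kept = [];
      const step = async () => {
        if (items.length === 0) {
          return;
        }
        const item = items.pop();
        const cost = getCost(item);
        if (total + cost > maxCost) {
          return;
        }
        kept.unshift(item);
        total += cost;
        await new Promise((resolve) => setImmediate(resolve));
        return step();
      };
      await step();
      return { kept, total };
    }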
clientOptions.model; - clientOptions.location = loc; - clientOptions.endpoint = endpointPrefix; - - let requestOptions = null; - if (this.reverseProxyUrl) { - requestOptions = { - baseUrl: this.reverseProxyUrl, - }; - - if (this.authHeader) { - requestOptions.customHeaders = { - Authorization: `Bearer ${this.apiKey}`, - }; - } - } - - if (this.project_id != null) { - logger.debug('Creating VertexAI client'); - this.visionMode = undefined; - clientOptions.streaming = true; - const client = new ChatVertexAI(clientOptions); - client.temperature = clientOptions.temperature; - client.topP = clientOptions.topP; - client.topK = clientOptions.topK; - client.topLogprobs = clientOptions.topLogprobs; - client.frequencyPenalty = clientOptions.frequencyPenalty; - client.presencePenalty = clientOptions.presencePenalty; - client.maxOutputTokens = clientOptions.maxOutputTokens; - return client; - } else if (!EXCLUDED_GENAI_MODELS.test(model)) { - logger.debug('Creating GenAI client'); - return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions); - } - - logger.debug('Creating Chat Google Generative AI client'); - return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey }); - } - - initializeClient() { - let clientOptions = { ...this.modelOptions }; - - if (this.project_id) { - clientOptions['authOptions'] = { - credentials: { - ...this.serviceKey, - }, - projectId: this.project_id, - }; - } - - if (this.isGenerativeModel && !this.project_id) { - clientOptions.modelName = clientOptions.model; - delete clientOptions.model; - } - - this.client = this.createLLM(clientOptions); - return this.client; - } - - async getCompletion(_payload, options = {}) { - const { onProgress, abortController } = options; - const safetySettings = getSafetySettings(this.modelOptions.model); - const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE; - const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? ''; - - let reply = ''; - /** @type {Error} */ - let error; - try { - if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) { - /** @type {GenerativeModel} */ - const client = this.client; - /** @type {GenerateContentRequest} */ - const requestOptions = { - safetySettings, - contents: _payload, - generationConfig: googleGenConfigSchema.parse(this.modelOptions), - }; - - const promptPrefix = (this.systemMessage ?? '').trim(); - if (promptPrefix.length) { - requestOptions.systemInstruction = { - parts: [ - { - text: promptPrefix, - }, - ], - }; - } - - const delay = modelName.includes('flash') ? 8 : 15; - /** @type {GenAIUsageMetadata} */ - let usageMetadata; - - abortController.signal.addEventListener( - 'abort', - () => { - logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason); - }, - { once: true }, - ); - - const result = await client.generateContentStream(requestOptions, { - signal: abortController.signal, - }); - for await (const chunk of result.stream) { - usageMetadata = !usageMetadata - ? chunk?.usageMetadata - : Object.assign(usageMetadata, chunk?.usageMetadata); - const chunkText = chunk.text(); - await this.generateTextStream(chunkText, onProgress, { - delay, - }); - reply += chunkText; - await sleep(streamRate); - } - - if (usageMetadata) { - this.usage = { - input_tokens: usageMetadata.promptTokenCount, - output_tokens: usageMetadata.candidatesTokenCount, - }; - } - - return reply; - } - - const { instances } = _payload; - const { messages: messages, context } = instances?.[0] ?? 
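The GenAI branch of `getCompletion` consumes `generateContentStream` chunk by chunk, merging usage metadata as it arrives. Reduced to its core (a sketch assuming `client` is a `@google/generative-ai` `GenerativeModel` and `contents` is already formatted):

    // Streams a response, accumulating text and the latest usage metadata.
    async function streamGenAI(client, contents) {
      const result = await client.generateContentStream({ contents });
      let text = '';
      let usage;
      for await (const chunk of result.stream) {
        usage = usage ? Object.assign(usage, chunk?.usageMetadata) : chunk?.usageMetadata;
        text += chunk.text();
      }
      return { text, usage };
    }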
{}; - - if (!this.isVisionModel && context && messages?.length > 0) { - messages.unshift(new SystemMessage(context)); - } - - /** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */ - let usageMetadata; - /** @type {ChatVertexAI} */ - const client = this.client; - const stream = await client.stream(messages, { - signal: abortController.signal, - streamUsage: true, - safetySettings, - }); - - let delay = this.options.streamRate || 8; - - if (!this.options.streamRate) { - if (this.isGenerativeModel) { - delay = 15; - } - if (modelName.includes('flash')) { - delay = 5; - } - } - - for await (const chunk of stream) { - if (chunk?.usage_metadata) { - const metadata = chunk.usage_metadata; - for (const key in metadata) { - if (Number.isNaN(metadata[key])) { - delete metadata[key]; - } - } - - usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata); - } - - const chunkText = chunk?.content ?? ''; - await this.generateTextStream(chunkText, onProgress, { - delay, - }); - reply += chunkText; - } - - if (usageMetadata) { - this.usage = usageMetadata; - } - } catch (e) { - error = e; - logger.error('[GoogleClient] There was an issue generating the completion', e); - } - - if (error != null && reply === '') { - const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${ - error.message ?? 'The Google provider failed to generate content, please contact the Admin.' - }" }`; - throw new Error(errorMessage); - } - return reply; - } - - /** - * Get stream usage as returned by this client's API response. - * @returns {UsageMetadata} The stream usage object. - */ - getStreamUsage() { - return this.usage; - } - - getMessageMapMethod() { - /** - * @param {TMessage} msg - */ - return (msg) => { - if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) { - msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim(); - } else if (msg.content != null) { - msg.text = parseTextParts(msg.content, true); - delete msg.content; - } - - return msg; - }; - } - - /** - * Calculates the correct token count for the current user message based on the token count map and API usage. - * Edge case: If the calculation results in a negative value, it returns the original estimate. - * If revisiting a conversation with a chat history entirely composed of token estimates, - * the cumulative token count going forward should become more accurate as the conversation progresses. - * @param {Object} params - The parameters for the calculation. - * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. - * @param {string} params.currentMessageId - The ID of the current message to calculate. - * @param {UsageMetadata} params.usage - The usage object returned by the API. - * @returns {number} The correct token count for the current user message. - */ - calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { - const originalEstimate = tokenCountMap[currentMessageId] || 0; - - if (!usage || typeof usage.input_tokens !== 'number') { - return originalEstimate; - } - - tokenCountMap[currentMessageId] = 0; - const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { - const numCount = Number(count); - return sum + (isNaN(numCount) ? 0 : numCount); - }, 0); - const totalInputTokens = usage.input_tokens ?? 0; - const currentMessageTokens = totalInputTokens - totalTokensFromMap; - return currentMessageTokens > 0 ? 
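Concretely, with hypothetical numbers: if the API reports 180 input tokens and the other mapped messages were estimated at 50 and 70, the current message is counted as 60; a negative result falls back to the original estimate.

    const tokenCountMap = { msgA: 50, msgB: 70, current: 40 }; // 40 is the estimate
    tokenCountMap.current = 0;
    const mapTotal = Object.values(tokenCountMap).reduce((sum, n) => sum + n, 0); // 120
    const current = 180 - mapTotal; // 60 > 0, so 60 replaces the 40 estimate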
currentMessageTokens : originalEstimate; - } - - /** - * @param {object} params - * @param {number} params.promptTokens - * @param {number} params.completionTokens - * @param {UsageMetadata} [params.usage] - * @param {string} [params.model] - * @param {string} [params.context='message'] - * @returns {Promise} - */ - async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) { - await spendTokens( - { - context, - user: this.user ?? this.options.req?.user?.id, - conversationId: this.conversationId, - model: model ?? this.modelOptions.model, - endpointTokenConfig: this.options.endpointTokenConfig, - }, - { promptTokens, completionTokens }, - ); - } - - /** - * Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming - */ - async titleChatCompletion(_payload, options = {}) { - let reply = ''; - const { abortController } = options; - - const model = - this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? ''; - const safetySettings = getSafetySettings(model); - if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) { - logger.debug('Identified titling model as GenAI version'); - /** @type {GenerativeModel} */ - const client = this.client; - const requestOptions = { - contents: _payload, - safetySettings, - generationConfig: { - temperature: 0.5, - }, - }; - - const result = await client.generateContent(requestOptions); - reply = result.response?.text(); - return reply; - } else { - const { instances } = _payload; - const { messages } = instances?.[0] ?? {}; - const titleResponse = await this.client.invoke(messages, { - signal: abortController.signal, - timeout: 7000, - safetySettings, - }); - - if (titleResponse.usage_metadata) { - await this.recordTokenUsage({ - model, - promptTokens: titleResponse.usage_metadata.input_tokens, - completionTokens: titleResponse.usage_metadata.output_tokens, - context: 'title', - }); - } - - reply = titleResponse.content; - return reply; - } - } - - async titleConvo({ text, responseText = '' }) { - let title = 'New Chat'; - const convo = `||>User: -"${truncateText(text)}" -||>Response: -"${JSON.stringify(truncateText(responseText))}"`; - - let { prompt: payload } = await this.buildMessages([ - { - text: `Please generate ${titleInstruction} - - ${convo} - - ||>Title:`, - isCreatedByUser: true, - author: this.userLabel, - }, - ]); - - try { - this.initializeClient(); - title = await this.titleChatCompletion(payload, { - abortController: new AbortController(), - onProgress: () => {}, - }); - } catch (e) { - logger.error('[GoogleClient] There was an issue generating the title', e); - } - logger.debug(`Title response: ${title}`); - return title; - } - - getSaveOptions() { - return { - endpointType: null, - artifacts: this.options.artifacts, - promptPrefix: this.options.promptPrefix, - maxContextTokens: this.options.maxContextTokens, - modelLabel: this.options.modelLabel, - iconURL: this.options.iconURL, - greeting: this.options.greeting, - spec: this.options.spec, - ...this.modelOptions, - }; - } - - getBuildMessagesOptions() { - // logger.debug('GoogleClient doesn\'t use getBuildMessagesOptions'); - } - - async sendCompletion(payload, opts = {}) { - let reply = ''; - reply = await this.getCompletion(payload, opts); - return reply.trim(); - } - - getEncoding() { - return 'cl100k_base'; - } - - async getVertexTokenCount(text) { - /** @type {ChatVertexAI} */ - const client = this.client ?? 
this.initializeClient(); - const connection = client.connection; - const gAuthClient = connection.client; - const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`; - const result = await gAuthClient.request({ - url: tokenEndpoint, - method: 'POST', - data: { - contents: [{ role: 'user', parts: [{ text }] }], - }, - }); - return result; - } - - /** - * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. - * @param {string} text - The text to get the token count for. - * @returns {number} The token count of the given text. - */ - getTokenCount(text) { - const encoding = this.getEncoding(); - return Tokenizer.getTokenCount(text, encoding); - } -} - -module.exports = GoogleClient; diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js deleted file mode 100644 index f4c42351e3..0000000000 --- a/api/app/clients/OpenAIClient.js +++ /dev/null @@ -1,1207 +0,0 @@ -const { logger } = require('@librechat/data-schemas'); -const { HttpsProxyAgent } = require('https-proxy-agent'); -const { sleep, SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents'); -const { - isEnabled, - Tokenizer, - createFetch, - resolveHeaders, - constructAzureURL, - getModelMaxTokens, - genAzureChatCompletion, - getModelMaxOutputTokens, - createStreamEventHandlers, -} = require('@librechat/api'); -const { - Constants, - ImageDetail, - ContentTypes, - parseTextParts, - EModelEndpoint, - KnownEndpoints, - openAISettings, - ImageDetailCost, - getResponseSender, - validateVisionModel, - mapModelToAzureConfig, -} = require('librechat-data-provider'); -const { encodeAndFormat } = require('~/server/services/Files/images/encode'); -const { formatMessage, createContextHandlers } = require('./prompts'); -const { spendTokens } = require('~/models/spendTokens'); -const { addSpaceIfNeeded } = require('~/server/utils'); -const { handleOpenAIErrors } = require('./tools/util'); -const { OllamaClient } = require('./OllamaClient'); -const { extractBaseURL } = require('~/utils'); -const BaseClient = require('./BaseClient'); - -class OpenAIClient extends BaseClient { - constructor(apiKey, options = {}) { - super(apiKey, options); - this.contextStrategy = options.contextStrategy - ? options.contextStrategy.toLowerCase() - : 'discard'; - this.shouldSummarize = this.contextStrategy === 'summarize'; - /** @type {AzureOptions} */ - this.azure = options.azure || false; - this.setOptions(options); - this.metadata = {}; - - /** @type {string | undefined} - The API Completions URL */ - this.completionsUrl; - - /** @type {OpenAIUsageMetadata | undefined} */ - this.usage; - /** @type {boolean|undefined} */ - this.isOmni; - /** @type {SplitStreamHandler | undefined} */ - this.streamHandler; - } - - // TODO: PluginsClient calls this 3x, unneeded - setOptions(options) { - if (this.options && !this.options.replaceOptions) { - this.options.modelOptions = { - ...this.options.modelOptions, - ...options.modelOptions, - }; - delete options.modelOptions; - this.options = { - ...this.options, - ...options, - }; - } else { - this.options = options; - } - - if (this.options.openaiApiKey) { - this.apiKey = this.options.openaiApiKey; - } - - this.modelOptions = Object.assign( - { - model: openAISettings.model.default, - }, - this.modelOptions, - this.options.modelOptions, - ); - - this.defaultVisionModel = this.options.visionModel ?? 
'gpt-4-vision-preview'; - if (typeof this.options.attachments?.then === 'function') { - this.options.attachments.then((attachments) => this.checkVisionRequest(attachments)); - } else { - this.checkVisionRequest(this.options.attachments); - } - - const omniPattern = /\b(o\d)\b/i; - this.isOmni = omniPattern.test(this.modelOptions.model); - - const { OPENAI_FORCE_PROMPT } = process.env ?? {}; - const { reverseProxyUrl: reverseProxy } = this.options; - - if ( - !this.useOpenRouter && - ((reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) || - (this.options.endpoint && - this.options.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))) - ) { - this.useOpenRouter = true; - } - - if (this.options.endpoint?.toLowerCase() === 'ollama') { - this.isOllama = true; - } - - this.FORCE_PROMPT = - isEnabled(OPENAI_FORCE_PROMPT) || - (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat')); - - if (typeof this.options.forcePrompt === 'boolean') { - this.FORCE_PROMPT = this.options.forcePrompt; - } - - if (this.azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) { - this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model, this); - this.modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL; - } else if (this.azure) { - this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model, this); - } - - const { model } = this.modelOptions; - - this.isChatCompletion = - omniPattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy; - this.isChatGptModel = this.isChatCompletion; - if ( - model.includes('text-davinci') || - model.includes('gpt-3.5-turbo-instruct') || - this.FORCE_PROMPT - ) { - this.isChatCompletion = false; - this.isChatGptModel = false; - } - const { isChatGptModel } = this; - this.isUnofficialChatGptModel = - model.startsWith('text-chat') || model.startsWith('text-davinci-002-render'); - - this.maxContextTokens = - this.options.maxContextTokens ?? - getModelMaxTokens( - model, - this.options.endpointType ?? this.options.endpoint, - this.options.endpointTokenConfig, - ) ?? - 4095; // 1 less than maximum - - if (this.shouldSummarize) { - this.maxContextTokens = Math.floor(this.maxContextTokens / 2); - } - - if (this.options.debug) { - logger.debug('[OpenAIClient] maxContextTokens', this.maxContextTokens); - } - - this.maxResponseTokens = - this.modelOptions.max_tokens ?? - getModelMaxOutputTokens( - model, - this.options.endpointType ?? this.options.endpoint, - this.options.endpointTokenConfig, - ) ?? - 1024; - this.maxPromptTokens = - this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens; - - if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) { - throw new Error( - `maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${ - this.maxPromptTokens + this.maxResponseTokens - }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`, - ); - } - - this.sender = - this.options.sender ?? 
- getResponseSender({ - model: this.modelOptions.model, - endpoint: this.options.endpoint, - endpointType: this.options.endpointType, - modelDisplayLabel: this.options.modelDisplayLabel, - chatGptLabel: this.options.chatGptLabel || this.options.modelLabel, - }); - - this.userLabel = this.options.userLabel || 'User'; - this.chatGptLabel = this.options.chatGptLabel || 'Assistant'; - - this.setupTokens(); - - if (reverseProxy) { - this.completionsUrl = reverseProxy; - this.langchainProxy = extractBaseURL(reverseProxy); - } else if (isChatGptModel) { - this.completionsUrl = 'https://api.openai.com/v1/chat/completions'; - } else { - this.completionsUrl = 'https://api.openai.com/v1/completions'; - } - - if (this.azureEndpoint) { - this.completionsUrl = this.azureEndpoint; - } - - if (this.azureEndpoint && this.options.debug) { - logger.debug('Using Azure endpoint'); - } - - return this; - } - - /** - * - * Checks if the model is a vision model based on request attachments and sets the appropriate options: - * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request. - * - Sets `this.isVisionModel` to `true` if vision request. - * - Deletes `this.modelOptions.stop` if vision request. - * @param {MongoFile[]} attachments - */ - checkVisionRequest(attachments) { - if (!attachments) { - return; - } - - const availableModels = this.options.modelsConfig?.[this.options.endpoint]; - if (!availableModels) { - return; - } - - let visionRequestDetected = false; - for (const file of attachments) { - if (file?.type?.includes('image')) { - visionRequestDetected = true; - break; - } - } - if (!visionRequestDetected) { - return; - } - - this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); - if (this.isVisionModel) { - delete this.modelOptions.stop; - return; - } - - for (const model of availableModels) { - if (!validateVisionModel({ model, availableModels })) { - continue; - } - this.modelOptions.model = model; - this.isVisionModel = true; - delete this.modelOptions.stop; - return; - } - - if (!availableModels.includes(this.defaultVisionModel)) { - return; - } - if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) { - return; - } - - this.modelOptions.model = this.defaultVisionModel; - this.isVisionModel = true; - delete this.modelOptions.stop; - } - - setupTokens() { - if (this.isChatCompletion) { - this.startToken = '||>'; - this.endToken = ''; - } else if (this.isUnofficialChatGptModel) { - this.startToken = '<|im_start|>'; - this.endToken = '<|im_end|>'; - } else { - this.startToken = '||>'; - this.endToken = ''; - } - } - - getEncoding() { - return this.modelOptions?.model && /gpt-4[^-\s]/.test(this.modelOptions.model) - ? 'o200k_base' - : 'cl100k_base'; - } - - /** - * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. - * @param {string} text - The text to get the token count for. - * @returns {number} The token count of the given text. - */ - getTokenCount(text) { - const encoding = this.getEncoding(); - return Tokenizer.getTokenCount(text, encoding); - } - - /** - * Calculate the token cost for an image based on its dimensions and detail level. - * - * @param {Object} image - The image object. - * @param {number} image.width - The width of the image. - * @param {number} image.height - The height of the image. - * @param {'low'|'high'|string|undefined} [image.detail] - The detail level ('low', 'high', or other). 
- * @returns {number} The calculated token cost. - */ - calculateImageTokenCost({ width, height, detail }) { - if (detail === 'low') { - return ImageDetailCost.LOW; - } - - // Calculate the number of 512px squares - const numSquares = Math.ceil(width / 512) * Math.ceil(height / 512); - - // Default to high detail cost calculation - return numSquares * ImageDetailCost.HIGH + ImageDetailCost.ADDITIONAL; - } - - getSaveOptions() { - return { - artifacts: this.options.artifacts, - maxContextTokens: this.options.maxContextTokens, - chatGptLabel: this.options.chatGptLabel, - promptPrefix: this.options.promptPrefix, - resendFiles: this.options.resendFiles, - imageDetail: this.options.imageDetail, - modelLabel: this.options.modelLabel, - iconURL: this.options.iconURL, - greeting: this.options.greeting, - spec: this.options.spec, - ...this.modelOptions, - }; - } - - getBuildMessagesOptions(opts) { - return { - isChatCompletion: this.isChatCompletion, - promptPrefix: opts.promptPrefix, - abortController: opts.abortController, - }; - } - - /** - * - * Adds image URLs to the message object and returns the files - * - * @param {TMessage[]} messages - * @param {MongoFile[]} files - * @returns {Promise} - */ - async addImageURLs(message, attachments) { - const { files, image_urls } = await encodeAndFormat(this.options.req, attachments, { - endpoint: this.options.endpoint, - }); - message.image_urls = image_urls.length ? image_urls : undefined; - return files; - } - - async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) { - let orderedMessages = this.constructor.getMessagesForConversation({ - messages, - parentMessageId, - summary: this.shouldSummarize, - }); - - let payload; - let instructions; - let tokenCountMap; - let promptTokens; - - promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim(); - if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { - promptPrefix = `${promptPrefix ?? 
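The tile math above can be checked by hand; the constants here are assumptions standing in for `ImageDetailCost` (they mirror OpenAI's published vision pricing at the time, but treat them as illustrative):

    const LOW = 85, HIGH = 170, ADDITIONAL = 85; // assumed ImageDetailCost values
    // A 1024x1024 image at high detail covers ceil(1024/512) * ceil(1024/512) tiles.
    const tiles = Math.ceil(1024 / 512) * Math.ceil(1024 / 512); // 4
    const cost = tiles * HIGH + ADDITIONAL; // 765 tokens; 'low' detail is a flat 85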
''}\n${this.options.artifactsPrompt}`.trim(); - } - - if (this.options.attachments) { - const attachments = await this.options.attachments; - - if (this.message_file_map) { - this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments; - } else { - this.message_file_map = { - [orderedMessages[orderedMessages.length - 1].messageId]: attachments, - }; - } - - const files = await this.addImageURLs( - orderedMessages[orderedMessages.length - 1], - attachments, - ); - - this.options.attachments = files; - } - - if (this.message_file_map) { - this.contextHandlers = createContextHandlers( - this.options.req, - orderedMessages[orderedMessages.length - 1].text, - ); - } - - const formattedMessages = orderedMessages.map((message, i) => { - const formattedMessage = formatMessage({ - message, - userName: this.options?.name, - assistantName: this.options?.chatGptLabel, - }); - - const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount; - - /* If tokens were never counted, or, is a Vision request and the message has files, count again */ - if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) { - orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage); - } - - /* If message has files, calculate image token cost */ - if (this.message_file_map && this.message_file_map[message.messageId]) { - const attachments = this.message_file_map[message.messageId]; - for (const file of attachments) { - if (file.embedded) { - this.contextHandlers?.processFile(file); - continue; - } - if (file.metadata?.fileIdentifier) { - continue; - } - - orderedMessages[i].tokenCount += this.calculateImageTokenCost({ - width: file.width, - height: file.height, - detail: this.options.imageDetail ?? 
ImageDetail.auto, - }); - } - } - - return formattedMessage; - }); - - if (this.contextHandlers) { - this.augmentedPrompt = await this.contextHandlers.createContext(); - promptPrefix = this.augmentedPrompt + promptPrefix; - } - - const noSystemModelRegex = /\b(o1-preview|o1-mini)\b/i.test(this.modelOptions.model); - - if (promptPrefix && !noSystemModelRegex) { - promptPrefix = `Instructions:\n${promptPrefix.trim()}`; - instructions = { - role: 'system', - content: promptPrefix, - }; - - if (this.contextStrategy) { - instructions.tokenCount = this.getTokenCountForMessage(instructions); - } - } - - // TODO: need to handle interleaving instructions better - if (this.contextStrategy) { - ({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({ - instructions, - orderedMessages, - formattedMessages, - })); - } - - const result = { - prompt: payload, - promptTokens, - messages, - }; - - /** EXPERIMENTAL */ - if (promptPrefix && noSystemModelRegex) { - const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user'); - if (lastUserMessageIndex !== -1) { - if (Array.isArray(payload[lastUserMessageIndex].content)) { - const firstTextPartIndex = payload[lastUserMessageIndex].content.findIndex( - (part) => part.type === ContentTypes.TEXT, - ); - if (firstTextPartIndex !== -1) { - const firstTextPart = payload[lastUserMessageIndex].content[firstTextPartIndex]; - payload[lastUserMessageIndex].content[firstTextPartIndex].text = - `${promptPrefix}\n${firstTextPart.text}`; - } else { - payload[lastUserMessageIndex].content.unshift({ - type: ContentTypes.TEXT, - text: promptPrefix, - }); - } - } else { - payload[lastUserMessageIndex].content = - `${promptPrefix}\n${payload[lastUserMessageIndex].content}`; - } - } - } - - if (tokenCountMap) { - tokenCountMap.instructions = instructions?.tokenCount; - result.tokenCountMap = tokenCountMap; - } - - if (promptTokens >= 0 && typeof opts?.getReqData === 'function') { - opts.getReqData({ promptTokens }); - } - - return result; - } - - /** @type {sendCompletion} */ - async sendCompletion(payload, opts = {}) { - let reply = ''; - let result = null; - let streamResult = null; - this.modelOptions.user = this.user; - const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null; - const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion); - if (typeof opts.onProgress === 'function' && useOldMethod) { - const completionResult = await this.getCompletion( - payload, - (progressMessage) => { - if (progressMessage === '[DONE]') { - return; - } - - if (progressMessage.choices) { - streamResult = progressMessage; - } - - let token = null; - if (this.isChatCompletion) { - token = - progressMessage.choices?.[0]?.delta?.content ?? 
progressMessage.choices?.[0]?.text; - } else { - token = progressMessage.choices?.[0]?.text; - } - - if (!token && this.useOpenRouter) { - token = progressMessage.choices?.[0]?.message?.content; - } - // first event's delta content is always undefined - if (!token) { - return; - } - - if (token === this.endToken) { - return; - } - opts.onProgress(token); - reply += token; - }, - opts.onProgress, - opts.abortController || new AbortController(), - ); - - if (completionResult && typeof completionResult === 'string') { - reply = completionResult; - } else if ( - completionResult && - typeof completionResult === 'object' && - Array.isArray(completionResult.choices) - ) { - reply = completionResult.choices[0]?.text?.replace(this.endToken, ''); - } - } else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) { - reply = await this.chatCompletion({ - payload, - onProgress: opts.onProgress, - abortController: opts.abortController, - }); - } else { - result = await this.getCompletion( - payload, - null, - opts.onProgress, - opts.abortController || new AbortController(), - ); - - if (result && typeof result === 'string') { - return result.trim(); - } - - logger.debug('[OpenAIClient] sendCompletion: result', { ...result }); - - if (this.isChatCompletion) { - reply = result.choices[0].message.content; - } else { - reply = result.choices[0].text.replace(this.endToken, ''); - } - } - - if (streamResult) { - const { finish_reason } = streamResult.choices[0]; - this.metadata = { finish_reason }; - } - return (reply ?? '').trim(); - } - - initializeLLM() { - throw new Error('Deprecated'); - } - - /** - * Get stream usage as returned by this client's API response. - * @returns {OpenAIUsageMetadata} The stream usage object. - */ - getStreamUsage() { - if ( - this.usage && - typeof this.usage === 'object' && - 'completion_tokens_details' in this.usage && - this.usage.completion_tokens_details && - typeof this.usage.completion_tokens_details === 'object' && - 'reasoning_tokens' in this.usage.completion_tokens_details - ) { - const outputTokens = Math.abs( - this.usage.completion_tokens_details.reasoning_tokens - this.usage[this.outputTokensKey], - ); - return { - ...this.usage.completion_tokens_details, - [this.inputTokensKey]: this.usage[this.inputTokensKey], - [this.outputTokensKey]: outputTokens, - }; - } - return this.usage; - } - - /** - * Calculates the correct token count for the current user message based on the token count map and API usage. - * Edge case: If the calculation results in a negative value, it returns the original estimate. - * If revisiting a conversation with a chat history entirely composed of token estimates, - * the cumulative token count going forward should become more accurate as the conversation progresses. - * @param {Object} params - The parameters for the calculation. - * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. - * @param {string} params.currentMessageId - The ID of the current message to calculate. - * @param {OpenAIUsageMetadata} params.usage - The usage object returned by the API. - * @returns {number} The correct token count for the current user message. 
- */ - calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { - const originalEstimate = tokenCountMap[currentMessageId] || 0; - - if (!usage || typeof usage[this.inputTokensKey] !== 'number') { - return originalEstimate; - } - - tokenCountMap[currentMessageId] = 0; - const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { - const numCount = Number(count); - return sum + (isNaN(numCount) ? 0 : numCount); - }, 0); - const totalInputTokens = usage[this.inputTokensKey] ?? 0; - - const currentMessageTokens = totalInputTokens - totalTokensFromMap; - return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate; - } - - /** - * @param {object} params - * @param {number} params.promptTokens - * @param {number} params.completionTokens - * @param {OpenAIUsageMetadata} [params.usage] - * @param {string} [params.model] - * @param {string} [params.context='message'] - * @returns {Promise<void>} - */ - async recordTokenUsage({ promptTokens, completionTokens, usage, context = 'message' }) { - await spendTokens( - { - context, - model: this.modelOptions.model, - conversationId: this.conversationId, - user: this.user ?? this.options.req.user?.id, - endpointTokenConfig: this.options.endpointTokenConfig, - }, - { promptTokens, completionTokens }, - ); - - if ( - usage && - typeof usage === 'object' && - 'reasoning_tokens' in usage && - typeof usage.reasoning_tokens === 'number' - ) { - await spendTokens( - { - context: 'reasoning', - model: this.modelOptions.model, - conversationId: this.conversationId, - user: this.user ?? this.options.req.user?.id, - endpointTokenConfig: this.options.endpointTokenConfig, - }, - { completionTokens: usage.reasoning_tokens }, - ); - } - } - - getTokenCountForResponse(response) { - return this.getTokenCountForMessage({ - role: 'assistant', - content: response.text, - }); - } - - /** - * - * @param {string[]} [intermediateReply] - * @returns {string} - */ - getStreamText(intermediateReply) { - if (!this.streamHandler) { - return intermediateReply?.join('') ?? ''; - } - - let thinkMatch; - let remainingText; - let reasoningText = ''; - - if (this.streamHandler.reasoningTokens.length > 0) { - reasoningText = this.streamHandler.reasoningTokens.join(''); - thinkMatch = reasoningText.match(/<think>([\s\S]*?)<\/think>/)?.[1]?.trim(); - if (thinkMatch != null && thinkMatch) { - const reasoningTokens = `:::thinking\n${thinkMatch}\n:::\n`; - remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || ''; - return `${reasoningTokens}${remainingText}${this.streamHandler.tokens.join('')}`; - } else if (thinkMatch === '') { - remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || ''; - return `${remainingText}${this.streamHandler.tokens.join('')}`; - } - } - - const reasoningTokens = - reasoningText.length > 0 - ? 
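The reasoning extraction above rewraps `<think>...</think>` output as a `:::thinking` fence ahead of the visible tokens. As an isolated sketch:

    // Splits raw model output into a thinking fence plus the remainder;
    // output without a </think> close is returned unchanged.
    function splitReasoning(raw) {
      const thought = raw.match(/<think>([\s\S]*?)<\/think>/)?.[1]?.trim();
      if (thought == null) {
        return raw;
      }
      const remainder = raw.split(/<\/think>/)[1]?.trim() ?? '';
      return thought ? `:::thinking\n${thought}\n:::\n${remainder}` : remainder;
    }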
`:::thinking\n${reasoningText.replace('<think>', '').replace('</think>', '').trim()}\n:::\n` - : ''; - - return `${reasoningTokens}${this.streamHandler.tokens.join('')}`; - } - - getMessageMapMethod() { - /** - * @param {TMessage} msg - */ - return (msg) => { - if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) { - msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim(); - } else if (msg.content != null) { - msg.text = parseTextParts(msg.content, true); - delete msg.content; - } - - return msg; - }; - } - - async chatCompletion({ payload, onProgress, abortController = null }) { - const appConfig = this.options.req?.config; - let error = null; - let intermediateReply = []; - const errorCallback = (err) => (error = err); - try { - if (!abortController) { - abortController = new AbortController(); - } - - let modelOptions = { ...this.modelOptions }; - - if (typeof onProgress === 'function') { - modelOptions.stream = true; - } - if (this.isChatCompletion) { - modelOptions.messages = payload; - } else { - modelOptions.prompt = payload; - } - - const baseURL = extractBaseURL(this.completionsUrl); - logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions }); - const opts = { - baseURL, - fetchOptions: {}, - }; - - if (this.useOpenRouter) { - opts.defaultHeaders = { - 'HTTP-Referer': 'https://librechat.ai', - 'X-Title': 'LibreChat', - }; - } - - if (this.options.headers) { - opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers }; - } - - if (this.options.defaultQuery) { - opts.defaultQuery = this.options.defaultQuery; - } - - if (this.options.proxy) { - opts.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy); - } - - const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI]; - - if ( - (this.azure && this.isVisionModel && azureConfig) || - (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI) - ) { - const { modelGroupMap, groupMap } = azureConfig; - const { - azureOptions, - baseURL, - headers = {}, - serverless, - } = mapModelToAzureConfig({ - modelName: modelOptions.model, - modelGroupMap, - groupMap, - }); - opts.defaultHeaders = resolveHeaders({ headers }); - this.langchainProxy = extractBaseURL(baseURL); - this.apiKey = azureOptions.azureOpenAIApiKey; - - const groupName = modelGroupMap[modelOptions.model].group; - this.options.addParams = azureConfig.groupMap[groupName].addParams; - this.options.dropParams = azureConfig.groupMap[groupName].dropParams; - // Note: `forcePrompt` not re-assigned as only chat models are vision models - - this.azure = !serverless && azureOptions; - this.azureEndpoint = - !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this); - if (serverless === true) { - this.options.defaultQuery = azureOptions.azureOpenAIApiVersion - ? { 'api-version': azureOptions.azureOpenAIApiVersion } - : undefined; - this.options.headers['api-key'] = this.apiKey; - } - } - - if (this.azure || this.options.azure) { - /* Azure Bug, extremely short default `max_tokens` response */ - if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') { - modelOptions.max_tokens = 4000; - } - - /* Azure does not accept `model` in the body, so we need to remove it. */ - delete modelOptions.model; - - opts.baseURL = this.langchainProxy - ? constructAzureURL({ - baseURL: this.langchainProxy, - azureOptions: this.azure, - }) - : this.azureEndpoint.split(/(?<!\/)\/(?=chat)/)[0]; - } - - let chatCompletion; - /** @type {OpenAI} */ - const openai = new OpenAI({ - fetch: createFetch({ - directEndpoint: this.options.directEndpoint, - reverseProxyUrl: this.options.reverseProxyUrl, - }), - apiKey: this.apiKey, - ...opts, - }); - - /* Re-orders the system message to the top of the messages payload, as it is not allowed anywhere else */ - if ( - modelOptions.messages && - (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) - ) { - const { messages } = modelOptions; - const systemMessageIndex = messages.findIndex((msg) => 
msg.role === 'system'); - - if (systemMessageIndex > 0) { - const [systemMessage] = messages.splice(systemMessageIndex, 1); - messages.unshift(systemMessage); - } - - modelOptions.messages = messages; - } - - /* If there is only one message and it's a system message, change the role to user */ - if ( - (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) && - modelOptions.messages && - modelOptions.messages.length === 1 && - modelOptions.messages[0]?.role === 'system' - ) { - modelOptions.messages[0].role = 'user'; - } - - if ( - (this.options.endpoint === EModelEndpoint.openAI || - this.options.endpoint === EModelEndpoint.azureOpenAI) && - modelOptions.stream === true - ) { - modelOptions.stream_options = { include_usage: true }; - } - - if (this.options.addParams && typeof this.options.addParams === 'object') { - const addParams = { ...this.options.addParams }; - modelOptions = { - ...modelOptions, - ...addParams, - }; - logger.debug('[OpenAIClient] chatCompletion: added params', { - addParams: addParams, - modelOptions, - }); - } - - /** Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` */ - if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) { - const searchExcludeParams = [ - 'frequency_penalty', - 'presence_penalty', - 'temperature', - 'top_p', - 'top_k', - 'stop', - 'logit_bias', - 'seed', - 'response_format', - 'n', - 'logprobs', - 'user', - ]; - - this.options.dropParams = this.options.dropParams || []; - this.options.dropParams = [ - ...new Set([...this.options.dropParams, ...searchExcludeParams]), - ]; - } - - if (this.options.dropParams && Array.isArray(this.options.dropParams)) { - const dropParams = [...this.options.dropParams]; - dropParams.forEach((param) => { - delete modelOptions[param]; - }); - logger.debug('[OpenAIClient] chatCompletion: dropped params', { - dropParams: dropParams, - modelOptions, - }); - } - - const streamRate = this.options.streamRate ?? 
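The `addParams`/`dropParams` handling above amounts to a merge followed by key deletion; a compact sketch:

    // Overrides merge into the payload first, then excluded keys are removed.
    function applyParamRules(modelOptions, addParams = {}, dropParams = []) {
      const merged = { ...modelOptions, ...addParams };
      for (const param of dropParams) {
        delete merged[param];
      }
      return merged;
    }

    // applyParamRules({ model: 'gpt-4o-search-preview', temperature: 0.7 }, {}, ['temperature'])
    //   -> { model: 'gpt-4o-search-preview' }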
Constants.DEFAULT_STREAM_RATE; - - if (this.message_file_map && this.isOllama) { - const ollamaClient = new OllamaClient({ baseURL, streamRate }); - return await ollamaClient.chatCompletion({ - payload: modelOptions, - onProgress, - abortController, - }); - } - - let UnexpectedRoleError = false; - /** @type {Promise} */ - let streamPromise; - /** @type {(value: void | PromiseLike) => void} */ - let streamResolve; - - if ( - (!this.isOmni || /^o1-(mini|preview)/i.test(modelOptions.model)) && - modelOptions.reasoning_effort != null - ) { - delete modelOptions.reasoning_effort; - delete modelOptions.temperature; - } - - let reasoningKey = 'reasoning_content'; - if (this.useOpenRouter) { - modelOptions.include_reasoning = true; - reasoningKey = 'reasoning'; - } - if (this.useOpenRouter && modelOptions.reasoning_effort != null) { - modelOptions.reasoning = { - effort: modelOptions.reasoning_effort, - }; - delete modelOptions.reasoning_effort; - } - - const handlers = createStreamEventHandlers(this.options.res); - this.streamHandler = new SplitStreamHandler({ - reasoningKey, - accumulate: true, - runId: this.responseMessageId, - handlers, - }); - - intermediateReply = this.streamHandler.tokens; - - if (modelOptions.stream) { - streamPromise = new Promise((resolve) => { - streamResolve = resolve; - }); - /** @type {OpenAI.OpenAI.CompletionCreateParamsStreaming} */ - const params = { - ...modelOptions, - stream: true, - }; - const stream = await openai.chat.completions - .stream(params) - .on('abort', () => { - /* Do nothing here */ - }) - .on('error', (err) => { - handleOpenAIErrors(err, errorCallback, 'stream'); - }) - .on('finalChatCompletion', async (finalChatCompletion) => { - const finalMessage = finalChatCompletion?.choices?.[0]?.message; - if (!finalMessage) { - return; - } - await streamPromise; - if (finalMessage?.role !== 'assistant') { - finalChatCompletion.choices[0].message.role = 'assistant'; - } - - if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') { - finalChatCompletion.choices[0].message.content = this.streamHandler.tokens.join(''); - } - }) - .on('finalMessage', (message) => { - if (message?.role !== 'assistant') { - stream.messages.push({ - role: 'assistant', - content: this.streamHandler.tokens.join(''), - }); - UnexpectedRoleError = true; - } - }); - - if (this.continued === true) { - const latestText = addSpaceIfNeeded( - this.currentMessages[this.currentMessages.length - 1]?.text ?? 
'', - ); - this.streamHandler.handle({ - choices: [ - { - delta: { - content: latestText, - }, - }, - ], - }); - } - - for await (const chunk of stream) { - // Add finish_reason: null if missing in any choice - if (chunk.choices) { - chunk.choices.forEach((choice) => { - if (!('finish_reason' in choice)) { - choice.finish_reason = null; - } - }); - } - this.streamHandler.handle(chunk); - if (abortController.signal.aborted) { - stream.controller.abort(); - break; - } - - await sleep(streamRate); - } - - streamResolve(); - - if (!UnexpectedRoleError) { - chatCompletion = await stream.finalChatCompletion().catch((err) => { - handleOpenAIErrors(err, errorCallback, 'finalChatCompletion'); - }); - } - } - // regular completion - else { - chatCompletion = await openai.chat.completions - .create({ - ...modelOptions, - }) - .catch((err) => { - handleOpenAIErrors(err, errorCallback, 'create'); - }); - } - - if (openai.abortHandler && abortController.signal) { - abortController.signal.removeEventListener('abort', openai.abortHandler); - openai.abortHandler = undefined; - } - - if (!chatCompletion && UnexpectedRoleError) { - throw new Error( - 'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant', - ); - } else if (!chatCompletion && error) { - throw new Error(error); - } else if (!chatCompletion) { - throw new Error('Chat completion failed'); - } - - const { choices } = chatCompletion; - this.usage = chatCompletion.usage; - - if (!Array.isArray(choices) || choices.length === 0) { - logger.warn('[OpenAIClient] Chat completion response has no choices'); - return this.streamHandler.tokens.join(''); - } - - const { message, finish_reason } = choices[0] ?? {}; - this.metadata = { finish_reason }; - - logger.debug('[OpenAIClient] chatCompletion response', chatCompletion); - - if (!message) { - logger.warn('[OpenAIClient] Message is undefined in chatCompletion response'); - return this.streamHandler.tokens.join(''); - } - - if (typeof message.content !== 'string' || message.content.trim() === '') { - const reply = this.streamHandler.tokens.join(''); - logger.debug( - '[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content', - { intermediateReply: reply }, - ); - return reply; - } - - if ( - this.streamHandler.reasoningTokens.length > 0 && - this.options.context !== 'title' && - !message.content.startsWith('<think>') - ) { - return this.getStreamText(); - } else if ( - this.streamHandler.reasoningTokens.length > 0 && - this.options.context !== 'title' && - message.content.startsWith('<think>') - ) { - return this.getStreamText(); - } - - return message.content; - } catch (err) { - if ( - err?.message?.includes('abort') || - (err instanceof OpenAI.APIError && err?.message?.includes('abort')) - ) { - return this.getStreamText(intermediateReply); - } - if ( - err?.message?.includes( - 'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant', - ) || - err?.message?.includes( - 'stream ended without producing a ChatCompletionMessage with role=assistant', - ) || - err?.message?.includes('The server had an error processing your request') || - err?.message?.includes('missing finish_reason') || - err?.message?.includes('missing role') || - (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) - ) { - logger.error('[OpenAIClient] Known OpenAI error:', err); - if (this.streamHandler && this.streamHandler.reasoningTokens.length) { - return this.getStreamText(); - } else if 
(intermediateReply.length > 0) { - return this.getStreamText(intermediateReply); - } else { - throw err; - } - } else if (err instanceof OpenAI.APIError) { - if (this.streamHandler && this.streamHandler.reasoningTokens.length) { - return this.getStreamText(); - } else if (intermediateReply.length > 0) { - return this.getStreamText(intermediateReply); - } else { - throw err; - } - } else { - logger.error('[OpenAIClient.chatCompletion] Unhandled error type', err); - throw err; - } - } - } -} - -module.exports = OpenAIClient; diff --git a/api/app/clients/document/index.js b/api/app/clients/document/index.js deleted file mode 100644 index 9ff3da72f0..0000000000 --- a/api/app/clients/document/index.js +++ /dev/null @@ -1,5 +0,0 @@ -const tokenSplit = require('./tokenSplit'); - -module.exports = { - tokenSplit, -}; diff --git a/api/app/clients/document/tokenSplit.js b/api/app/clients/document/tokenSplit.js deleted file mode 100644 index 497249c519..0000000000 --- a/api/app/clients/document/tokenSplit.js +++ /dev/null @@ -1,51 +0,0 @@ -const { TokenTextSplitter } = require('@langchain/textsplitters'); - -/** - * Splits a given text by token chunks, based on the provided parameters for the TokenTextSplitter. - * Note: limit or memoize use of this function as its calculation is expensive. - * - * @param {Object} obj - Configuration object for the text splitting operation. - * @param {string} obj.text - The text to be split. - * @param {string} [obj.encodingName='cl100k_base'] - Encoding name. Defaults to 'cl100k_base'. - * @param {number} [obj.chunkSize=1] - The token size of each chunk. Defaults to 1. - * @param {number} [obj.chunkOverlap=0] - The number of chunk elements to be overlapped between adjacent chunks. Defaults to 0. - * @param {number} [obj.returnSize] - If specified and not 0, slices the return array from the end by this amount. - * - * @returns {Promise} Returns a promise that resolves to an array of text chunks. - * If no text is provided, an empty array is returned. - * If returnSize is specified and not 0, slices the return array from the end by returnSize. - * - * @async - * @function tokenSplit - */ -async function tokenSplit({ - text, - encodingName = 'cl100k_base', - chunkSize = 1, - chunkOverlap = 0, - returnSize, -}) { - if (!text) { - return []; - } - - const splitter = new TokenTextSplitter({ - encodingName, - chunkSize, - chunkOverlap, - }); - - if (!returnSize) { - return await splitter.splitText(text); - } - - const splitText = await splitter.splitText(text); - - if (returnSize && returnSize > 0 && splitText.length > 0) { - return splitText.slice(-Math.abs(returnSize)); - } - - return splitText; -} - -module.exports = tokenSplit; diff --git a/api/app/clients/document/tokenSplit.spec.js b/api/app/clients/document/tokenSplit.spec.js deleted file mode 100644 index d39c7d73cd..0000000000 --- a/api/app/clients/document/tokenSplit.spec.js +++ /dev/null @@ -1,56 +0,0 @@ -const tokenSplit = require('./tokenSplit'); - -describe('tokenSplit', () => { - const text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam id.'; - - it('returns correct text chunks with provided parameters', async () => { - const result = await tokenSplit({ - text: text, - encodingName: 'gpt2', - chunkSize: 2, - chunkOverlap: 1, - returnSize: 5, - }); - - expect(result).toEqual(['it.', '. 
Null', ' Nullam', 'am id', ' id.']); - }); - - it('returns correct text chunks with default parameters', async () => { - const result = await tokenSplit({ text }); - expect(result).toEqual([ - 'Lorem', - ' ipsum', - ' dolor', - ' sit', - ' amet', - ',', - ' consectetur', - ' adipiscing', - ' elit', - '.', - ' Null', - 'am', - ' id', - '.', - ]); - }); - - it('returns correct text chunks with specific return size', async () => { - const result = await tokenSplit({ text, returnSize: 2 }); - expect(result.length).toEqual(2); - expect(result).toEqual([' id', '.']); - }); - - it('returns correct text chunks with specified chunk size', async () => { - const result = await tokenSplit({ text, chunkSize: 10 }); - expect(result).toEqual([ - 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.', - ' Nullam id.', - ]); - }); - - it('returns empty array with no text', async () => { - const result = await tokenSplit({ text: '' }); - expect(result).toEqual([]); - }); -}); diff --git a/api/app/clients/index.js b/api/app/clients/index.js index d8b2bae27b..3dbe397b31 100644 --- a/api/app/clients/index.js +++ b/api/app/clients/index.js @@ -1,13 +1,7 @@ -const OpenAIClient = require('./OpenAIClient'); -const GoogleClient = require('./GoogleClient'); const TextStream = require('./TextStream'); -const AnthropicClient = require('./AnthropicClient'); const toolUtils = require('./tools/util'); module.exports = { - OpenAIClient, - GoogleClient, TextStream, - AnthropicClient, ...toolUtils, }; diff --git a/api/app/clients/llm/createCoherePayload.js b/api/app/clients/llm/createCoherePayload.js deleted file mode 100644 index 58803d76f3..0000000000 --- a/api/app/clients/llm/createCoherePayload.js +++ /dev/null @@ -1,85 +0,0 @@ -const { CohereConstants } = require('librechat-data-provider'); -const { titleInstruction } = require('../prompts/titlePrompts'); - -// Mapping OpenAI roles to Cohere roles -const roleMap = { - user: CohereConstants.ROLE_USER, - assistant: CohereConstants.ROLE_CHATBOT, - system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly -}; - -/** - * Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format. - * Now includes handling for "system" roles explicitly mentioned. - * - * @param {Object} options - Object containing the model options. - * @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options. - * @returns {CohereChatStreamRequest} Cohere-compatible chat API payload. - */ -function createCoherePayload({ modelOptions }) { - /** @type {string | undefined} */ - let preamble; - let latestUserMessageContent = ''; - const { - stream, - stop, - top_p, - temperature, - frequency_penalty, - presence_penalty, - max_tokens, - messages, - model, - ...rest - } = modelOptions; - - // Filter out the latest user message and transform remaining messages to Cohere's chat_history format - let chatHistory = messages.reduce((acc, message, index, arr) => { - const isLastUserMessage = index === arr.length - 1 && message.role === 'user'; - - const messageContent = - typeof message.content === 'string' - ? message.content - : message.content.map((part) => (part.type === 'text' ? 
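The role translation above maps OpenAI-style roles onto Cohere `chat_history` entries; the role strings here are assumed values for the `CohereConstants` used above:

    const roleMap = { user: 'USER', assistant: 'CHATBOT', system: 'SYSTEM' }; // assumed constants
    const history = [
      { role: 'system', content: 'Be terse.' },
      { role: 'assistant', content: 'OK.' },
    ].map(({ role, content }) => ({ role: roleMap[role] ?? 'USER', message: content }));
    // -> [{ role: 'SYSTEM', message: 'Be terse.' }, { role: 'CHATBOT', message: 'OK.' }]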
-
-    if (isLastUserMessage) {
-      latestUserMessageContent = messageContent;
-    } else {
-      acc.push({
-        role: roleMap[message.role] || CohereConstants.ROLE_USER,
-        message: messageContent,
-      });
-    }
-
-    return acc;
-  }, []);
-
-  if (
-    chatHistory.length === 1 &&
-    chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
-    !latestUserMessageContent.length
-  ) {
-    const message = chatHistory[0].message;
-    latestUserMessageContent = message.includes(titleInstruction)
-      ? CohereConstants.TITLE_MESSAGE
-      : '.';
-    preamble = message;
-  }
-
-  return {
-    message: latestUserMessageContent,
-    model: model,
-    chatHistory,
-    stream: stream ?? false,
-    temperature: temperature,
-    frequencyPenalty: frequency_penalty,
-    presencePenalty: presence_penalty,
-    maxTokens: max_tokens,
-    stopSequences: stop,
-    preamble,
-    p: top_p,
-    ...rest,
-  };
-}
-
-module.exports = createCoherePayload;
diff --git a/api/app/clients/llm/index.js b/api/app/clients/llm/index.js
deleted file mode 100644
index c7770ce103..0000000000
--- a/api/app/clients/llm/index.js
+++ /dev/null
@@ -1,5 +0,0 @@
-const createCoherePayload = require('./createCoherePayload');
-
-module.exports = {
-  createCoherePayload,
-};
diff --git a/api/app/clients/output_parsers/addImages.js b/api/app/clients/output_parsers/addImages.js
deleted file mode 100644
index f0860ef8bd..0000000000
--- a/api/app/clients/output_parsers/addImages.js
+++ /dev/null
@@ -1,90 +0,0 @@
-const { getBasePath } = require('@librechat/api');
-const { logger } = require('@librechat/data-schemas');
-
-/**
- * The `addImages` function corrects any erroneous image URLs in the `responseMessage.text`
- * and appends image observations from `intermediateSteps` if they are not already present.
- *
- * @function
- * @module addImages
- *
- * @param {Array.<Object>} intermediateSteps - An array of objects, each containing an observation.
- * @param {Object} responseMessage - An object containing the text property which might have image URLs.
- *
- * @property {string} intermediateSteps[].observation - The observation string which might contain an image markdown.
- * @property {string} responseMessage.text - The text which might contain image URLs.
- *
- * @example
- *
- * const intermediateSteps = [
- *   { observation: '![desc](/images/test.png)' }
- * ];
- * const responseMessage = { text: 'Some text with ![desc](sandbox:/images/test.png)' };
- *
- * addImages(intermediateSteps, responseMessage);
- *
- * logger.debug(responseMessage.text);
- * // Outputs: 'Some text with ![desc](/images/test.png)\n![desc](/images/test.png)'
- *
- * @returns {void}
- */
-function addImages(intermediateSteps, responseMessage) {
-  if (!intermediateSteps || !responseMessage) {
-    return;
-  }
-
-  const basePath = getBasePath();
-
-  // Correct any erroneous URLs in the responseMessage.text first
-  intermediateSteps.forEach((step) => {
-    const { observation } = step;
-    if (!observation || !observation.includes('![')) {
-      return;
-    }
-
-    const match = observation.match(/\/images\/.*\.\w*/);
-    if (!match) {
-      return;
-    }
-    const essentialImagePath = match[0];
-    const fullImagePath = `${basePath}${essentialImagePath}`;
-
-    const regex = /!\[.*?\]\((.*?)\)/g;
-    let matchErroneous;
-    while ((matchErroneous = regex.exec(responseMessage.text)) !== null) {
-      if (matchErroneous[1] && !matchErroneous[1].startsWith(`${basePath}/images/`)) {
-        // Replace with the full path including base path
-        responseMessage.text = responseMessage.text.replace(matchErroneous[1], fullImagePath);
-      }
-    }
-  });
-
-  // Now, check if the responseMessage already includes the correct image file path and append if not
-  intermediateSteps.forEach((step) => {
-    const { observation } = step;
-    if (!observation || !observation.includes('![')) {
-      return;
-    }
-    const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
-    if (observedImagePath) {
-      // Fix the image path to include base path if it doesn't already
-      let imageMarkdown = observedImagePath[0];
-      const urlMatch = imageMarkdown.match(/\(([^)]+)\)/);
-      if (
-        urlMatch &&
-        urlMatch[1] &&
-        !urlMatch[1].startsWith(`${basePath}/images/`) &&
-        urlMatch[1].startsWith('/images/')
-      ) {
-        imageMarkdown = imageMarkdown.replace(urlMatch[1], `${basePath}${urlMatch[1]}`);
-      }
-
-      if (!responseMessage.text.includes(imageMarkdown)) {
-        responseMessage.text += '\n' + imageMarkdown;
-        logger.debug('[addImages] added image from intermediateSteps:', imageMarkdown);
-      }
-    }
-  });
-}
-
-module.exports = addImages;
diff --git a/api/app/clients/output_parsers/addImages.spec.js b/api/app/clients/output_parsers/addImages.spec.js
deleted file mode 100644
index ef4dd22c0b..0000000000
--- a/api/app/clients/output_parsers/addImages.spec.js
+++ /dev/null
@@ -1,246 +0,0 @@
-let addImages = require('./addImages');
-
-describe('addImages', () => {
-  let intermediateSteps;
-  let responseMessage;
-  let options;
-
-  beforeEach(() => {
-    intermediateSteps = [];
-    responseMessage = { text: '' };
-    options = { debug: false };
-    this.options = options;
-    addImages = addImages.bind(this);
-  });
-
-  it('should handle null or undefined parameters', () => {
-    addImages(null, responseMessage);
-    expect(responseMessage.text).toBe('');
-
-    addImages(intermediateSteps, null);
-    expect(responseMessage.text).toBe('');
-
-    addImages(null, null);
-    expect(responseMessage.text).toBe('');
-  });
-
-  it('should append correct image markdown if not present in responseMessage', () => {
-    intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![desc](/images/test.png)');
-  });
-
-  it('should not append image markdown if already present in responseMessage', () => {
-    responseMessage.text = '![desc](/images/test.png)';
-    intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('![desc](/images/test.png)');
-  });
-
-  it('should correct and append image markdown with erroneous URL', () => {
-    responseMessage.text = '![desc](sandbox:/images/test.png)';
-    intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('![desc](/images/test.png)');
-  });
-
-  it('should correct multiple erroneous URLs in responseMessage', () => {
-    responseMessage.text =
-      '![desc1](sandbox:/images/test1.png) ![desc2](version:/images/test2.png)';
-    intermediateSteps.push({ observation: '![desc1](/images/test1.png)' });
-    intermediateSteps.push({ observation: '![desc2](/images/test2.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('![desc1](/images/test1.png) ![desc2](/images/test2.png)');
-  });
-
-  it('should not append non-image markdown observations', () => {
-    intermediateSteps.push({ observation: '[desc](/images/test.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('');
-  });
-
-  it('should handle multiple observations', () => {
-    intermediateSteps.push({ observation: '![desc1](/images/test1.png)' });
-    intermediateSteps.push({ observation: '![desc2](/images/test2.png)' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![desc1](/images/test1.png)\n![desc2](/images/test2.png)');
-  });
-
-  it('should not append if observation does not contain image markdown', () => {
-    intermediateSteps.push({ observation: 'This is a test observation without image markdown.' });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('');
-  });
-
-  it('should append correctly from a real scenario', () => {
-    responseMessage.text =
-      "Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there's a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?";
-    const originalText = responseMessage.text;
-    const imageMarkdown = '![generated image](/images/img-RnVWaYo2Yg4x3e0isICiMuf5.png)';
-    intermediateSteps.push({ observation: imageMarkdown });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`);
-  });
-
-  it('should extract only image markdowns when there is text between them', () => {
-    const markdownWithTextBetweenImages = `
-      ![image1](/images/image1.png)
-      Some text between images that should not be included.
-      ![image2](/images/image2.png)
-      More text that should be ignored.
-      ![image3](/images/image3.png)
-    `;
-    intermediateSteps.push({ observation: markdownWithTextBetweenImages });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
-  });
-
-  it('should only return the first image when multiple images are present', () => {
-    const markdownWithMultipleImages = `
-      ![image1](/images/image1.png)
-      ![image2](/images/image2.png)
-      ![image3](/images/image3.png)
-    `;
-    intermediateSteps.push({ observation: markdownWithMultipleImages });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
-  });
-
-  it('should not include any text or metadata surrounding the image markdown', () => {
-    const markdownWithMetadata = `
-      Title: Test Document
-      Author: John Doe
-      ![image1](/images/image1.png)
-      Some content after the image.
-      Vector values: [0.1, 0.2, 0.3]
-    `;
-    intermediateSteps.push({ observation: markdownWithMetadata });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
-  });
-
-  it('should handle complex markdown with multiple images and only return the first one', () => {
-    const complexMarkdown = `
-      # Document Title
-
-      ## Section 1
-      Here's some text with an embedded image:
-      ![image1](/images/image1.png)
-
-      ## Section 2
-      More text here...
-      ![image2](/images/image2.png)
-
-      ### Subsection
-      Even more content
-      ![image3](/images/image3.png)
-    `;
-    intermediateSteps.push({ observation: complexMarkdown });
-    addImages(intermediateSteps, responseMessage);
-    expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
-  });
-
-  describe('basePath functionality', () => {
-    let originalDomainClient;
-
-    beforeEach(() => {
-      originalDomainClient = process.env.DOMAIN_CLIENT;
-    });
-
-    afterEach(() => {
-      process.env.DOMAIN_CLIENT = originalDomainClient;
-    });
-
-    it('should prepend base path to image URLs when DOMAIN_CLIENT is set', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](/librechat/images/test.png)');
-    });
-
-    it('should not prepend base path when image URL already has base path', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({ observation: '![desc](/librechat/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](/librechat/images/test.png)');
-    });
-
-    it('should correct erroneous URLs with base path', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      responseMessage.text = '![desc](sandbox:/images/test.png)';
-      intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('![desc](/librechat/images/test.png)');
-    });
-
-    it('should handle empty base path (root deployment)', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/';
-      intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](/images/test.png)');
-    });
-
-    it('should handle missing DOMAIN_CLIENT', () => {
-      delete process.env.DOMAIN_CLIENT;
-      intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](/images/test.png)');
-    });
-
-    it('should handle observation without image path match', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({ observation: '![desc](not-an-image-path)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](not-an-image-path)');
-    });
-
-    it('should handle nested subdirectories in base path', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/apps/librechat';
-      intermediateSteps.push({ observation: '![desc](/images/test.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](/apps/librechat/images/test.png)');
-    });
-
-    it('should handle multiple observations with mixed base path scenarios', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({ observation: '![desc1](/images/test1.png)' });
-      intermediateSteps.push({ observation: '![desc2](/librechat/images/test2.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe(
-        '\n![desc1](/librechat/images/test1.png)\n![desc2](/librechat/images/test2.png)',
-      );
-    });
-
-    it('should handle complex markdown with base path', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      const complexMarkdown = `
-        # Document Title
-        ![image1](/images/image1.png)
-        Some text between images
-        ![image2](/images/image2.png)
-      `;
-      intermediateSteps.push({ observation: complexMarkdown });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![image1](/librechat/images/image1.png)');
-    });
-
-    it('should handle URLs that are already absolute', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({ observation: '![desc](https://example.com/image.png)' });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe('\n![desc](https://example.com/image.png)');
-    });
-
-    it('should handle data URLs', () => {
-      process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
-      intermediateSteps.push({
-        observation:
-          '![desc](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==)',
-      });
-      addImages(intermediateSteps, responseMessage);
-      expect(responseMessage.text).toBe(
-        '\n![desc](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==)',
-      );
-    });
-  });
-});
diff --git a/api/app/clients/output_parsers/handleOutputs.js b/api/app/clients/output_parsers/handleOutputs.js
deleted file mode 100644
index b25eaaad80..0000000000
--- a/api/app/clients/output_parsers/handleOutputs.js
+++ /dev/null
@@ -1,88 +0,0 @@
-const { instructions, imageInstructions, errorInstructions } = require('../prompts');
-
-function getActions(actions = [], functionsAgent = false) {
-  let output = 'Internal thoughts & actions taken:\n"';
-
-  if (actions[0]?.action && functionsAgent) {
-    actions = actions.map((step) => ({
-      log: `Action: ${step.action?.tool || ''}\nInput: ${
-        JSON.stringify(step.action?.toolInput) || ''
-      }\nObservation: ${step.observation}`,
-    }));
-  } else if (actions[0]?.action) {
-    actions = actions.map((step) => ({
-      log: `${step.action.log}\nObservation: ${step.observation}`,
-    }));
-  }
-
-  actions.forEach((actionObj, index) => {
-    output += `${actionObj.log}`;
-    if (index < actions.length - 1) {
-      output += '\n';
-    }
-  });
-
-  return output + '"';
-}
-
-function buildErrorInput({ message, errorMessage, actions, functionsAgent }) {
-  const log = errorMessage.includes('Could not parse LLM output:')
-    ? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
-    : `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

-  return `
-    ${log}
-
-    ${getActions(actions, functionsAgent)}
-
-    Human's last message: ${message}
-    `;
-}
-
-function buildPromptPrefix({ result, message, functionsAgent }) {
-  if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
-    return null;
-  }
-
-  if (
-    result?.intermediateSteps?.length === 1 &&
-    result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
-  ) {
-    return null;
-  }
-
-  const internalActions =
-    result?.intermediateSteps?.length > 0
-      ? getActions(result.intermediateSteps, functionsAgent)
-      : 'Internal Actions Taken: None';
-
-  const toolBasedInstructions = internalActions.toLowerCase().includes('image')
-    ? imageInstructions
-    : '';
-
-  const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';
-
-  const preliminaryAnswer =
-    result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
-  const prefix = preliminaryAnswer
-    ? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
-    : 'respond to the User Message below based on your preliminary thoughts & actions.';
-
-  return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
-${preliminaryAnswer}
-Reply conversationally to the User based on your ${
-  preliminaryAnswer ? 'preliminary answer, ' : ''
-}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
-${
-  preliminaryAnswer
-    ? ''
-    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
-}You must cite sources if you are using any web links. ${toolBasedInstructions}
-Only respond with your conversational reply to the following User Message:
-"${message}"`;
-}
-
-module.exports = {
-  buildErrorInput,
-  buildPromptPrefix,
-};
diff --git a/api/app/clients/output_parsers/index.js b/api/app/clients/output_parsers/index.js
deleted file mode 100644
index 4c176ade49..0000000000
--- a/api/app/clients/output_parsers/index.js
+++ /dev/null
@@ -1,7 +0,0 @@
-const addImages = require('./addImages');
-const handleOutputs = require('./handleOutputs');
-
-module.exports = {
-  addImages,
-  ...handleOutputs,
-};
diff --git a/api/app/clients/prompts/handleInputs.js b/api/app/clients/prompts/handleInputs.js
deleted file mode 100644
index 1a193e058f..0000000000
--- a/api/app/clients/prompts/handleInputs.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Escaping curly braces is necessary for LangChain to correctly process the prompt
-function escapeBraces(str) {
-  return str
-    .replace(/({{2,})|(}{2,})/g, (match) => `${match[0]}`)
-    .replace(/{|}/g, (match) => `${match}${match}`);
-}
-
-function getSnippet(text) {
-  let limit = 50;
-  let splitText = escapeBraces(text).split(' ');
-
-  if (splitText.length === 1 && splitText[0].length > limit) {
-    return splitText[0].substring(0, limit);
-  }
-
-  let result = '';
-  let spaceCount = 0;
-
-  for (let i = 0; i < splitText.length; i++) {
-    if (result.length + splitText[i].length <= limit) {
-      result += splitText[i] + ' ';
-      spaceCount++;
-    } else {
-      break;
-    }
-
-    if (spaceCount == 10) {
-      break;
-    }
-  }
-
-  return result.trim();
-}
-
-module.exports = {
-  escapeBraces,
-  getSnippet,
-};
diff --git a/api/app/clients/prompts/index.js b/api/app/clients/prompts/index.js
index a8392348b5..ba4859efe3 100644
--- a/api/app/clients/prompts/index.js
+++ b/api/app/clients/prompts/index.js
@@ -1,7 +1,5 @@
 const formatMessages = require('./formatMessages');
 const summaryPrompts = require('./summaryPrompts');
-const handleInputs = require('./handleInputs');
-const instructions = require('./instructions');
 const truncate = require('./truncate');
 const createVisionPrompt = require('./createVisionPrompt');
 const createContextHandlers = require('./createContextHandlers');
@@ -9,8 +7,6 @@ const createContextHandlers = require('./createContextHandlers');
 module.exports = {
   ...formatMessages,
   ...summaryPrompts,
-  ...handleInputs,
-  ...instructions,
   ...truncate,
   createVisionPrompt,
   createContextHandlers,
diff --git a/api/app/clients/prompts/instructions.js b/api/app/clients/prompts/instructions.js
deleted file mode 100644
index c630711771..0000000000
--- a/api/app/clients/prompts/instructions.js
+++ /dev/null
@@ -1,10 +0,0 @@
-module.exports = {
-  instructions:
-    'Remember, all your responses MUST be in the format described. Do not respond unless it\'s in the format described, using the structure of Action, Action Input, etc.',
-  errorInstructions:
-    '\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn\'t mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:',
-  imageInstructions:
-    'You must include the exact image paths from above, formatted in Markdown syntax: ![alt-text](URL)',
-  completionInstructions:
-    'Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:',
-};
diff --git a/api/app/clients/specs/AnthropicClient.test.js b/api/app/clients/specs/AnthropicClient.test.js
deleted file mode 100644
index 35477005fb..0000000000
--- a/api/app/clients/specs/AnthropicClient.test.js
+++ /dev/null
@@ -1,1043 +0,0 @@
-const { SplitStreamHandler } = require('@librechat/agents');
-const { anthropicSettings } = require('librechat-data-provider');
-const AnthropicClient = require('~/app/clients/AnthropicClient');
-
-const HUMAN_PROMPT = '\n\nHuman:';
-const AI_PROMPT = '\n\nAssistant:';
-
-describe('AnthropicClient', () => {
-  let client;
-  const model = 'claude-2';
-  const parentMessageId = '1';
-  const messages = [
-    { role: 'user', isCreatedByUser: true, text: 'Hello', messageId: parentMessageId },
-    { role: 'assistant', isCreatedByUser: false, text: 'Hi', messageId: '2', parentMessageId },
-    {
-      role: 'user',
-      isCreatedByUser: true,
-      text: "What's up",
-      messageId: '3',
-      parentMessageId: '2',
-    },
-  ];
-
-  beforeEach(() => {
-    const options = {
-      modelOptions: {
-        model,
-        temperature: anthropicSettings.temperature.default,
-      },
-    };
-    client = new AnthropicClient('test-api-key');
-    client.setOptions(options);
-  });
-
-  describe('setOptions', () => {
-    it('should set the options correctly', () => {
-      expect(client.apiKey).toBe('test-api-key');
-      expect(client.modelOptions.model).toBe(model);
-      expect(client.modelOptions.temperature).toBe(anthropicSettings.temperature.default);
-    });
-
-    it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-2',
-          maxOutputTokens: anthropicSettings.maxOutputTokens.default,
-        },
-      });
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-    it('should not set maxOutputTokens if not provided', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3',
-        },
-      });
-      expect(client.modelOptions.maxOutputTokens).toBeUndefined();
-    });
-
-    it('should not set legacy maxOutputTokens for Claude-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-opus-20240229',
-          maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
-        },
-      });
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-  });
-
-  describe('getSaveOptions', () => {
-    it('should return the correct save options', () => {
-      const options = client.getSaveOptions();
-      expect(options).toHaveProperty('modelLabel');
-      expect(options).toHaveProperty('promptPrefix');
-    });
-  });
-
-  describe('buildMessages', () => {
-    it('should handle promptPrefix from options when promptPrefix argument is not provided', async () => {
-      client.options.promptPrefix = 'Test Prefix from options';
-      const result = await client.buildMessages(messages, parentMessageId);
-      const { prompt } = result;
-      expect(prompt).toContain('Test Prefix from options');
-    });
-
-    it('should build messages correctly for chat completion', async () => {
-      const result = await client.buildMessages(messages, '2');
-      expect(result).toHaveProperty('prompt');
-      expect(result.prompt).toContain(HUMAN_PROMPT);
-      expect(result.prompt).toContain('Hello');
-      expect(result.prompt).toContain(AI_PROMPT);
-      expect(result.prompt).toContain('Hi');
-    });
-
-    it('should group messages by the same author', async () => {
-      const groupedMessages = messages.map((m) => ({ ...m, isCreatedByUser: true, role: 'user' }));
-      const result = await client.buildMessages(groupedMessages, '3');
-      expect(result.context).toHaveLength(1);
-
-      // Check that HUMAN_PROMPT appears only once in the prompt
-      const matches = result.prompt.match(new RegExp(HUMAN_PROMPT, 'g'));
-      expect(matches).toHaveLength(1);
-
-      groupedMessages.push({
-        role: 'assistant',
-        isCreatedByUser: false,
-        text: 'I heard you the first time',
-        messageId: '4',
-        parentMessageId: '3',
-      });
-
-      const result2 = await client.buildMessages(groupedMessages, '4');
-      expect(result2.context).toHaveLength(2);
-
-      // Check that HUMAN_PROMPT appears only once in the prompt
-      const human_matches = result2.prompt.match(new RegExp(HUMAN_PROMPT, 'g'));
-      const ai_matches = result2.prompt.match(new RegExp(AI_PROMPT, 'g'));
-      expect(human_matches).toHaveLength(1);
-      expect(ai_matches).toHaveLength(1);
-    });
-
-    it('should handle isEdited condition', async () => {
-      const editedMessages = [
-        { role: 'user', isCreatedByUser: true, text: 'Hello', messageId: '1' },
-        { role: 'assistant', isCreatedByUser: false, text: 'Hi', messageId: '2', parentMessageId },
-      ];
-
-      const trimmedLabel = AI_PROMPT.trim();
-      const result = await client.buildMessages(editedMessages, '2');
-      expect(result.prompt.trim().endsWith(trimmedLabel)).toBeFalsy();
-
-      // Add a human message at the end to test the opposite
-      editedMessages.push({
-        role: 'user',
-        isCreatedByUser: true,
-        text: 'Hi again',
-        messageId: '3',
-        parentMessageId: '2',
-      });
-      const result2 = await client.buildMessages(editedMessages, '3');
-      expect(result2.prompt.trim().endsWith(trimmedLabel)).toBeTruthy();
-    });
-
-    it('should build messages correctly with a promptPrefix', async () => {
-      const promptPrefix = 'Test Prefix';
-      client.options.promptPrefix = promptPrefix;
-      const result = await client.buildMessages(messages, parentMessageId);
-      const { prompt } = result;
-      expect(prompt).toBeDefined();
-      expect(prompt).toContain(promptPrefix);
-      const textAfterPrefix = prompt.split(promptPrefix)[1];
-      expect(textAfterPrefix).toContain(AI_PROMPT);
-
-      const editedMessages = messages.slice(0, -1);
-      const result2 = await client.buildMessages(editedMessages, parentMessageId);
-      const textAfterPrefix2 = result2.prompt.split(promptPrefix)[1];
-      expect(textAfterPrefix2).toContain(AI_PROMPT);
-    });
-
-    it('should handle identityPrefix from options', async () => {
-      client.options.userLabel = 'John';
-      client.options.modelLabel = 'Claude-2';
-      const result = await client.buildMessages(messages, parentMessageId);
-      const { prompt } = result;
-      expect(prompt).toContain("Human's name: John");
-      expect(prompt).toContain('You are Claude-2');
-    });
-  });
-
-  describe('getClient', () => {
-    it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-2',
-          maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
-        },
-      });
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-
-    it('should not set legacy maxOutputTokens for Claude-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-opus-20240229',
-          maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
-        },
-      });
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-
-    it('should add "max-tokens" & "prompt-caching" beta header for claude-3-5-sonnet model', () => {
-      const client = new AnthropicClient('test-api-key');
-      const modelOptions = {
-        model: 'claude-3-5-sonnet-20241022',
-      };
-      client.setOptions({ modelOptions, promptCache: true });
-      const anthropicClient = client.getClient(modelOptions);
-      expect(anthropicClient._options.defaultHeaders).toBeDefined();
-      expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-      expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-        'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
-      );
-    });
-
-    it('should add "prompt-caching" beta header for claude-3-haiku model', () => {
-      const client = new AnthropicClient('test-api-key');
-      const modelOptions = {
-        model: 'claude-3-haiku-2028',
-      };
-      client.setOptions({ modelOptions, promptCache: true });
-      const anthropicClient = client.getClient(modelOptions);
-      expect(anthropicClient._options.defaultHeaders).toBeDefined();
-      expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-      expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-        'prompt-caching-2024-07-31',
-      );
-    });
-
-    it('should add "prompt-caching" beta header for claude-3-opus model', () => {
-      const client = new AnthropicClient('test-api-key');
-      const modelOptions = {
-        model: 'claude-3-opus-2028',
-      };
-      client.setOptions({ modelOptions, promptCache: true });
-      const anthropicClient = client.getClient(modelOptions);
-      expect(anthropicClient._options.defaultHeaders).toBeDefined();
-      expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-      expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-        'prompt-caching-2024-07-31',
-      );
-    });
-
-    describe('Claude 4 model headers', () => {
-      it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model', () => {
-        const client = new AnthropicClient('test-api-key');
-        const modelOptions = {
-          model: 'claude-sonnet-4-20250514',
-        };
-        client.setOptions({ modelOptions, promptCache: true });
-        const anthropicClient = client.getClient(modelOptions);
-        expect(anthropicClient._options.defaultHeaders).toBeDefined();
-        expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-        expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-          'prompt-caching-2024-07-31,context-1m-2025-08-07',
-        );
-      });
-
-      it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model formats', () => {
-        const client = new AnthropicClient('test-api-key');
-        const modelVariations = [
-          'claude-sonnet-4-20250514',
-          'claude-sonnet-4-latest',
-          'anthropic/claude-sonnet-4-20250514',
-        ];
-
-        modelVariations.forEach((model) => {
-          const modelOptions = { model };
-          client.setOptions({ modelOptions, promptCache: true });
-          const anthropicClient = client.getClient(modelOptions);
-          expect(anthropicClient._options.defaultHeaders).toBeDefined();
-          expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-          expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-            'prompt-caching-2024-07-31,context-1m-2025-08-07',
-          );
-        });
-      });
-
-      it('should add "prompt-caching" beta header for claude-opus-4 model', () => {
-        const client = new AnthropicClient('test-api-key');
-        const modelOptions = {
-          model: 'claude-opus-4-20250514',
-        };
-        client.setOptions({ modelOptions, promptCache: true });
-        const anthropicClient = client.getClient(modelOptions);
-        expect(anthropicClient._options.defaultHeaders).toBeDefined();
-        expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-        expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-          'prompt-caching-2024-07-31',
-        );
-      });
-
-      it('should add "prompt-caching" beta header for claude-4-opus model', () => {
-        const client = new AnthropicClient('test-api-key');
-        const modelOptions = {
-          model: 'claude-4-opus-20250514',
-        };
-        client.setOptions({ modelOptions, promptCache: true });
-        const anthropicClient = client.getClient(modelOptions);
-        expect(anthropicClient._options.defaultHeaders).toBeDefined();
-        expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
-        expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
-          'prompt-caching-2024-07-31',
-        );
-      });
-    });
-
-    it('should not add beta header for claude-3-5-sonnet-latest model', () => {
-      const client = new AnthropicClient('test-api-key');
-      const modelOptions = {
-        model: 'anthropic/claude-3-5-sonnet-latest',
-      };
-      client.setOptions({ modelOptions, promptCache: true });
-      const anthropicClient = client.getClient(modelOptions);
-      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
-    });
-
-    it('should not add beta header for other models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-2',
-        },
-      });
-      const anthropicClient = client.getClient();
-      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
-    });
-  });
-
-  describe('calculateCurrentTokenCount', () => {
-    let client;
-
-    beforeEach(() => {
-      client = new AnthropicClient('test-api-key');
-    });
-
-    it('should calculate correct token count when usage is provided', () => {
-      const tokenCountMap = {
-        msg1: 10,
-        msg2: 20,
-        currentMsg: 30,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 70,
-        output_tokens: 50,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(40); // 70 - (10 + 20) = 40
-    });
-
-    it('should return original estimate if calculation results in negative value', () => {
-      const tokenCountMap = {
-        msg1: 40,
-        msg2: 50,
-        currentMsg: 30,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 80,
-        output_tokens: 50,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(30); // Original estimate
-    });
-
-    it('should handle cache creation and read input tokens', () => {
-      const tokenCountMap = {
-        msg1: 10,
-        msg2: 20,
-        currentMsg: 30,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 50,
-        cache_creation_input_tokens: 10,
-        cache_read_input_tokens: 20,
-        output_tokens: 40,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(50); // (50 + 10 + 20) - (10 + 20) = 50
-    });
-
-    it('should handle missing usage properties', () => {
-      const tokenCountMap = {
-        msg1: 10,
-        msg2: 20,
-        currentMsg: 30,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        output_tokens: 40,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(30); // Original estimate
-    });
-
-    it('should handle empty tokenCountMap', () => {
-      const tokenCountMap = {};
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 50,
-        output_tokens: 40,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(50);
-      expect(Number.isNaN(result)).toBe(false);
-    });
-
-    it('should handle zero values in usage', () => {
-      const tokenCountMap = {
-        msg1: 10,
-        currentMsg: 20,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 0,
-        cache_creation_input_tokens: 0,
-        cache_read_input_tokens: 0,
-        output_tokens: 0,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(20); // Should return original estimate
-      expect(Number.isNaN(result)).toBe(false);
-    });
-
-    it('should handle undefined usage', () => {
-      const tokenCountMap = {
-        msg1: 10,
-        currentMsg: 20,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = undefined;
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(20); // Should return original estimate
-      expect(Number.isNaN(result)).toBe(false);
-    });
-
-    it('should handle non-numeric values in tokenCountMap', () => {
-      const tokenCountMap = {
-        msg1: 'ten',
-        currentMsg: 20,
-      };
-      const currentMessageId = 'currentMsg';
-      const usage = {
-        input_tokens: 30,
-        output_tokens: 10,
-      };
-
-      const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
-
-      expect(result).toBe(30); // Should return 30 (input_tokens) - 0 (ignored 'ten') = 30
-      expect(Number.isNaN(result)).toBe(false);
-    });
-  });
-
-  describe('maxOutputTokens handling for different models', () => {
-    it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-5-sonnet',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-
-      // Test with decimal notation
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.5-sonnet',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-    });
-
-    it('should not cap maxOutputTokens for Claude 3.7 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-7-sonnet',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-
-      // Test with decimal notation
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.7-sonnet',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-    });
-
-    it('should not cap maxOutputTokens for Claude 4 Sonnet models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10; // 40,960 tokens
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-sonnet-4-20250514',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-    });
-
-    it('should not cap maxOutputTokens for Claude 4 Opus models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 6; // 24,576 tokens (under 32K limit)
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-opus-4-20250514',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
-    });
-
-    it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-5-haiku',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-
-      // Test with decimal notation
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.5-haiku',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-
-    it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
-      const client = new AnthropicClient('test-api-key');
-      const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
-
-      // Test haiku
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-haiku',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-
-      // Test opus
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-opus',
-          maxOutputTokens: highTokenValue,
-        },
-      });
-
-      expect(client.modelOptions.maxOutputTokens).toBe(
-        anthropicSettings.legacy.maxOutputTokens.default,
-      );
-    });
-  });
-
-  describe('topK/topP parameters for different models', () => {
-    beforeEach(() => {
-      // Mock the SplitStreamHandler
-      jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
-    });
-
-    afterEach(() => {
-      jest.restoreAllMocks();
-    });
-
-    it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
-      const client = new AnthropicClient('test-api-key');
-
-      // Create a mock async generator function
-      async function* mockAsyncGenerator() {
-        yield { type: 'message_start', message: { usage: {} } };
-        yield { delta: { text: 'Test response' } };
-        yield { type: 'message_delta', usage: {} };
-      }
-
-      // Mock createResponse to return the async generator
-      jest.spyOn(client, 'createResponse').mockImplementation(() => {
-        return mockAsyncGenerator();
-      });
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-opus',
-          temperature: 0.7,
-          topK: 10,
-          topP: 0.9,
-        },
-      });
-
-      // Mock getClient to capture the request options
-      let capturedOptions = null;
-      jest.spyOn(client, 'getClient').mockImplementation((options) => {
-        capturedOptions = options;
-        return {};
-      });
-
-      const payload = [{ role: 'user', content: 'Test message' }];
-      await client.sendCompletion(payload, {});
-
-      // Check the options passed to getClient
-      expect(capturedOptions).toHaveProperty('top_k', 10);
-      expect(capturedOptions).toHaveProperty('top_p', 0.9);
-    });
-
-    it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
-      const client = new AnthropicClient('test-api-key');
-
-      // Create a mock async generator function
-      async function* mockAsyncGenerator() {
-        yield { type: 'message_start', message: { usage: {} } };
-        yield { delta: { text: 'Test response' } };
-        yield { type: 'message_delta', usage: {} };
-      }
-
-      // Mock createResponse to return the async generator
-      jest.spyOn(client, 'createResponse').mockImplementation(() => {
-        return mockAsyncGenerator();
-      });
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-5-sonnet',
-          temperature: 0.7,
-          topK: 10,
-          topP: 0.9,
-        },
-      });
-
-      // Mock getClient to capture the request options
-      let capturedOptions = null;
-      jest.spyOn(client, 'getClient').mockImplementation((options) => {
-        capturedOptions = options;
-        return {};
-      });
-
-      const payload = [{ role: 'user', content: 'Test message' }];
-      await client.sendCompletion(payload, {});
-
-      // Check the options passed to getClient
-      expect(capturedOptions).toHaveProperty('top_k', 10);
-      expect(capturedOptions).toHaveProperty('top_p', 0.9);
-    });
-
-    it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
-      const client = new AnthropicClient('test-api-key');
-
-      // Create a mock async generator function
-      async function* mockAsyncGenerator() {
-        yield { type: 'message_start', message: { usage: {} } };
-        yield { delta: { text: 'Test response' } };
-        yield { type: 'message_delta', usage: {} };
-      }
-
-      // Mock createResponse to return the async generator
-      jest.spyOn(client, 'createResponse').mockImplementation(() => {
-        return mockAsyncGenerator();
-      });
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-7-sonnet',
-          temperature: 0.7,
-          topK: 10,
-          topP: 0.9,
-        },
-      });
-
-      // Mock getClient to capture the request options
-      let capturedOptions = null;
-      jest.spyOn(client, 'getClient').mockImplementation((options) => {
-        capturedOptions = options;
-        return {};
-      });
-
-      const payload = [{ role: 'user', content: 'Test message' }];
-      await client.sendCompletion(payload, {});
-
-      // Check the options passed to getClient
-      expect(capturedOptions).not.toHaveProperty('top_k');
-      expect(capturedOptions).not.toHaveProperty('top_p');
-    });
-
-    it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
-      const client = new AnthropicClient('test-api-key');
-
-      // Create a mock async generator function
-      async function* mockAsyncGenerator() {
-        yield { type: 'message_start', message: { usage: {} } };
-        yield { delta: { text: 'Test response' } };
-        yield { type: 'message_delta', usage: {} };
-      }
-
-      // Mock createResponse to return the async generator
-      jest.spyOn(client, 'createResponse').mockImplementation(() => {
-        return mockAsyncGenerator();
-      });
-
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.7-sonnet',
-          temperature: 0.7,
-          topK: 10,
-          topP: 0.9,
-        },
-      });
-
-      // Mock getClient to capture the request options
-      let capturedOptions = null;
-      jest.spyOn(client, 'getClient').mockImplementation((options) => {
-        capturedOptions = options;
-        return {};
-      });
-
-      const payload = [{ role: 'user', content: 'Test message' }];
-      await client.sendCompletion(payload, {});
-
-      // Check the options passed to getClient
-      expect(capturedOptions).not.toHaveProperty('top_k');
-      expect(capturedOptions).not.toHaveProperty('top_p');
-    });
-  });
-
-  it('should include top_k and top_p parameters for Claude-3.7 models when thinking is explicitly disabled', async () => {
-    const client = new AnthropicClient('test-api-key', {
-      modelOptions: {
-        model: 'claude-3-7-sonnet',
-        temperature: 0.7,
-        topK: 10,
-        topP: 0.9,
-      },
-      thinking: false,
-    });
-
-    async function* mockAsyncGenerator() {
-      yield { type: 'message_start', message: { usage: {} } };
-      yield { delta: { text: 'Test response' } };
-      yield { type: 'message_delta', usage: {} };
-    }
-
-    jest.spyOn(client, 'createResponse').mockImplementation(() => {
-      return mockAsyncGenerator();
-    });
-
-    let capturedOptions = null;
-    jest.spyOn(client, 'getClient').mockImplementation((options) => {
-      capturedOptions = options;
-      return {};
-    });
-
-    const payload = [{ role: 'user', content: 'Test message' }];
-    await client.sendCompletion(payload, {});
-
-    expect(capturedOptions).toHaveProperty('topK', 10);
-    expect(capturedOptions).toHaveProperty('topP', 0.9);
-
-    client.setOptions({
-      modelOptions: {
-        model: 'claude-3.7-sonnet',
-        temperature: 0.7,
-        topK: 10,
-        topP: 0.9,
-      },
-      thinking: false,
-    });
-
-    await client.sendCompletion(payload, {});
-
-    expect(capturedOptions).toHaveProperty('topK', 10);
-    expect(capturedOptions).toHaveProperty('topP', 0.9);
-  });
-
-  describe('isClaudeLatest', () => {
-    it('should set isClaudeLatest to true for claude-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3-sonnet-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(true);
-    });
-
-    it('should set isClaudeLatest to true for claude-3.5 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.5-sonnet-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(true);
-    });
-
-    it('should set isClaudeLatest to true for claude-sonnet-4 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-sonnet-4-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(true);
-    });
-
-    it('should set isClaudeLatest to true for claude-opus-4 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-opus-4-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(true);
-    });
-
-    it('should set isClaudeLatest to true for claude-3.5-haiku models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-3.5-haiku-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(true);
-    });
-
-    it('should set isClaudeLatest to false for claude-2 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-2',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(false);
-    });
-
-    it('should set isClaudeLatest to false for claude-instant models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-instant',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(false);
-    });
-
-    it('should set isClaudeLatest to false for claude-sonnet-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-sonnet-3-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(false);
-    });
-
-    it('should set isClaudeLatest to false for claude-opus-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-opus-3-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(false);
-    });
-
-    it('should set isClaudeLatest to false for claude-haiku-3 models', () => {
-      const client = new AnthropicClient('test-api-key');
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-haiku-3-20240229',
-        },
-      });
-      expect(client.isClaudeLatest).toBe(false);
-    });
-  });
-
-  describe('configureReasoning', () => {
-    it('should enable thinking for claude-opus-4 and claude-sonnet-4 models', async () => {
-      const client = new AnthropicClient('test-api-key');
-      // Create a mock async generator function
-      async function* mockAsyncGenerator() {
-        yield { type: 'message_start', message: { usage: {} } };
-        yield { delta: { text: 'Test response' } };
-        yield { type: 'message_delta', usage: {} };
-      }
-
-      // Mock createResponse to return the async generator
-      jest.spyOn(client, 'createResponse').mockImplementation(() => {
-        return mockAsyncGenerator();
-      });
-
-      // Test claude-opus-4
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-opus-4-20250514',
-        },
-        thinking: true,
-        thinkingBudget: 2000,
-      });
-
-      let capturedOptions = null;
-      jest.spyOn(client, 'getClient').mockImplementation((options) => {
-        capturedOptions = options;
-        return {};
-      });
-
-      const payload = [{ role: 'user', content: 'Test message' }];
-      await client.sendCompletion(payload, {});
-
-      expect(capturedOptions).toHaveProperty('thinking');
-      expect(capturedOptions.thinking).toEqual({
-        type: 'enabled',
-        budget_tokens: 2000,
-      });
-
-      // Test claude-sonnet-4
-      client.setOptions({
-        modelOptions: {
-          model: 'claude-sonnet-4-20250514',
-        },
-        thinking: true,
-        thinkingBudget: 2000,
-      });
-
-      await client.sendCompletion(payload, {});
-
-      expect(capturedOptions).toHaveProperty('thinking');
-      expect(capturedOptions.thinking).toEqual({
-        type: 'enabled',
-        budget_tokens: 2000,
-      });
-    });
-  });
-});
-
-describe('Claude Model Tests', () => {
-  it('should handle Claude 3 and 4 series models correctly', () => {
-    const client = new AnthropicClient('test-key');
-    // Claude 3 series models
-    const claude3Models = [
-      'claude-3-opus-20240229',
-      'claude-3-sonnet-20240229',
-      'claude-3-haiku-20240307',
-      'claude-3-5-sonnet-20240620',
-      'claude-3-5-haiku-20240620',
-      'claude-3.5-sonnet-20240620',
-      'claude-3.5-haiku-20240620',
-      'claude-3.7-sonnet-20240620',
-      'claude-3.7-haiku-20240620',
-      'anthropic/claude-3-opus-20240229',
-      'claude-3-opus-20240229/anthropic',
-    ];
-
-    // Claude 4 series models
-    const claude4Models = [
-      'claude-sonnet-4-20250514',
-      'claude-opus-4-20250514',
-      'claude-4-sonnet-20250514',
-      'claude-4-opus-20250514',
-      'anthropic/claude-sonnet-4-20250514',
-      'claude-sonnet-4-20250514/anthropic',
-    ];
-
-    // Test Claude 3 series
-    claude3Models.forEach((model) => {
-      client.setOptions({ modelOptions: { model } });
-      expect(
-        /claude-[3-9]/.test(client.modelOptions.model) ||
-          /claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
-      ).toBe(true);
-    });
-
-    // Test Claude 4 series
-    claude4Models.forEach((model) => {
-      client.setOptions({ modelOptions: { model } });
-      expect(
-        /claude-[3-9]/.test(client.modelOptions.model) ||
-          /claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
-      ).toBe(true);
-    });
-
-    // Test non-Claude 3/4 models
-    const nonClaudeModels = ['claude-2', 'claude-instant', 'gpt-4', 'gpt-3.5-turbo'];
-
-    nonClaudeModels.forEach((model) => {
-      client.setOptions({ modelOptions: { model } });
-      expect(
-        /claude-[3-9]/.test(client.modelOptions.model) ||
-          /claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
-      ).toBe(false);
-    });
-  });
-});
diff --git a/api/app/clients/specs/OpenAIClient.test.js b/api/app/clients/specs/OpenAIClient.test.js
deleted file mode 100644
index efca66a867..0000000000
--- a/api/app/clients/specs/OpenAIClient.test.js
+++ /dev/null
@@ -1,630 +0,0 @@
-jest.mock('~/cache/getLogStores');
-require('dotenv').config();
-const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
-const getLogStores = require('~/cache/getLogStores');
-const OpenAIClient = require('../OpenAIClient');
-jest.mock('meilisearch');
-
-jest.mock('~/db/connect');
-jest.mock('~/models', () => ({
-  User: jest.fn(),
-  Key: jest.fn(),
-  Session: jest.fn(),
-  Balance: jest.fn(),
-  Transaction: jest.fn(),
-  getMessages: jest.fn().mockResolvedValue([]),
-  saveMessage: jest.fn(),
-  updateMessage: jest.fn(),
-  deleteMessagesSince: jest.fn(),
-  deleteMessages: jest.fn(),
-  getConvoTitle: jest.fn(),
-  getConvo: jest.fn(),
-  saveConvo: jest.fn(),
-  deleteConvos: jest.fn(),
-  getPreset: jest.fn(),
-  getPresets: jest.fn(),
-  savePreset: jest.fn(),
-  deletePresets: jest.fn(),
-  findFileById: jest.fn(),
-  createFile: jest.fn(),
-  updateFile: jest.fn(),
-  deleteFile: jest.fn(),
-  deleteFiles: jest.fn(),
-  getFiles: jest.fn(),
-  updateFileUsage: jest.fn(),
-}));
-
-// Import the actual module but mock specific parts
-const agents = jest.requireActual('@librechat/agents');
-const { CustomOpenAIClient } = agents;
-
-// Also mock ChatOpenAI to prevent real API calls
-agents.ChatOpenAI = jest.fn().mockImplementation(() => {
-  return {};
-});
-agents.AzureChatOpenAI = jest.fn().mockImplementation(() => {
-  return {};
-});
-
-// Mock only the CustomOpenAIClient constructor
-jest.spyOn(CustomOpenAIClient, 'constructor').mockImplementation(function (...options) {
-  return new CustomOpenAIClient(...options);
-});
-
-const finalChatCompletion = jest.fn().mockResolvedValue({
-  choices: [
-    {
-      message: { role: 'assistant', content: 'Mock message content' },
-      finish_reason: 'Mock finish reason',
-    },
-  ],
-});
-
-const stream = jest.fn().mockImplementation(() => {
-  let isDone = false;
-  let isError = false;
-  let errorCallback = null;
-
-  const onEventHandlers = {
-    abort: () => {
-      // Mock abort behavior
-    },
-    error: (callback) => {
-      errorCallback = callback; // Save the error callback for later use
-    },
-    finalMessage: (callback) => {
-      callback({ role: 'assistant', content: 'Mock Response' });
-      isDone = true; // Set stream to done
-    },
-  };
-
-  const mockStream = {
-    on: jest.fn((event, callback) => {
-      if (onEventHandlers[event]) {
-        onEventHandlers[event](callback);
-      }
-      return mockStream;
-    }),
-    finalChatCompletion,
-    controller: { abort: jest.fn() },
-    triggerError: () => {
-      isError = true;
-      if (errorCallback) {
-        errorCallback(new Error('Mock error'));
-      }
-    },
-    [Symbol.asyncIterator]: () => {
-      return {
-        next: () => {
-          if (isError) {
-            return Promise.reject(new Error('Mock error'));
-          }
-          if (isDone) {
-            return Promise.resolve({ done: true });
-          }
-          const chunk = { choices: [{ delta: { content: 'Mock chunk' } }] };
-          return Promise.resolve({ value: chunk, done: false });
-        },
-      };
-    },
-  };
-  return mockStream;
-});
-
-const create = jest.fn().mockResolvedValue({
-  choices: [
-    {
-      message: { content: 'Mock message content' },
-      finish_reason: 'Mock finish reason',
-    },
-  ],
-});
-
-// Mock the implementation of CustomOpenAIClient instances
-jest.spyOn(CustomOpenAIClient.prototype, 'constructor').mockImplementation(function () {
-  return this;
-});
-
-// Create a mock for the CustomOpenAIClient class
-const mockCustomOpenAIClient = jest.fn().mockImplementation(() => ({
-  beta: {
-    chat: {
-      completions: {
-        stream,
-      },
-    },
-  },
-  chat: {
-    completions: {
-      create,
-    },
-  },
-}));
-
-CustomOpenAIClient.mockImplementation = mockCustomOpenAIClient;
-
-describe('OpenAIClient', () => {
-  beforeEach(() => {
-    const mockCache = {
-      get: jest.fn().mockResolvedValue({}),
-      set: jest.fn(),
-    };
-    getLogStores.mockReturnValue(mockCache);
-  });
-  let client;
-  const model = 'gpt-4';
-  const parentMessageId = '1';
-  const messages = [
-    { role: 'user', sender: 'User', text: 'Hello', messageId: parentMessageId },
-    { role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
-  ];
-
-  const defaultOptions = {
-    // debug: true,
-    req: {},
-    openaiApiKey: 'new-api-key',
-    modelOptions: {
-      model,
-      temperature: 0.7,
-    },
-  };
-
-  const defaultAzureOptions = {
-    azureOpenAIApiInstanceName: 'your-instance-name',
-    azureOpenAIApiDeploymentName: 'your-deployment-name',
-    azureOpenAIApiVersion: '2020-07-01-preview',
-  };
-
-  let originalWarn;
-
-  beforeAll(() => {
-    originalWarn = console.warn;
-    console.warn = jest.fn();
-  });
-
-  afterAll(() => {
-    console.warn = originalWarn;
-  });
-
-  beforeEach(() => {
-    console.warn.mockClear();
-  });
-
-  beforeEach(() => {
-    const options = { ...defaultOptions };
-    client = new OpenAIClient('test-api-key', options);
-    client.summarizeMessages = jest.fn().mockResolvedValue({
-      role: 'assistant',
-      content: 'Refined answer',
-      tokenCount: 30,
-    });
-    client.buildPrompt = jest
-      .fn()
-      .mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
-    client.getMessages = jest.fn().mockResolvedValue([]);
-  });
-
-  describe('setOptions', () => {
-    it('should set the options correctly', () => {
-      expect(client.apiKey).toBe('new-api-key');
-      expect(client.modelOptions.model).toBe(model);
-      expect(client.modelOptions.temperature).toBe(0.7);
-    });
-
-    it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
-      process.env.OPENAI_FORCE_PROMPT = 'true';
-      client.setOptions({});
-      expect(client.FORCE_PROMPT).toBe(true);
-      delete process.env.OPENAI_FORCE_PROMPT; // Cleanup
-      client.FORCE_PROMPT = undefined;
-
-      client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
-      expect(client.FORCE_PROMPT).toBe(true);
-      client.FORCE_PROMPT = undefined;
-
-      client.setOptions({ reverseProxyUrl: 'https://example.com/chat' });
-      expect(client.FORCE_PROMPT).toBe(false);
-    });
-
-    it('should set isChatCompletion based on useOpenRouter, reverseProxyUrl, or model', () => {
-      client.setOptions({ reverseProxyUrl: null });
-      // true by default since default model will be gpt-4o-mini
-      expect(client.isChatCompletion).toBe(true);
-      client.isChatCompletion = undefined;
-
-      // false because completions url will force prompt payload
-      client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
-      expect(client.isChatCompletion).toBe(false);
-      client.isChatCompletion = undefined;
-
-      client.setOptions({ modelOptions: { model: 'gpt-4o-mini' }, reverseProxyUrl: null });
-      expect(client.isChatCompletion).toBe(true);
-    });
-
-    it('should set completionsUrl and langchainProxy based on reverseProxyUrl', () => {
-      client.setOptions({ reverseProxyUrl: 'https://localhost:8080/v1/chat/completions' });
-      expect(client.completionsUrl).toBe('https://localhost:8080/v1/chat/completions');
-      expect(client.langchainProxy).toBe('https://localhost:8080/v1');
-
-      client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
-      expect(client.completionsUrl).toBe('https://example.com/completions');
-      expect(client.langchainProxy).toBe('https://example.com/completions');
-    });
-  });
-
-  describe('setOptions with Simplified Azure Integration', () => {
-    afterEach(() => {
-      delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
-      delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME; - }); - - const azureOpenAIApiInstanceName = 'test-instance'; - const azureOpenAIApiDeploymentName = 'test-deployment'; - const azureOpenAIApiVersion = '2020-07-01-preview'; - - const createOptions = (model) => ({ - modelOptions: { model }, - azure: { - azureOpenAIApiInstanceName, - azureOpenAIApiDeploymentName, - azureOpenAIApiVersion, - }, - }); - - it('should set model from AZURE_OPENAI_DEFAULT_MODEL when Azure is enabled', () => { - process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure'; - const options = createOptions('test'); - client.azure = options.azure; - client.setOptions(options); - expect(client.modelOptions.model).toBe('gpt-4-azure'); - }); - - it('should not change model if Azure is not enabled', () => { - process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure'; - const originalModel = 'test'; - client.azure = false; - client.setOptions(createOptions('test')); - expect(client.modelOptions.model).toBe(originalModel); - }); - - it('should not change model if AZURE_OPENAI_DEFAULT_MODEL is not set and model is passed', () => { - const originalModel = 'GROK-LLM'; - const options = createOptions(originalModel); - client.azure = options.azure; - client.setOptions(options); - expect(client.modelOptions.model).toBe(originalModel); - }); - - it('should change model if AZURE_OPENAI_DEFAULT_MODEL is set and model is passed', () => { - process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure'; - const originalModel = 'GROK-LLM'; - const options = createOptions(originalModel); - client.azure = options.azure; - client.setOptions(options); - expect(client.modelOptions.model).toBe(process.env.AZURE_OPENAI_DEFAULT_MODEL); - }); - - it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is set', () => { - process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true'; - const model = 'gpt-4-azure'; - - const AzureClient = new OpenAIClient('test-api-key', createOptions(model)); - - const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`; - - expect(AzureClient.modelOptions.model).toBe(model); - expect(AzureClient.azureEndpoint).toBe(expectedValue); - }); - - it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME and default model is set', () => { - const defaultModel = 'gpt-4-azure'; - process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true'; - process.env.AZURE_OPENAI_DEFAULT_MODEL = defaultModel; - const model = 'gpt-4-this-is-a-test-model-name'; - - const AzureClient = new OpenAIClient('test-api-key', createOptions(model)); - - const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`; - - expect(AzureClient.modelOptions.model).toBe(defaultModel); - expect(AzureClient.azureEndpoint).toBe(expectedValue); - }); - - it('should not include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is not set', () => { - const model = 'gpt-4-azure'; - - const AzureClient = new OpenAIClient('test-api-key', createOptions(model)); - - const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}/chat/completions?api-version=${azureOpenAIApiVersion}`; - - expect(AzureClient.modelOptions.model).toBe(model); - expect(AzureClient.azureEndpoint).toBe(expectedValue); - }); - }); - - describe('getTokenCount', () => { - it('should return the 
correct token count', () => { - const count = client.getTokenCount('Hello, world!'); - expect(count).toBeGreaterThan(0); - }); - }); - - describe('getSaveOptions', () => { - it('should return the correct save options', () => { - const options = client.getSaveOptions(); - expect(options).toHaveProperty('chatGptLabel'); - expect(options).toHaveProperty('modelLabel'); - expect(options).toHaveProperty('promptPrefix'); - }); - }); - - describe('getBuildMessagesOptions', () => { - it('should return the correct build messages options', () => { - const options = client.getBuildMessagesOptions({ promptPrefix: 'Hello' }); - expect(options).toHaveProperty('isChatCompletion'); - expect(options).toHaveProperty('promptPrefix'); - expect(options.promptPrefix).toBe('Hello'); - }); - }); - - describe('buildMessages', () => { - it('should build messages correctly for chat completion', async () => { - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - expect(result).toHaveProperty('prompt'); - }); - - it('should build messages correctly for non-chat completion', async () => { - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: false, - }); - expect(result).toHaveProperty('prompt'); - }); - - it('should build messages correctly with a promptPrefix', async () => { - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - promptPrefix: 'Test Prefix', - }); - expect(result).toHaveProperty('prompt'); - const instructions = result.prompt.find((item) => item.content.includes('Test Prefix')); - expect(instructions).toBeDefined(); - expect(instructions.content).toContain('Test Prefix'); - }); - - it('should handle context strategy correctly', async () => { - client.contextStrategy = 'summarize'; - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - expect(result).toHaveProperty('prompt'); - expect(result).toHaveProperty('tokenCountMap'); - }); - - it('should assign name property for user messages when options.name is set', async () => { - client.options.name = 'Test User'; - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - const hasUserWithName = result.prompt.some( - (item) => item.role === 'user' && item.name === 'Test_User', - ); - expect(hasUserWithName).toBe(true); - }); - - it('should handle promptPrefix from options when promptPrefix argument is not provided', async () => { - client.options.promptPrefix = 'Test Prefix from options'; - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - const instructions = result.prompt.find((item) => - item.content.includes('Test Prefix from options'), - ); - expect(instructions.content).toContain('Test Prefix from options'); - }); - - it('should handle case when neither promptPrefix argument nor options.promptPrefix is set', async () => { - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - const instructions = result.prompt.find((item) => item.content.includes('Test Prefix')); - expect(instructions).toBeUndefined(); - }); - - it('should handle case when getMessagesForConversation returns null or an empty array', async () => { - const messages = []; - const result = await client.buildMessages(messages, parentMessageId, { - isChatCompletion: true, - }); - expect(result.prompt).toEqual([]); - }); - }); - - 
describe('getTokenCountForMessage', () => { - const example_messages = [ - { - role: 'system', - content: - 'You are a helpful, pattern-following assistant that translates corporate jargon into plain English.', - }, - { - role: 'system', - name: 'example_user', - content: 'New synergies will help drive top-line growth.', - }, - { - role: 'system', - name: 'example_assistant', - content: 'Things working well together will increase revenue.', - }, - { - role: 'system', - name: 'example_user', - content: - "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", - }, - { - role: 'system', - name: 'example_assistant', - content: "Let's talk later when we're less busy about how to do better.", - }, - { - role: 'user', - content: - "This late pivot means we don't have time to boil the ocean for the client deliverable.", - }, - ]; - - const testCases = [ - { model: 'gpt-3.5-turbo-0301', expected: 127 }, - { model: 'gpt-3.5-turbo-0613', expected: 129 }, - { model: 'gpt-3.5-turbo', expected: 129 }, - { model: 'gpt-4-0314', expected: 129 }, - { model: 'gpt-4-0613', expected: 129 }, - { model: 'gpt-4', expected: 129 }, - { model: 'unknown', expected: 129 }, - ]; - - testCases.forEach((testCase) => { - it(`should return ${testCase.expected} tokens for model ${testCase.model}`, () => { - client.modelOptions.model = testCase.model; - // 3 tokens for assistant label - let totalTokens = 3; - for (let message of example_messages) { - totalTokens += client.getTokenCountForMessage(message); - } - expect(totalTokens).toBe(testCase.expected); - }); - }); - - const vision_request = [ - { - role: 'user', - content: [ - { - type: 'text', - text: 'describe what is in this image?', - }, - { - type: 'image_url', - image_url: { - url: 'https://venturebeat.com/wp-content/uploads/2019/03/openai-1.png', - detail: 'high', - }, - }, - ], - }, - ]; - - const expectedTokens = 14; - const visionModel = 'gpt-4-vision-preview'; - - it(`should return ${expectedTokens} tokens for model ${visionModel} (Vision Request)`, () => { - client.modelOptions.model = visionModel; - // 3 tokens for assistant label - let totalTokens = 3; - for (let message of vision_request) { - totalTokens += client.getTokenCountForMessage(message); - } - expect(totalTokens).toBe(expectedTokens); - }); - }); - - describe('checkVisionRequest functionality', () => { - let client; - const attachments = [{ type: 'image/png' }]; - - beforeEach(() => { - client = new OpenAIClient('test-api-key', { - endpoint: 'ollama', - modelOptions: { - model: 'initial-model', - }, - modelsConfig: { - ollama: ['initial-model', 'llava', 'other-model'], - }, - }); - - client.defaultVisionModel = 'non-valid-default-model'; - }); - - afterEach(() => { - jest.restoreAllMocks(); - }); - - it('should set "llava" as the model if it is the first valid model when default validation fails', () => { - client.checkVisionRequest(attachments); - - expect(client.modelOptions.model).toBe('llava'); - expect(client.isVisionModel).toBeTruthy(); - expect(client.modelOptions.stop).toBeUndefined(); - }); - }); - - describe('getStreamUsage', () => { - it('should return this.usage when completion_tokens_details is null', () => { - const client = new OpenAIClient('test-api-key', defaultOptions); - client.usage = { - completion_tokens_details: null, - prompt_tokens: 10, - completion_tokens: 20, - }; - client.inputTokensKey = 'prompt_tokens'; - client.outputTokensKey = 'completion_tokens'; - - const result = client.getStreamUsage(); - - 
expect(result).toEqual(client.usage); - }); - - it('should return this.usage when completion_tokens_details is missing reasoning_tokens', () => { - const client = new OpenAIClient('test-api-key', defaultOptions); - client.usage = { - completion_tokens_details: { - other_tokens: 5, - }, - prompt_tokens: 10, - completion_tokens: 20, - }; - client.inputTokensKey = 'prompt_tokens'; - client.outputTokensKey = 'completion_tokens'; - - const result = client.getStreamUsage(); - - expect(result).toEqual(client.usage); - }); - - it('should calculate output tokens correctly when completion_tokens_details is present with reasoning_tokens', () => { - const client = new OpenAIClient('test-api-key', defaultOptions); - client.usage = { - completion_tokens_details: { - reasoning_tokens: 30, - other_tokens: 5, - }, - prompt_tokens: 10, - completion_tokens: 20, - }; - client.inputTokensKey = 'prompt_tokens'; - client.outputTokensKey = 'completion_tokens'; - - const result = client.getStreamUsage(); - - expect(result).toEqual({ - reasoning_tokens: 30, - other_tokens: 5, - prompt_tokens: 10, - completion_tokens: 10, // |30 - 20| = 10 - }); - }); - - it('should return this.usage when it is undefined', () => { - const client = new OpenAIClient('test-api-key', defaultOptions); - client.usage = undefined; - - const result = client.getStreamUsage(); - - expect(result).toBeUndefined(); - }); - }); -}); diff --git a/api/app/clients/specs/OpenAIClient.tokens.js b/api/app/clients/specs/OpenAIClient.tokens.js deleted file mode 100644 index 9b556b38b9..0000000000 --- a/api/app/clients/specs/OpenAIClient.tokens.js +++ /dev/null @@ -1,130 +0,0 @@ -/* - This is a test script to see how much memory is used by the client when encoding. - On my work machine, it was able to process 10,000 encoding requests / 48.686 seconds = approximately 205.4 RPS - I've significantly reduced the amount of encoding needed by saving token counts in the database, so these - numbers should only be hit with a large amount of concurrent users - It would take 103 concurrent users sending 1 message every 1 second to hit these numbers, which is rather unrealistic, - and at that point, out-sourcing the encoding to a separate server would be a better solution - Also, for scaling, could increase the rate at which the encoder resets; the trade-off is more resource usage on the server. - Initial memory usage: 25.93 megabytes - Peak memory usage: 55 megabytes - Final memory usage: 28.03 megabytes - Post-test (timeout of 15s): 21.91 megabytes -*/ - -require('dotenv').config(); -const { OpenAIClient } = require('../'); - -function timeout(ms) { - return new Promise((resolve) => setTimeout(resolve, ms)); -} - -const run = async () => { - const text = ` - The standard Lorem Ipsum passage, used since the 1500s - - "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." - Section 1.10.32 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC - - "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. 
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?" - 1914 translation by H. Rackham - - "But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?" - Section 1.10.33 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC - - "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat." - 1914 translation by H. Rackham - - "On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. 
The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains." - `; - const model = 'gpt-3.5-turbo'; - let maxContextTokens = 4095; - if (model === 'gpt-4') { - maxContextTokens = 8191; - } else if (model === 'gpt-4-32k') { - maxContextTokens = 32767; - } - const clientOptions = { - reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null, - maxContextTokens, - modelOptions: { - model, - }, - proxy: process.env.PROXY || null, - debug: true, - }; - - let apiKey = process.env.OPENAI_API_KEY; - - const maxMemory = 0.05 * 1024 * 1024 * 1024; - - // Calculate initial percentage of memory used - const initialMemoryUsage = process.memoryUsage().heapUsed; - - function printProgressBar(percentageUsed) { - const filledBlocks = Math.round(percentageUsed / 2); // Each block represents 2% - const emptyBlocks = 50 - filledBlocks; // Total blocks is 50 (each represents 2%), so the rest are empty - const progressBar = - '[' + - '█'.repeat(filledBlocks) + - ' '.repeat(emptyBlocks) + - '] ' + - percentageUsed.toFixed(2) + - '%'; - console.log(progressBar); - } - - const iterations = 10000; - console.time('loopTime'); - // Trying to catch the error doesn't help; all future calls will immediately crash - for (let i = 0; i < iterations; i++) { - try { - console.log(`Iteration ${i}`); - const client = new OpenAIClient(apiKey, clientOptions); - - client.getTokenCount(text); - // const encoder = client.constructor.getTokenizer('cl100k_base'); - // console.log(`Iteration ${i}: call encode()...`); - // encoder.encode(text, 'all'); - // encoder.free(); - - const memoryUsageDuringLoop = process.memoryUsage().heapUsed; - const percentageUsed = (memoryUsageDuringLoop / maxMemory) * 100; - printProgressBar(percentageUsed); - - if (i === iterations - 1) { - console.log(' done'); - // encoder.free(); - } - } catch (e) { - console.log(`caught error! in Iteration ${i}`); - console.log(e); - } - } - - console.timeEnd('loopTime'); - // Calculate final percentage of memory used - const finalMemoryUsage = process.memoryUsage().heapUsed; - // const finalPercentageUsed = finalMemoryUsage / maxMemory * 100; - console.log(`Initial memory usage: ${initialMemoryUsage / 1024 / 1024} megabytes`); - console.log(`Final memory usage: ${finalMemoryUsage / 1024 / 1024} megabytes`); - await timeout(15000); - const memoryUsageAfterTimeout = process.memoryUsage().heapUsed; - console.log(`Post timeout: ${memoryUsageAfterTimeout / 1024 / 1024} megabytes`); -}; - -run(); - -process.on('uncaughtException', (err) => { - if (!err.message.includes('fetch failed')) { - console.error('There was an uncaught error:'); - console.error(err); - } - - if (err.message.includes('fetch failed')) { - console.log('fetch failed error caught'); - // process.exit(0); - } else { - process.exit(1); - } -}); diff --git a/api/app/clients/tools/.well-known/Ai_PDF.json b/api/app/clients/tools/.well-known/Ai_PDF.json deleted file mode 100644 index e3caf6e2c7..0000000000 --- a/api/app/clients/tools/.well-known/Ai_PDF.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Ai PDF", - "name_for_model": "Ai_PDF", - "description_for_human": "Super-fast, interactive chats with PDFs of any size, complete with page references for fact checking.", - "description_for_model": "Provide a URL to a PDF and search the document. Break the user question in multiple semantic search queries and calls as needed. 
Think step by step.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/logo.png", - "contact_email": "support@promptapps.ai", - "legal_info_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/legal.html" -} diff --git a/api/app/clients/tools/.well-known/BrowserOp.json b/api/app/clients/tools/.well-known/BrowserOp.json deleted file mode 100644 index 5a3bb86f92..0000000000 --- a/api/app/clients/tools/.well-known/BrowserOp.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "BrowserOp", - "name_for_model": "BrowserOp", - "description_for_human": "Browse dozens of webpages in one query. Fetch information more efficiently.", - "description_for_model": "This tool offers the feature for users to input a URL or multiple URLs and interact with them as needed. It's designed to comprehend the user's intent and proffer tailored suggestions in line with the content and functionality of the webpage at hand. Services like text rewrites, translations and more can be requested. When users need specific information to finish a task or if they intend to perform a search, this tool becomes a bridge to the search engine and generates responses based on the results. Whether the user is seeking information about restaurants, rentals, weather, or shopping, this tool connects to the internet and delivers the most recent results.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://testplugin.feednews.com/.well-known/openapi.yaml" - }, - "logo_url": "https://openapi-af.op-mobile.opera.com/openapi/testplugin/.well-known/logo.png", - "contact_email": "aiplugins-contact-list@opera.com", - "legal_info_url": "https://legal.apexnews.com/terms/" -} diff --git a/api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json b/api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json deleted file mode 100644 index 99774d9573..0000000000 --- a/api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Dr. Thoth's Tarot", - "name_for_model": "Dr_Thoths_Tarot", - "description_for_human": "Tarot card novelty entertainment & analysis, by Mnemosyne Labs.", - "description_for_model": "Intelligent analysis program for tarot card entertaiment, data, & prompts, by Mnemosyne Labs, a division of AzothCorp.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://dr-thoth-tarot.herokuapp.com/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://dr-thoth-tarot.herokuapp.com/logo.png", - "contact_email": "legal@AzothCorp.com", - "legal_info_url": "http://AzothCorp.com/legal", - "endpoints": [ - { - "name": "Draw Card", - "path": "/drawcard", - "method": "GET", - "description": "Generate a single tarot card from the deck of 78 cards." - }, - { - "name": "Occult Card", - "path": "/occult_card", - "method": "GET", - "description": "Generate a tarot card using the specified planet's Kamea matrix.", - "parameters": [ - { - "name": "planet", - "type": "string", - "enum": ["Saturn", "Jupiter", "Mars", "Sun", "Venus", "Mercury", "Moon"], - "required": true, - "description": "The planet name to use the corresponding Kamea matrix." 
- } - ] - }, - { - "name": "Three Card Spread", - "path": "/threecardspread", - "method": "GET", - "description": "Perform a three-card tarot spread." - }, - { - "name": "Celtic Cross Spread", - "path": "/celticcross", - "method": "GET", - "description": "Perform a Celtic Cross tarot spread with 10 cards." - }, - { - "name": "Past, Present, Future Spread", - "path": "/pastpresentfuture", - "method": "GET", - "description": "Perform a Past, Present, Future tarot spread with 3 cards." - }, - { - "name": "Horseshoe Spread", - "path": "/horseshoe", - "method": "GET", - "description": "Perform a Horseshoe tarot spread with 7 cards." - }, - { - "name": "Relationship Spread", - "path": "/relationship", - "method": "GET", - "description": "Perform a Relationship tarot spread." - }, - { - "name": "Career Spread", - "path": "/career", - "method": "GET", - "description": "Perform a Career tarot spread." - }, - { - "name": "Yes/No Spread", - "path": "/yesno", - "method": "GET", - "description": "Perform a Yes/No tarot spread." - }, - { - "name": "Chakra Spread", - "path": "/chakra", - "method": "GET", - "description": "Perform a Chakra tarot spread with 7 cards." - } - ] -} diff --git a/api/app/clients/tools/.well-known/DreamInterpreter.json b/api/app/clients/tools/.well-known/DreamInterpreter.json deleted file mode 100644 index d6d5bb7cf8..0000000000 --- a/api/app/clients/tools/.well-known/DreamInterpreter.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_model": "DreamInterpreter", - "name_for_human": "Dream Interpreter", - "description_for_model": "Interprets your dreams using advanced techniques.", - "description_for_human": "Interprets your dreams using advanced techniques.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://dreamplugin.bgnetmobile.com/.well-known/openapi.json", - "has_user_authentication": false - }, - "logo_url": "https://dreamplugin.bgnetmobile.com/.well-known/logo.png", - "contact_email": "ismail.orkler@bgnetmobile.com", - "legal_info_url": "https://dreamplugin.bgnetmobile.com/terms.html" -} diff --git a/api/app/clients/tools/.well-known/VoxScript.json b/api/app/clients/tools/.well-known/VoxScript.json deleted file mode 100644 index 8691f0ccfd..0000000000 --- a/api/app/clients/tools/.well-known/VoxScript.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "VoxScript", - "name_for_model": "VoxScript", - "description_for_human": "Enables searching of YouTube transcripts, financial data sources, Google Search results, and more!", - "description_for_model": "Plugin for searching through various data sources.", - "auth": { - "type": "service_http", - "authorization_type": "bearer", - "verification_tokens": { - "openai": "ffc5226d1af346c08a98dee7deec9f76" - } - }, - "api": { - "type": "openapi", - "url": "https://voxscript.awt.icu/swagger/v1/swagger.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://voxscript.awt.icu/images/VoxScript_logo_32x32.png", - "contact_email": "voxscript@allwiretech.com", - "legal_info_url": "https://voxscript.awt.icu/legal/" -} diff --git a/api/app/clients/tools/.well-known/askyourpdf.json b/api/app/clients/tools/.well-known/askyourpdf.json deleted file mode 100644 index 0eb31e37c7..0000000000 --- a/api/app/clients/tools/.well-known/askyourpdf.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_model": "askyourpdf", - "name_for_human": "AskYourPDF", - "description_for_model": "This plugin is designed to expedite the
extraction of information from PDF documents. It works by accepting a URL link to a PDF or a document ID (doc_id) from the user. If a URL is provided, the plugin first validates that it is a correct URL. \\nAfter validating the URL, the plugin proceeds to download the PDF and store its content in a vector database. If the user provides a doc_id, the plugin directly retrieves the document from the database. The plugin then scans through the stored PDFs to find answers to user queries or retrieve specific details.\\n\\nHowever, if an error occurs while querying the API, the user is prompted to download their document first, then manually upload it to [![Upload Document](https://raw.githubusercontent.com/AskYourPdf/ask-plugin/main/upload.png)](https://askyourpdf.com/upload). Once the upload is complete, the user should copy the resulting doc_id and paste it back into the chat for further interaction.\nThe plugin is particularly useful when the user's question pertains to content within a PDF document. When providing answers, the plugin also specifies the page number (highlighted in bold) where the relevant information was found. Remember, the URL must be valid for a successful query. Failure to validate the URL may lead to errors or unsuccessful queries.", - "description_for_human": "Unlock the power of your PDFs! Dive into your documents, find answers, and bring information to your fingertips.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "askyourpdf.yaml", - "has_user_authentication": false - }, - "logo_url": "https://plugin.askyourpdf.com/.well-known/logo.png", - "contact_email": "plugin@askyourpdf.com", - "legal_info_url": "https://askyourpdf.com/terms" -} diff --git a/api/app/clients/tools/.well-known/drink_maestro.json b/api/app/clients/tools/.well-known/drink_maestro.json deleted file mode 100644 index d461a4e3f2..0000000000 --- a/api/app/clients/tools/.well-known/drink_maestro.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Drink Maestro", - "name_for_model": "drink_maestro", - "description_for_human": "Learn to mix any drink you can imagine (real or made-up), and discover new ones. Includes drink images.", - "description_for_model": "You are a silly bartender/comic who knows how to make any drink imaginable. You provide recipes for specific drinks, suggest new drinks, and show pictures of drinks. Be creative in your descriptions and make jokes and puns. Use a lot of emojis.
If the user makes a request in another language, send the API call in English, and then translate the response.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://api.drinkmaestro.space/.well-known/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://i.imgur.com/6q8HWdz.png", - "contact_email": "nikkmitchell@gmail.com", - "legal_info_url": "https://github.com/nikkmitchell/DrinkMaestro/blob/main/Legal.txt" -} diff --git a/api/app/clients/tools/.well-known/earthImagesAndVisualizations.json b/api/app/clients/tools/.well-known/earthImagesAndVisualizations.json deleted file mode 100644 index 695a955be1..0000000000 --- a/api/app/clients/tools/.well-known/earthImagesAndVisualizations.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Earth", - "name_for_model": "earthImagesAndVisualizations", - "description_for_human": "Generates a map image based on provided location, tilt and style.", - "description_for_model": "Generates a map image based on provided coordinates or location, tilt and style, and even geoJson to provide markers, paths, and polygons. Responds with an image-link. For the styles choose one of these: [light, dark, streets, outdoors, satellite, satellite-streets]", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://api.earth-plugin.com/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://api.earth-plugin.com/logo.png", - "contact_email": "contact@earth-plugin.com", - "legal_info_url": "https://api.earth-plugin.com/legal.html" -} diff --git a/api/app/clients/tools/.well-known/has-issues/scholarly_graph_link.json b/api/app/clients/tools/.well-known/has-issues/scholarly_graph_link.json deleted file mode 100644 index 8b92e6e381..0000000000 --- a/api/app/clients/tools/.well-known/has-issues/scholarly_graph_link.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Scholarly Graph Link", - "name_for_model": "scholarly_graph_link", - "description_for_human": "You can search papers, authors, datasets and software. It has access to Figshare, Arxiv, and many others.", - "description_for_model": "Run GraphQL queries against an API hosted by DataCite. The API supports most GraphQL queries but does not support mutation statements. Use `{ __schema { types { name kind } } }` to get all the types in the GraphQL schema. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{person(id:ORCID) {works(first:50) {nodes {id titles(first: 1){title} publicationYear}}}}` to get the first 50 works of a person based on their ORCID. All Ids are urls, e.g., https://orcid.org/0012-0000-1012-1110.
Mutation statements are not allowed.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://api.datacite.org/graphql-openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://raw.githubusercontent.com/kjgarza/scholarly_graph_link/master/logo.png", - "contact_email": "kj.garza@gmail.com", - "legal_info_url": "https://github.com/kjgarza/scholarly_graph_link/blob/master/LICENSE" -} diff --git a/api/app/clients/tools/.well-known/has-issues/web_pilot.json b/api/app/clients/tools/.well-known/has-issues/web_pilot.json deleted file mode 100644 index d68c919eb3..0000000000 --- a/api/app/clients/tools/.well-known/has-issues/web_pilot.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "WebPilot", - "name_for_model": "web_pilot", - "description_for_human": "Browse & QA Webpage/PDF/Data. Generate articles, from one or more URLs.", - "description_for_model": "This tool allows users to provide a URL (or URLs) and, optionally, requests for interacting with or extracting specific information from the content of the URL. Requests may include rewrite, translate, and others. If there are any requests, when accessing the /api/visit-web endpoint, the parameter 'user_has_request' should be set to 'true'. And if there are no requests, 'user_has_request' should be set to 'false'.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://webreader.webpilotai.com/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://webreader.webpilotai.com/logo.png", - "contact_email": "dev@webpilot.ai", - "legal_info_url": "https://webreader.webpilotai.com/legal_info.html", - "headers": { - "id": "WebPilot-Friend-UID" - }, - "params": { - "user_has_request": true - } -} diff --git a/api/app/clients/tools/.well-known/image_prompt_enhancer.json b/api/app/clients/tools/.well-known/image_prompt_enhancer.json deleted file mode 100644 index 72f28658c8..0000000000 --- a/api/app/clients/tools/.well-known/image_prompt_enhancer.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Image Prompt Enhancer", - "name_for_model": "image_prompt_enhancer", - "description_for_human": "Transform your ideas into complex, personalized image generation prompts.", - "description_for_model": "Provides instructions for crafting an enhanced image prompt. Use this whenever the user wants to enhance a prompt.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://image-prompt-enhancer.gafo.tech/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://image-prompt-enhancer.gafo.tech/logo.png", - "contact_email": "gafotech1@gmail.com", - "legal_info_url": "https://image-prompt-enhancer.gafo.tech/legal" -} diff --git a/api/app/clients/tools/.well-known/openapi/askyourpdf.yaml b/api/app/clients/tools/.well-known/openapi/askyourpdf.yaml deleted file mode 100644 index cb3affc8b8..0000000000 --- a/api/app/clients/tools/.well-known/openapi/askyourpdf.yaml +++ /dev/null @@ -1,157 +0,0 @@ -openapi: 3.0.2 -info: - title: FastAPI - version: 0.1.0 -servers: - - url: https://plugin.askyourpdf.com -paths: - /api/download_pdf: - post: - summary: Download PDF - description: Download a PDF file from a URL and save it to the vector database.
- operationId: download_pdf_api_download_pdf_post - parameters: - - required: true - schema: - title: Url - type: string - name: url - in: query - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/FileResponse' - '422': - description: Validation Error - content: - application/json: - schema: - $ref: '#/components/schemas/HTTPValidationError' - /query: - post: - summary: Perform Query - description: Perform a query on a document. - operationId: perform_query_query_post - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InputData' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/ResponseModel' - '422': - description: Validation Error - content: - application/json: - schema: - $ref: '#/components/schemas/HTTPValidationError' -components: - schemas: - DocumentMetadata: - title: DocumentMetadata - required: - - source - - page_number - - author - type: object - properties: - source: - title: Source - type: string - page_number: - title: Page Number - type: integer - author: - title: Author - type: string - FileResponse: - title: FileResponse - required: - - docId - type: object - properties: - docId: - title: Docid - type: string - error: - title: Error - type: string - HTTPValidationError: - title: HTTPValidationError - type: object - properties: - detail: - title: Detail - type: array - items: - $ref: '#/components/schemas/ValidationError' - InputData: - title: InputData - required: - - doc_id - - query - type: object - properties: - doc_id: - title: Doc Id - type: string - query: - title: Query - type: string - ResponseModel: - title: ResponseModel - required: - - results - type: object - properties: - results: - title: Results - type: array - items: - $ref: '#/components/schemas/SearchResult' - SearchResult: - title: SearchResult - required: - - doc_id - - text - - metadata - type: object - properties: - doc_id: - title: Doc Id - type: string - text: - title: Text - type: string - metadata: - $ref: '#/components/schemas/DocumentMetadata' - ValidationError: - title: ValidationError - required: - - loc - - msg - - type - type: object - properties: - loc: - title: Location - type: array - items: - anyOf: - - type: string - - type: integer - msg: - title: Message - type: string - type: - title: Error Type - type: string diff --git a/api/app/clients/tools/.well-known/openapi/scholarai.yaml b/api/app/clients/tools/.well-known/openapi/scholarai.yaml deleted file mode 100644 index 34cca8296f..0000000000 --- a/api/app/clients/tools/.well-known/openapi/scholarai.yaml +++ /dev/null @@ -1,185 +0,0 @@ -openapi: 3.0.1 -info: - title: ScholarAI - description: Allows the user to search facts and findings from scientific articles - version: 'v1' -servers: - - url: https://scholar-ai.net -paths: - /api/abstracts: - get: - operationId: searchAbstracts - summary: Get relevant paper abstracts by keywords search - parameters: - - name: keywords - in: query - description: Keywords of inquiry which should appear in article. Must be in English. - required: true - schema: - type: string - - name: sort - in: query - description: The sort order for results. Valid values are cited_by_count or publication_date. Excluding this value does a relevance based search. 
- required: false - schema: - type: string - enum: - - cited_by_count - - publication_date - - name: query - in: query - description: The user query - required: true - schema: - type: string - - name: peer_reviewed_only - in: query - description: Whether to only return peer reviewed articles. Defaults to true, ChatGPT should cautiously suggest this value can be set to false - required: false - schema: - type: string - - name: start_year - in: query - description: The first year, inclusive, to include in the search range. Excluding this value will include all years. - required: false - schema: - type: string - - name: end_year - in: query - description: The last year, inclusive, to include in the search range. Excluding this value will include all years. - required: false - schema: - type: string - - name: offset - in: query - description: The offset of the first result to return. Defaults to 0. - required: false - schema: - type: string - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/searchAbstractsResponse' - /api/fulltext: - get: - operationId: getFullText - summary: Get full text of a paper by URL for PDF - parameters: - - name: pdf_url - in: query - description: URL for PDF - required: true - schema: - type: string - - name: chunk - in: query - description: chunk number to retrieve, defaults to 1 - required: false - schema: - type: number - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/getFullTextResponse' - /api/save-citation: - get: - operationId: saveCitation - summary: Save citation to reference manager - parameters: - - name: doi - in: query - description: Digital Object Identifier (DOI) of article - required: true - schema: - type: string - - name: zotero_user_id - in: query - description: Zotero User ID - required: true - schema: - type: string - - name: zotero_api_key - in: query - description: Zotero API Key - required: true - schema: - type: string - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/saveCitationResponse' -components: - schemas: - searchAbstractsResponse: - type: object - properties: - next_offset: - type: number - description: The offset of the next page of results. - total_num_results: - type: number - description: The total number of results. - abstracts: - type: array - items: - type: object - properties: - title: - type: string - abstract: - type: string - description: Summary of the context, methods, results, and conclusions of the paper. - doi: - type: string - description: The DOI of the paper. - landing_page_url: - type: string - description: Link to the paper on its open-access host. - pdf_url: - type: string - description: Link to the paper PDF. - publicationDate: - type: string - description: The date the paper was published in YYYY-MM-DD format. - relevance: - type: number - description: The relevance of the paper to the search query. 1 is the most relevant. - creators: - type: array - items: - type: string - description: The name of the creator. - cited_by_count: - type: number - description: The number of citations of the article. - description: The list of relevant abstracts. - getFullTextResponse: - type: object - properties: - full_text: - type: string - description: The full text of the paper. - pdf_url: - type: string - description: The PDF URL of the paper. - chunk: - type: number - description: The chunk of the paper. 
- total_chunk_num: - type: number - description: The total chunks of the paper. - saveCitationResponse: - type: object - properties: - message: - type: string - description: Confirmation of successful save or error message. \ No newline at end of file diff --git a/api/app/clients/tools/.well-known/qrCodes.json b/api/app/clients/tools/.well-known/qrCodes.json deleted file mode 100644 index b5618916ac..0000000000 --- a/api/app/clients/tools/.well-known/qrCodes.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "QR Codes", - "name_for_model": "qrCodes", - "description_for_human": "Create QR codes.", - "description_for_model": "Plugin for generating QR codes.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/openapi.yaml" - }, - "logo_url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/logo.png", - "contact_email": "chrismountzou@gmail.com", - "legal_info_url": "https://raw.githubusercontent.com/mountzou/qrCodeGPTv1/master/legal" -} diff --git a/api/app/clients/tools/.well-known/scholarai.json b/api/app/clients/tools/.well-known/scholarai.json deleted file mode 100644 index 1900a926c2..0000000000 --- a/api/app/clients/tools/.well-known/scholarai.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "ScholarAI", - "name_for_model": "scholarai", - "description_for_human": "Unleash scientific research: search 40M+ peer-reviewed papers, explore scientific PDFs, and save to reference managers.", - "description_for_model": "Access open access scientific literature from peer-reviewed journals. The abstract endpoint finds relevant papers based on 2 to 6 keywords. After getting abstracts, ALWAYS prompt the user offering to go into more detail. Use the fulltext endpoint to retrieve the entire paper's text and access specific details using the provided pdf_url, if available. ALWAYS hyperlink the pdf_url from the responses if available. Offer to dive into the fulltext or search for additional papers. 
Always ask if the user wants to save any paper to the user’s Zotero reference manager by using the save-citation endpoint and providing the doi and requesting the user’s zotero_user_id and zotero_api_key.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "scholarai.yaml", - "is_user_authenticated": false - }, - "params": { - "sort": "cited_by_count" - }, - "logo_url": "https://scholar-ai.net/logo.png", - "contact_email": "lakshb429@gmail.com", - "legal_info_url": "https://scholar-ai.net/legal.txt", - "HttpAuthorizationType": "basic" -} diff --git a/api/app/clients/tools/.well-known/uberchord.json b/api/app/clients/tools/.well-known/uberchord.json deleted file mode 100644 index d5bb224353..0000000000 --- a/api/app/clients/tools/.well-known/uberchord.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Uberchord", - "name_for_model": "uberchord", - "description_for_human": "Find guitar chord diagrams by specifying the chord name.", - "description_for_model": "Fetch guitar chord diagrams and their positions on the guitar fretboard.", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://guitarchords.pluginboost.com/.well-known/openapi.yaml", - "is_user_authenticated": false - }, - "logo_url": "https://guitarchords.pluginboost.com/logo.png", - "contact_email": "info.bluelightweb@gmail.com", - "legal_info_url": "https://guitarchords.pluginboost.com/legal" -} diff --git a/api/app/clients/tools/.well-known/web_search.json b/api/app/clients/tools/.well-known/web_search.json deleted file mode 100644 index 4d15c788ee..0000000000 --- a/api/app/clients/tools/.well-known/web_search.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "schema_version": "v1", - "name_for_human": "Web Search", - "name_for_model": "web_search", - "description_for_human": "Search for information from the internet", - "description_for_model": "Search for information from the internet", - "auth": { - "type": "none" - }, - "api": { - "type": "openapi", - "url": "https://websearch.plugsugar.com/api/openapi_yaml", - "is_user_authenticated": false - }, - "logo_url": "https://websearch.plugsugar.com/200x200.png", - "contact_email": "support@plugsugar.com", - "legal_info_url": "https://websearch.plugsugar.com/contact" -} diff --git a/api/app/clients/tools/util/handleOpenAIErrors.js b/api/app/clients/tools/util/handleOpenAIErrors.js deleted file mode 100644 index b3a7c2bfdc..0000000000 --- a/api/app/clients/tools/util/handleOpenAIErrors.js +++ /dev/null @@ -1,33 +0,0 @@ -const OpenAI = require('openai'); -const { logger } = require('@librechat/data-schemas'); - -/** - * Handles errors that may occur when making requests to OpenAI's API. - * It checks the instance of the error and prints a specific warning message - * to the console depending on the type of error encountered. - * It then calls an optional error callback function with the error object. - * - * @param {Error} err - The error object thrown by OpenAI API. - * @param {Function} errorCallback - A callback function that is called with the error object. - * @param {string} [context='stream'] - A string providing context where the error occurred, defaults to 'stream'.
- */ -async function handleOpenAIErrors(err, errorCallback, context = 'stream') { - if (err instanceof OpenAI.APIError && err?.message?.includes('abort')) { - logger.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`); - } - if (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) { - logger.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`); - } else if (err instanceof OpenAI.APIError) { - logger.warn(`[OpenAIClient.chatCompletion][${context}] API error`); - } else { - logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`); - } - - logger.error(err); - - if (errorCallback) { - errorCallback(err); - } -} - -module.exports = handleOpenAIErrors; diff --git a/api/app/clients/tools/util/index.js b/api/app/clients/tools/util/index.js index ea67bb4ced..9c96fb50f3 100644 --- a/api/app/clients/tools/util/index.js +++ b/api/app/clients/tools/util/index.js @@ -1,8 +1,6 @@ const { validateTools, loadTools } = require('./handleTools'); -const handleOpenAIErrors = require('./handleOpenAIErrors'); module.exports = { - handleOpenAIErrors, validateTools, loadTools, }; diff --git a/api/jest.config.js b/api/jest.config.js index fd8bd31bd9..20ee3c6aed 100644 --- a/api/jest.config.js +++ b/api/jest.config.js @@ -4,11 +4,7 @@ module.exports = { roots: ['<rootDir>'], coverageDirectory: 'coverage', testTimeout: 30000, // 30 seconds timeout for all tests - setupFiles: [ - './test/jestSetup.js', - './test/__mocks__/logger.js', - './test/__mocks__/fetchEventSource.js', - ], + setupFiles: ['./test/jestSetup.js', './test/__mocks__/logger.js'], moduleNameMapper: { '~/(.*)': '<rootDir>/$1', '~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json', diff --git a/api/lib/utils/mergeSort.js b/api/lib/utils/mergeSort.js deleted file mode 100644 index b93e3e9902..0000000000 --- a/api/lib/utils/mergeSort.js +++ /dev/null @@ -1,29 +0,0 @@ -function mergeSort(arr, compareFn) { - if (arr.length <= 1) { - return arr; - } - - const mid = Math.floor(arr.length / 2); - const leftArr = arr.slice(0, mid); - const rightArr = arr.slice(mid); - - return merge(mergeSort(leftArr, compareFn), mergeSort(rightArr, compareFn), compareFn); -} - -function merge(leftArr, rightArr, compareFn) { - const result = []; - let leftIndex = 0; - let rightIndex = 0; - - while (leftIndex < leftArr.length && rightIndex < rightArr.length) { - if (compareFn(leftArr[leftIndex], rightArr[rightIndex]) < 0) { - result.push(leftArr[leftIndex++]); - } else { - result.push(rightArr[rightIndex++]); - } - } - - return result.concat(leftArr.slice(leftIndex)).concat(rightArr.slice(rightIndex)); -} - -module.exports = mergeSort; diff --git a/api/lib/utils/misc.js b/api/lib/utils/misc.js deleted file mode 100644 index f7b0e66cbf..0000000000 --- a/api/lib/utils/misc.js +++ /dev/null @@ -1,8 +0,0 @@ -const cleanUpPrimaryKeyValue = (value) => { - // For Bing convoId handling - return value.replace(/--/g, '|'); -}; - -module.exports = { - cleanUpPrimaryKeyValue, -}; diff --git a/api/package.json b/api/package.json index efee26920a..a4758f2d85 100644 --- a/api/package.json +++ b/api/package.json @@ -34,26 +34,20 @@ }, "homepage": "https://librechat.ai", "dependencies": { - "@anthropic-ai/sdk": "^0.52.0", "@aws-sdk/client-s3": "^3.758.0", "@aws-sdk/s3-request-presigner": "^3.758.0", "@azure/identity": "^4.7.0", "@azure/search-documents": "^12.0.0", "@azure/storage-blob": "^12.27.0", - "@google/generative-ai": "^0.24.0", "@googleapis/youtube": "^20.0.0", "@keyv/redis": "^4.3.3", "@langchain/core": "^0.3.79", -
"@langchain/google-genai": "^0.2.13", - "@langchain/google-vertexai": "^0.2.13", - "@langchain/textsplitters": "^0.1.0", "@librechat/agents": "^3.0.50", "@librechat/api": "*", "@librechat/data-schemas": "*", "@microsoft/microsoft-graph-client": "^3.0.7", "@modelcontextprotocol/sdk": "^1.21.0", "@node-saml/passport-saml": "^5.1.0", - "@waylaidwanderer/fetch-event-source": "^3.0.1", "axios": "^1.12.1", "bcryptjs": "^2.4.3", "compression": "^1.8.1", @@ -72,7 +66,6 @@ "file-type": "^18.7.0", "firebase": "^11.0.2", "form-data": "^4.0.4", - "googleapis": "^126.0.1", "handlebars": "^4.7.7", "https-proxy-agent": "^7.0.6", "ioredis": "^5.3.2", diff --git a/api/server/controllers/EditController.js b/api/server/controllers/EditController.js deleted file mode 100644 index d24e87ce3a..0000000000 --- a/api/server/controllers/EditController.js +++ /dev/null @@ -1,247 +0,0 @@ -const { sendEvent } = require('@librechat/api'); -const { logger } = require('@librechat/data-schemas'); -const { getResponseSender } = require('librechat-data-provider'); -const { - handleAbortError, - createAbortController, - cleanupAbortController, -} = require('~/server/middleware'); -const { - disposeClient, - processReqData, - clientRegistry, - requestDataMap, -} = require('~/server/cleanup'); -const { createOnProgress } = require('~/server/utils'); -const { saveMessage } = require('~/models'); - -const EditController = async (req, res, next, initializeClient) => { - let { - text, - generation, - endpointOption, - conversationId, - modelDisplayLabel, - responseMessageId, - isContinued = false, - parentMessageId = null, - overrideParentMessageId = null, - } = req.body; - - let client = null; - let abortKey = null; - let cleanupHandlers = []; - let clientRef = null; // Declare clientRef here - - logger.debug('[EditController]', { - text, - generation, - isContinued, - conversationId, - ...endpointOption, - modelsConfig: endpointOption.modelsConfig ? 
'exists' : '', - }); - - let userMessage = null; - let userMessagePromise = null; - let promptTokens = null; - let getAbortData = null; - - const sender = getResponseSender({ - ...endpointOption, - model: endpointOption.modelOptions.model, - modelDisplayLabel, - }); - const userMessageId = parentMessageId; - const userId = req.user.id; - - let reqDataContext = { userMessage, userMessagePromise, responseMessageId, promptTokens }; - - const updateReqData = (data = {}) => { - reqDataContext = processReqData(data, reqDataContext); - abortKey = reqDataContext.abortKey; - userMessage = reqDataContext.userMessage; - userMessagePromise = reqDataContext.userMessagePromise; - responseMessageId = reqDataContext.responseMessageId; - promptTokens = reqDataContext.promptTokens; - }; - - let { onProgress: progressCallback, getPartialText } = createOnProgress({ - generation, - }); - - const performCleanup = () => { - logger.debug('[EditController] Performing cleanup'); - if (Array.isArray(cleanupHandlers)) { - for (const handler of cleanupHandlers) { - try { - if (typeof handler === 'function') { - handler(); - } - } catch (e) { - // Ignore - } - } - } - - if (abortKey) { - logger.debug('[EditController] Cleaning up abort controller'); - cleanupAbortController(abortKey); - abortKey = null; - } - - if (client) { - disposeClient(client); - client = null; - } - - reqDataContext = null; - userMessage = null; - userMessagePromise = null; - promptTokens = null; - getAbortData = null; - progressCallback = null; - endpointOption = null; - cleanupHandlers = null; - - if (requestDataMap.has(req)) { - requestDataMap.delete(req); - } - logger.debug('[EditController] Cleanup completed'); - }; - - try { - ({ client } = await initializeClient({ req, res, endpointOption })); - - if (clientRegistry && client) { - clientRegistry.register(client, { userId }, client); - } - - if (client) { - requestDataMap.set(req, { client }); - } - - clientRef = new WeakRef(client); - - getAbortData = () => { - const currentClient = clientRef?.deref(); - const currentText = - currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); - - return { - sender, - conversationId, - messageId: reqDataContext.responseMessageId, - parentMessageId: overrideParentMessageId ?? 
userMessageId, - text: currentText, - userMessage: userMessage, - userMessagePromise: userMessagePromise, - promptTokens: reqDataContext.promptTokens, - }; - }; - - const { onStart, abortController } = createAbortController( - req, - res, - getAbortData, - updateReqData, - ); - - const closeHandler = () => { - logger.debug('[EditController] Request closed'); - if (!abortController || abortController.signal.aborted || abortController.requestCompleted) { - return; - } - abortController.abort(); - logger.debug('[EditController] Request aborted on close'); - }; - - res.on('close', closeHandler); - cleanupHandlers.push(() => { - try { - res.removeListener('close', closeHandler); - } catch (e) { - // Ignore - } - }); - - let response = await client.sendMessage(text, { - user: userId, - generation, - isContinued, - isEdited: true, - conversationId, - parentMessageId, - responseMessageId: reqDataContext.responseMessageId, - overrideParentMessageId, - getReqData: updateReqData, - onStart, - abortController, - progressCallback, - progressOptions: { - res, - }, - }); - - const databasePromise = response.databasePromise; - delete response.databasePromise; - - const { conversation: convoData = {} } = await databasePromise; - const conversation = { ...convoData }; - conversation.title = - conversation && !conversation.title ? null : conversation?.title || 'New Chat'; - - if (client?.options?.attachments && endpointOption?.modelOptions?.model) { - conversation.model = endpointOption.modelOptions.model; - } - - if (!abortController.signal.aborted) { - const finalUserMessage = reqDataContext.userMessage; - const finalResponseMessage = { ...response }; - - sendEvent(res, { - final: true, - conversation, - title: conversation.title, - requestMessage: finalUserMessage, - responseMessage: finalResponseMessage, - }); - res.end(); - - await saveMessage( - req, - { ...finalResponseMessage, user: userId }, - { context: 'api/server/controllers/EditController.js - response end' }, - ); - } - - performCleanup(); - } catch (error) { - logger.error('[EditController] Error handling request', error); - let partialText = ''; - try { - const currentClient = clientRef?.deref(); - partialText = - currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); - } catch (getTextError) { - logger.error('[EditController] Error calling getText() during error handling', getTextError); - } - - handleAbortError(res, req, error, { - sender, - partialText, - conversationId, - messageId: reqDataContext.responseMessageId, - parentMessageId: overrideParentMessageId ?? userMessageId ?? 
parentMessageId, - userMessageId, - }) - .catch((err) => { - logger.error('[EditController] Error in `handleAbortError` during catch block', err); - }) - .finally(() => { - performCleanup(); - }); - } -}; - -module.exports = EditController; diff --git a/api/server/controllers/assistants/chatV1.js b/api/server/controllers/assistants/chatV1.js index 91759bed37..804594d0bf 100644 --- a/api/server/controllers/assistants/chatV1.js +++ b/api/server/controllers/assistants/chatV1.js @@ -7,6 +7,7 @@ const { Constants, RunStatus, CacheKeys, + VisionModes, ContentTypes, EModelEndpoint, ViolationTypes, @@ -25,6 +26,7 @@ const { const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService'); const validateAuthor = require('~/server/middleware/assistants/validateAuthor'); const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts'); +const { encodeAndFormat } = require('~/server/services/Files/images/encode'); const { createRun, StreamRunManager } = require('~/server/services/Runs'); const { addTitle } = require('~/server/services/Endpoints/assistants'); const { createRunBody } = require('~/server/services/createRunBody'); @@ -64,7 +66,7 @@ const chatV1 = async (req, res) => { clientTimestamp, } = req.body; - /** @type {OpenAIClient} */ + /** @type {OpenAI} */ let openai; /** @type {string|undefined} - the current thread id */ let thread_id = _thread_id; @@ -285,11 +287,10 @@ const chatV1 = async (req, res) => { }); }; - const { openai: _openai, client } = await getOpenAIClient({ + const { openai: _openai } = await getOpenAIClient({ req, res, endpointOption, - initAppClient: true, }); openai = _openai; @@ -364,7 +365,15 @@ const chatV1 = async (req, res) => { role: 'user', content: '', }; - const files = await client.addImageURLs(visionMessage, attachments); + const { files, image_urls } = await encodeAndFormat( + req, + attachments, + { + endpoint: EModelEndpoint.assistants, + }, + VisionModes.generative, + ); + visionMessage.image_urls = image_urls.length ? image_urls : undefined; if (!visionMessage.image_urls?.length) { return; } @@ -609,7 +618,6 @@ const chatV1 = async (req, res) => { text, responseText: response.text, conversationId, - client, }); } diff --git a/api/server/controllers/assistants/chatV2.js b/api/server/controllers/assistants/chatV2.js index 2dcfef2846..414681d6dc 100644 --- a/api/server/controllers/assistants/chatV2.js +++ b/api/server/controllers/assistants/chatV2.js @@ -61,7 +61,7 @@ const chatV2 = async (req, res) => { clientTimestamp, } = req.body; - /** @type {OpenAIClient} */ + /** @type {OpenAI} */ let openai; /** @type {string|undefined} - the current thread id */ let thread_id = _thread_id; @@ -160,11 +160,10 @@ const chatV2 = async (req, res) => { }); }; - const { openai: _openai, client } = await getOpenAIClient({ + const { openai: _openai } = await getOpenAIClient({ req, res, endpointOption, - initAppClient: true, }); openai = _openai; @@ -453,7 +452,6 @@ const chatV2 = async (req, res) => { text, responseText: response.text, conversationId, - client, }); } diff --git a/api/server/controllers/assistants/helpers.js b/api/server/controllers/assistants/helpers.js index 418fd45808..be3d46bbbb 100644 --- a/api/server/controllers/assistants/helpers.js +++ b/api/server/controllers/assistants/helpers.js @@ -63,7 +63,7 @@ const _listAssistants = async ({ req, res, version, query }) => { * @returns {Promise>} A promise that resolves to the response from the `openai.beta.assistants.list` method call. 
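// A brief sketch of the vision flow that replaces client.addImageURLs in chatV1 above,
// assuming encodeAndFormat resolves stored file records alongside ready-to-send image
// payloads (the call shape is taken from this diff; the rest is illustrative):
//
//   const { files, image_urls } = await encodeAndFormat(
//     req,
//     attachments,
//     { endpoint: EModelEndpoint.assistants },
//     VisionModes.generative,
//   );
//   visionMessage.image_urls = image_urls.length ? image_urls : undefined;
//   if (!visionMessage.image_urls?.length) {
//     return; // nothing usable was encoded, so the vision prompt is skipped entirely
//   }
//
// The practical difference: encoding no longer requires constructing a legacy client,
// only the request and the raw attachments.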
  */
 const listAllAssistants = async ({ req, res, version, query }) => {
-  /** @type {{ openai: OpenAIClient }} */
+  /** @type {{ openai: OpenAI }} */
   const { openai } = await getOpenAIClient({ req, res, version });
 
   const allAssistants = [];
@@ -181,7 +181,7 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
  * @param {TEndpointOption} params.endpointOption - The endpoint options.
  * @param {boolean} params.initAppClient - Whether to initialize the app client.
  * @param {string} params.overrideEndpoint - The endpoint to override.
- * @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
+ * @returns {Promise<{ openai: OpenAI, openAIApiKey: string }>} - The initialized OpenAI SDK client.
  */
 async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
   let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
diff --git a/api/server/experimental.js b/api/server/experimental.js
index 0ceb58de22..ab413ac0ef 100644
--- a/api/server/experimental.js
+++ b/api/server/experimental.js
@@ -286,7 +286,6 @@ if (cluster.isMaster) {
   app.use('/api/keys', routes.keys);
   app.use('/api/user', routes.user);
   app.use('/api/search', routes.search);
-  app.use('/api/edit', routes.edit);
   app.use('/api/messages', routes.messages);
   app.use('/api/convos', routes.convos);
   app.use('/api/presets', routes.presets);
diff --git a/api/server/index.js b/api/server/index.js
index 767847c286..903d63982b 100644
--- a/api/server/index.js
+++ b/api/server/index.js
@@ -122,7 +122,6 @@ const startServer = async () => {
   app.use('/api/keys', routes.keys);
   app.use('/api/user', routes.user);
   app.use('/api/search', routes.search);
-  app.use('/api/edit', routes.edit);
   app.use('/api/messages', routes.messages);
   app.use('/api/convos', routes.convos);
   app.use('/api/presets', routes.presets);
@@ -131,7 +130,6 @@ const startServer = async () => {
   app.use('/api/endpoints', routes.endpoints);
   app.use('/api/balance', routes.balance);
   app.use('/api/models', routes.models);
-  app.use('/api/plugins', routes.plugins);
   app.use('/api/config', routes.config);
   app.use('/api/assistants', routes.assistants);
   app.use('/api/files', await routes.files.initialize());
diff --git a/api/server/middleware/checkBan.js b/api/server/middleware/checkBan.js
index b8e680cb94..79804a84e1 100644
--- a/api/server/middleware/checkBan.js
+++ b/api/server/middleware/checkBan.js
@@ -19,14 +19,14 @@ const message = 'Your account has been temporarily banned due to violations of o
  * @param {Object} req - Express Request object.
  * @param {Object} res - Express Response object.
  *
- * @returns {Promise} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/ask or api/edit types. If it is, calls `denyRequest()` function.
+ * @returns {Promise} - Returns a Promise which, when resolved, sends a 403 response with a specific message if the request is not to the api/agents/chat route; if it is, calls `denyRequest()` instead.
*/ const banResponse = async (req, res) => { const ua = uap(req.headers['user-agent']); - const { baseUrl } = req; + const { baseUrl, originalUrl } = req; if (!ua.browser.name) { return res.status(403).json({ message }); - } else if (baseUrl === '/api/ask' || baseUrl === '/api/edit') { + } else if (baseUrl === '/api/agents' && originalUrl.startsWith('/api/agents/chat')) { return await denyRequest(req, res, { type: ViolationTypes.BAN }); } diff --git a/api/server/middleware/index.js b/api/server/middleware/index.js index 55ee465674..2aad5a47e7 100644 --- a/api/server/middleware/index.js +++ b/api/server/middleware/index.js @@ -4,7 +4,6 @@ const buildEndpointOption = require('./buildEndpointOption'); const validateMessageReq = require('./validateMessageReq'); const checkDomainAllowed = require('./checkDomainAllowed'); const concurrentLimiter = require('./concurrentLimiter'); -const validateEndpoint = require('./validateEndpoint'); const requireLocalAuth = require('./requireLocalAuth'); const canDeleteAccount = require('./canDeleteAccount'); const accessResources = require('./accessResources'); @@ -42,7 +41,6 @@ module.exports = { requireLdapAuth, requireLocalAuth, canDeleteAccount, - validateEndpoint, configMiddleware, concurrentLimiter, checkDomainAllowed, diff --git a/api/server/middleware/validateEndpoint.js b/api/server/middleware/validateEndpoint.js deleted file mode 100644 index 51cf14bf09..0000000000 --- a/api/server/middleware/validateEndpoint.js +++ /dev/null @@ -1,20 +0,0 @@ -const { handleError } = require('@librechat/api'); - -function validateEndpoint(req, res, next) { - const { endpoint: _endpoint, endpointType } = req.body; - const endpoint = endpointType ?? _endpoint; - - if (!req.body.text || req.body.text.length === 0) { - return handleError(res, { text: 'Prompt empty or too short' }); - } - - const pathEndpoint = req.baseUrl.split('/')[3]; - - if (endpoint !== pathEndpoint) { - return handleError(res, { text: 'Illegal request: Endpoint mismatch' }); - } - - next(); -} - -module.exports = validateEndpoint; diff --git a/api/server/routes/assistants/chatV1.js b/api/server/routes/assistants/chatV1.js index 36ed6d49e0..67bfc007a6 100644 --- a/api/server/routes/assistants/chatV1.js +++ b/api/server/routes/assistants/chatV1.js @@ -5,7 +5,6 @@ const { setHeaders, handleAbort, validateModel, - // validateEndpoint, buildEndpointOption, } = require('~/server/middleware'); const validateConvoAccess = require('~/server/middleware/validate/convoAccess'); diff --git a/api/server/routes/assistants/chatV2.js b/api/server/routes/assistants/chatV2.js index e50994e9bc..4612743e47 100644 --- a/api/server/routes/assistants/chatV2.js +++ b/api/server/routes/assistants/chatV2.js @@ -5,7 +5,6 @@ const { setHeaders, handleAbort, validateModel, - // validateEndpoint, buildEndpointOption, } = require('~/server/middleware'); const validateConvoAccess = require('~/server/middleware/validate/convoAccess'); diff --git a/api/server/routes/edit/anthropic.js b/api/server/routes/edit/anthropic.js deleted file mode 100644 index 704a9f4ea4..0000000000 --- a/api/server/routes/edit/anthropic.js +++ /dev/null @@ -1,24 +0,0 @@ -const express = require('express'); -const EditController = require('~/server/controllers/EditController'); -const { initializeClient } = require('~/server/services/Endpoints/anthropic'); -const { - setHeaders, - validateModel, - validateEndpoint, - buildEndpointOption, -} = require('~/server/middleware'); - -const router = express.Router(); - -router.post( - '/', - validateEndpoint, - 
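// A minimal sketch of the narrowed ban gate above, assuming Express's standard
// baseUrl/originalUrl semantics (the helper name is illustrative, not part of the patch):
//
//   const isAgentChatRequest = (req) =>
//     req.baseUrl === '/api/agents' && req.originalUrl.startsWith('/api/agents/chat');
//
//   isAgentChatRequest({ baseUrl: '/api/agents', originalUrl: '/api/agents/chat/abc123' }); // true
//   isAgentChatRequest({ baseUrl: '/api/convos', originalUrl: '/api/convos' });             // false
//
// Only matching chat requests are routed through denyRequest with a BAN violation;
// requests without an identifiable browser still receive the plain 403 JSON body.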
validateModel, - buildEndpointOption, - setHeaders, - async (req, res, next) => { - await EditController(req, res, next, initializeClient); - }, -); - -module.exports = router; diff --git a/api/server/routes/edit/custom.js b/api/server/routes/edit/custom.js deleted file mode 100644 index a6fd804763..0000000000 --- a/api/server/routes/edit/custom.js +++ /dev/null @@ -1,26 +0,0 @@ -const express = require('express'); -const EditController = require('~/server/controllers/EditController'); -const { initializeClient } = require('~/server/services/Endpoints/custom'); -const { addTitle } = require('~/server/services/Endpoints/openAI'); -const { - handleAbort, - setHeaders, - validateModel, - validateEndpoint, - buildEndpointOption, -} = require('~/server/middleware'); - -const router = express.Router(); - -router.post( - '/', - validateEndpoint, - validateModel, - buildEndpointOption, - setHeaders, - async (req, res, next) => { - await EditController(req, res, next, initializeClient, addTitle); - }, -); - -module.exports = router; diff --git a/api/server/routes/edit/google.js b/api/server/routes/edit/google.js deleted file mode 100644 index 187f4f6158..0000000000 --- a/api/server/routes/edit/google.js +++ /dev/null @@ -1,24 +0,0 @@ -const express = require('express'); -const EditController = require('~/server/controllers/EditController'); -const { initializeClient } = require('~/server/services/Endpoints/google'); -const { - setHeaders, - validateModel, - validateEndpoint, - buildEndpointOption, -} = require('~/server/middleware'); - -const router = express.Router(); - -router.post( - '/', - validateEndpoint, - validateModel, - buildEndpointOption, - setHeaders, - async (req, res, next) => { - await EditController(req, res, next, initializeClient); - }, -); - -module.exports = router; diff --git a/api/server/routes/edit/index.js b/api/server/routes/edit/index.js deleted file mode 100644 index 2ebc57a13f..0000000000 --- a/api/server/routes/edit/index.js +++ /dev/null @@ -1,45 +0,0 @@ -const { isEnabled } = require('@librechat/api'); -const { EModelEndpoint } = require('librechat-data-provider'); -const { - validateConvoAccess, - messageUserLimiter, - concurrentLimiter, - messageIpLimiter, - requireJwtAuth, - checkBan, - uaParser, -} = require('~/server/middleware'); -const anthropic = require('./anthropic'); -const express = require('express'); -const openAI = require('./openAI'); -const custom = require('./custom'); -const google = require('./google'); - -const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? 
{}; - -const router = express.Router(); - -router.use(requireJwtAuth); -router.use(checkBan); -router.use(uaParser); - -if (isEnabled(LIMIT_CONCURRENT_MESSAGES)) { - router.use(concurrentLimiter); -} - -if (isEnabled(LIMIT_MESSAGE_IP)) { - router.use(messageIpLimiter); -} - -if (isEnabled(LIMIT_MESSAGE_USER)) { - router.use(messageUserLimiter); -} - -router.use(validateConvoAccess); - -router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI); -router.use(`/${EModelEndpoint.anthropic}`, anthropic); -router.use(`/${EModelEndpoint.google}`, google); -router.use(`/${EModelEndpoint.custom}`, custom); - -module.exports = router; diff --git a/api/server/routes/edit/openAI.js b/api/server/routes/edit/openAI.js deleted file mode 100644 index ee25a42ee3..0000000000 --- a/api/server/routes/edit/openAI.js +++ /dev/null @@ -1,26 +0,0 @@ -const express = require('express'); -const EditController = require('~/server/controllers/EditController'); -const { initializeClient } = require('~/server/services/Endpoints/openAI'); -const { - setHeaders, - validateModel, - validateEndpoint, - buildEndpointOption, - moderateText, -} = require('~/server/middleware'); - -const router = express.Router(); -router.use(moderateText); - -router.post( - '/', - validateEndpoint, - validateModel, - buildEndpointOption, - setHeaders, - async (req, res, next) => { - await EditController(req, res, next, initializeClient); - }, -); - -module.exports = router; diff --git a/api/server/routes/index.js b/api/server/routes/index.js index e8250a1f4d..f3571099cb 100644 --- a/api/server/routes/index.js +++ b/api/server/routes/index.js @@ -8,7 +8,6 @@ const memories = require('./memories'); const presets = require('./presets'); const prompts = require('./prompts'); const balance = require('./balance'); -const plugins = require('./plugins'); const actions = require('./actions'); const banner = require('./banner'); const search = require('./search'); @@ -22,14 +21,12 @@ const files = require('./files'); const share = require('./share'); const tags = require('./tags'); const auth = require('./auth'); -const edit = require('./edit'); const keys = require('./keys'); const user = require('./user'); const mcp = require('./mcp'); module.exports = { mcp, - edit, auth, keys, user, @@ -45,7 +42,6 @@ module.exports = { config, models, prompts, - plugins, actions, presets, balance, diff --git a/api/server/routes/messages.js b/api/server/routes/messages.js index 901dd8961f..0438edb933 100644 --- a/api/server/routes/messages.js +++ b/api/server/routes/messages.js @@ -12,7 +12,6 @@ const { } = require('~/models'); const { findAllArtifacts, replaceArtifactContent } = require('~/server/services/Artifacts/update'); const { requireJwtAuth, validateMessageReq } = require('~/server/middleware'); -const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc'); const { getConvosQueried } = require('~/models/Conversation'); const { Message } = require('~/db/models'); @@ -68,9 +67,6 @@ router.get('/', async (req, res) => { const cleanedMessages = []; for (let i = 0; i < messages.length; i++) { let message = messages[i]; - if (message.conversationId.includes('--')) { - message.conversationId = cleanUpPrimaryKeyValue(message.conversationId); - } if (result.convoMap[message.conversationId]) { messageIds.push(message.messageId); cleanedMessages.push(message); diff --git a/api/server/routes/plugins.js b/api/server/routes/plugins.js deleted file mode 100644 index 00f3fca75b..0000000000 --- a/api/server/routes/plugins.js +++ /dev/null @@ -1,9 
+0,0 @@ -const express = require('express'); -const { getAvailablePluginsController } = require('~/server/controllers/PluginController'); -const { requireJwtAuth } = require('~/server/middleware'); - -const router = express.Router(); - -router.get('/', requireJwtAuth, getAvailablePluginsController); - -module.exports = router; diff --git a/api/server/services/AssistantService.js b/api/server/services/AssistantService.js index 892afb7002..a7018f715b 100644 --- a/api/server/services/AssistantService.js +++ b/api/server/services/AssistantService.js @@ -23,7 +23,7 @@ const { TextStream } = require('~/app/clients'); * Sorts, processes, and flattens messages to a single string. * * @param {Object} params - Params for creating the onTextProgress function. - * @param {OpenAIClient} params.openai - The OpenAI client instance. + * @param {OpenAI} params.openai - The OpenAI SDK client instance. * @param {string} params.conversationId - The current conversation ID. * @param {string} params.userMessageId - The user message ID; response's `parentMessageId`. * @param {string} params.messageId - The response message ID. @@ -74,7 +74,7 @@ async function createOnTextProgress({ * Retrieves the response from an OpenAI run. * * @param {Object} params - The parameters for getting the response. - * @param {OpenAIClient} params.openai - The OpenAI client instance. + * @param {OpenAI} params.openai - The OpenAI SDK client instance. * @param {string} params.run_id - The ID of the run to get the response for. * @param {string} params.thread_id - The ID of the thread associated with the run. * @return {Promise} @@ -162,7 +162,7 @@ function hasToolCallChanged(previousCall, currentCall) { * Creates a handler function for steps in progress, specifically for * processing messages and managing seen completed messages. * - * @param {OpenAIClient} openai - The OpenAI client instance. + * @param {OpenAI} openai - The OpenAI SDK client instance. * @param {string} thread_id - The ID of the thread the run is in. * @param {ThreadMessage[]} messages - The accumulated messages for the run. * @return {InProgressFunction} a function to handle steps in progress. @@ -334,7 +334,7 @@ function createInProgressHandler(openai, thread_id, messages) { * Initializes a RunManager with handlers, then invokes waitForRun to monitor and manage an OpenAI run. * * @param {Object} params - The parameters for managing and monitoring the run. - * @param {OpenAIClient} params.openai - The OpenAI client instance. + * @param {OpenAI} params.openai - The OpenAI SDK client instance. * @param {string} params.run_id - The ID of the run to manage and monitor. * @param {string} params.thread_id - The ID of the thread associated with the run. * @param {RunStep[]} params.accumulatedSteps - The accumulated steps for the run. diff --git a/api/server/services/Config/EndpointService.js b/api/server/services/Config/EndpointService.js index d8277dd67f..1cd4c9b6b8 100644 --- a/api/server/services/Config/EndpointService.js +++ b/api/server/services/Config/EndpointService.js @@ -8,8 +8,6 @@ const { ASSISTANTS_API_KEY: assistantsApiKey, AZURE_API_KEY: azureOpenAIApiKey, ANTHROPIC_API_KEY: anthropicApiKey, - CHATGPT_TOKEN: chatGPTToken, - PLUGINS_USE_AZURE, GOOGLE_KEY: googleKey, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL, @@ -17,21 +15,15 @@ const { AZURE_ASSISTANTS_BASE_URL, } = process.env ?? {}; -const useAzurePlugins = !!PLUGINS_USE_AZURE; - -const userProvidedOpenAI = useAzurePlugins - ? 
isUserProvided(azureOpenAIApiKey) - : isUserProvided(openAIApiKey); +const userProvidedOpenAI = isUserProvided(openAIApiKey); module.exports = { config: { + googleKey, openAIApiKey, azureOpenAIApiKey, - useAzurePlugins, userProvidedOpenAI, - googleKey, [EModelEndpoint.anthropic]: generateConfig(anthropicApiKey), - [EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken), [EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY), [EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL), [EModelEndpoint.assistants]: generateConfig( diff --git a/api/server/services/Config/loadAsyncEndpoints.js b/api/server/services/Config/loadAsyncEndpoints.js index 48b42131e0..0d6a05aff7 100644 --- a/api/server/services/Config/loadAsyncEndpoints.js +++ b/api/server/services/Config/loadAsyncEndpoints.js @@ -1,17 +1,11 @@ const path = require('path'); const { logger } = require('@librechat/data-schemas'); -const { EModelEndpoint } = require('librechat-data-provider'); const { loadServiceKey, isUserProvided } = require('@librechat/api'); const { config } = require('./EndpointService'); -const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } = config; - -/** - * Load async endpoints and return a configuration object - * @param {AppConfig} [appConfig] - The app configuration object - */ -async function loadAsyncEndpoints(appConfig) { +async function loadAsyncEndpoints() { let serviceKey, googleUserProvides; + const { googleKey } = config; /** Check if GOOGLE_KEY is provided at all(including 'user_provided') */ const isGoogleKeyProvided = googleKey && googleKey.trim() !== ''; @@ -34,21 +28,7 @@ async function loadAsyncEndpoints(appConfig) { const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false; - const useAzure = !!appConfig?.endpoints?.[EModelEndpoint.azureOpenAI]?.plugins; - const gptPlugins = - useAzure || openAIApiKey || azureOpenAIApiKey - ? { - availableAgents: ['classic', 'functions'], - userProvide: useAzure ? false : userProvidedOpenAI, - userProvideURL: useAzure - ? false - : config[EModelEndpoint.openAI]?.userProvideURL || - config[EModelEndpoint.azureOpenAI]?.userProvideURL, - azure: useAzurePlugins || useAzure, - } - : false; - - return { google, gptPlugins }; + return { google }; } module.exports = loadAsyncEndpoints; diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js index 34b6a1ecd2..555bbcacf5 100644 --- a/api/server/services/Config/loadConfigModels.js +++ b/api/server/services/Config/loadConfigModels.js @@ -25,10 +25,6 @@ async function loadConfigModels(req) { modelsConfig[EModelEndpoint.azureOpenAI] = modelNames; } - if (modelNames && azureConfig && azureConfig.plugins) { - modelsConfig[EModelEndpoint.gptPlugins] = modelNames; - } - if (azureConfig?.assistants && azureConfig.assistantModels) { modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels; } diff --git a/api/server/services/Config/loadDefaultEConfig.js b/api/server/services/Config/loadDefaultEConfig.js index f3c12a4933..557b93ce8e 100644 --- a/api/server/services/Config/loadDefaultEConfig.js +++ b/api/server/services/Config/loadDefaultEConfig.js @@ -8,8 +8,8 @@ const { config } = require('./EndpointService'); * @returns {Promise>} An object whose keys are endpoint names and values are objects that contain the endpoint configuration and an order. 
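// A minimal sketch of the simplified gate above, assuming isUserProvided is the usual
// sentinel comparison used elsewhere in the codebase (shown here for illustration only):
//
//   const isUserProvided = (value) => value === 'user_provided';
//   const userProvidedOpenAI = isUserProvided(process.env.OPENAI_API_KEY);
//
// With PLUGINS_USE_AZURE removed, the Azure key no longer decides whether OpenAI
// counts as user-provided.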
*/ async function loadDefaultEndpointsConfig(appConfig) { - const { google, gptPlugins } = await loadAsyncEndpoints(appConfig); - const { assistants, azureAssistants, azureOpenAI, chatGPTBrowser } = config; + const { google } = await loadAsyncEndpoints(appConfig); + const { assistants, azureAssistants, azureOpenAI } = config; const enabledEndpoints = getEnabledEndpoints(); @@ -20,8 +20,6 @@ async function loadDefaultEndpointsConfig(appConfig) { [EModelEndpoint.azureAssistants]: azureAssistants, [EModelEndpoint.azureOpenAI]: azureOpenAI, [EModelEndpoint.google]: google, - [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser, - [EModelEndpoint.gptPlugins]: gptPlugins, [EModelEndpoint.anthropic]: config[EModelEndpoint.anthropic], [EModelEndpoint.bedrock]: config[EModelEndpoint.bedrock], }; diff --git a/api/server/services/Endpoints/anthropic/initialize.js b/api/server/services/Endpoints/anthropic/initialize.js index 88639b3d7c..5944240379 100644 --- a/api/server/services/Endpoints/anthropic/initialize.js +++ b/api/server/services/Endpoints/anthropic/initialize.js @@ -1,9 +1,8 @@ const { getLLMConfig } = require('@librechat/api'); const { EModelEndpoint } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); -const AnthropicClient = require('~/app/clients/AnthropicClient'); -const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => { +const initializeClient = async ({ req, endpointOption, overrideModel }) => { const appConfig = req.config; const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env; const expiresAt = req.body.key; @@ -36,35 +35,19 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio clientOptions._lc_stream_delay = allConfig.streamRate; } - if (optionsOnly) { - clientOptions = Object.assign( - { - proxy: PROXY ?? null, - reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null, - modelOptions: endpointOption?.model_parameters ?? {}, - }, - clientOptions, - ); - if (overrideModel) { - clientOptions.modelOptions.model = overrideModel; - } - clientOptions.modelOptions.user = req.user.id; - return getLLMConfig(anthropicApiKey, clientOptions); + clientOptions = Object.assign( + { + proxy: PROXY ?? null, + reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null, + modelOptions: endpointOption?.model_parameters ?? {}, + }, + clientOptions, + ); + if (overrideModel) { + clientOptions.modelOptions.model = overrideModel; } - - const client = new AnthropicClient(anthropicApiKey, { - req, - res, - reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null, - proxy: PROXY ?? 
null, - ...clientOptions, - ...endpointOption, - }); - - return { - client, - anthropicApiKey, - }; + clientOptions.modelOptions.user = req.user.id; + return getLLMConfig(anthropicApiKey, clientOptions); }; module.exports = initializeClient; diff --git a/api/server/services/Endpoints/assistants/initalize.js b/api/server/services/Endpoints/assistants/initalize.js index 6b1af9b3db..56a00cfe3f 100644 --- a/api/server/services/Endpoints/assistants/initalize.js +++ b/api/server/services/Endpoints/assistants/initalize.js @@ -7,9 +7,8 @@ const { getUserKeyExpiry, checkUserKeyExpiry, } = require('~/server/services/UserService'); -const OAIClient = require('~/app/clients/OpenAIClient'); -const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => { +const initializeClient = async ({ req, res, version }) => { const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env; const userProvidesKey = isUserProvided(ASSISTANTS_API_KEY); @@ -34,14 +33,6 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie }, }; - const clientOptions = { - reverseProxyUrl: baseURL ?? null, - proxy: PROXY ?? null, - req, - res, - ...endpointOption, - }; - if (userProvidesKey & !apiKey) { throw new Error( JSON.stringify({ @@ -78,15 +69,6 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie openai.req = req; openai.res = res; - if (endpointOption && initAppClient) { - const client = new OAIClient(apiKey, clientOptions); - return { - client, - openai, - openAIApiKey: apiKey, - }; - } - return { openai, openAIApiKey: apiKey, diff --git a/api/server/services/Endpoints/assistants/initialize.spec.js b/api/server/services/Endpoints/assistants/initialize.spec.js deleted file mode 100644 index 3a870dc61d..0000000000 --- a/api/server/services/Endpoints/assistants/initialize.spec.js +++ /dev/null @@ -1,113 +0,0 @@ -// const OpenAI = require('openai'); -const { ProxyAgent } = require('undici'); -const { ErrorTypes } = require('librechat-data-provider'); -const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService'); -const initializeClient = require('./initalize'); -// const { OpenAIClient } = require('~/app'); - -jest.mock('~/server/services/UserService', () => ({ - getUserKey: jest.fn(), - getUserKeyExpiry: jest.fn(), - getUserKeyValues: jest.fn(), - checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry, -})); - -const today = new Date(); -const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10)); -const isoString = tenDaysFromToday.toISOString(); - -describe('initializeClient', () => { - // Set up environment variables - const originalEnvironment = process.env; - const app = { - locals: {}, - }; - - beforeEach(() => { - jest.resetModules(); // Clears the cache - process.env = { ...originalEnvironment }; // Make a copy - }); - - afterAll(() => { - process.env = originalEnvironment; // Restore original env vars - }); - - test('initializes OpenAI client with default API key and URL', async () => { - process.env.ASSISTANTS_API_KEY = 'default-api-key'; - process.env.ASSISTANTS_BASE_URL = 'https://default.api.url'; - - // Assuming 'isUserProvided' to return false for this test case - jest.mock('~/server/utils', () => ({ - isUserProvided: jest.fn().mockReturnValueOnce(false), - })); - - const req = { user: { id: 'user123' }, app }; - const res = {}; - - const { openai, openAIApiKey } = await initializeClient({ req, res }); - 
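// A minimal sketch of how a caller consumes the refactored Anthropic initializer above;
// the call shape follows the new signature, and the return value is assumed to be the
// plain options object produced by getLLMConfig (as it is used throughout this patch):
//
//   const options = await initializeClient({ req, endpointOption });
//   // options flows straight out of getLLMConfig(anthropicApiKey, clientOptions):
//   // no res handle to thread through, and no client instance to register or dispose.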
expect(openai.apiKey).toBe('default-api-key'); - expect(openAIApiKey).toBe('default-api-key'); - expect(openai.baseURL).toBe('https://default.api.url'); - }); - - test('initializes OpenAI client with user-provided API key and URL', async () => { - process.env.ASSISTANTS_API_KEY = 'user_provided'; - process.env.ASSISTANTS_BASE_URL = 'user_provided'; - - getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' }); - getUserKeyExpiry.mockResolvedValue(isoString); - - const req = { user: { id: 'user123' }, app }; - const res = {}; - - const { openai, openAIApiKey } = await initializeClient({ req, res }); - expect(openAIApiKey).toBe('user-api-key'); - expect(openai.apiKey).toBe('user-api-key'); - expect(openai.baseURL).toBe('https://user.api.url'); - }); - - test('throws error for invalid JSON in user-provided values', async () => { - process.env.ASSISTANTS_API_KEY = 'user_provided'; - getUserKey.mockResolvedValue('invalid-json'); - getUserKeyExpiry.mockResolvedValue(isoString); - getUserKeyValues.mockImplementation(() => { - let userValues = getUserKey(); - try { - userValues = JSON.parse(userValues); - } catch (e) { - throw new Error( - JSON.stringify({ - type: ErrorTypes.INVALID_USER_KEY, - }), - ); - } - return userValues; - }); - - const req = { user: { id: 'user123' } }; - const res = {}; - - await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/); - }); - - test('throws error if API key is not provided', async () => { - delete process.env.ASSISTANTS_API_KEY; // Simulate missing API key - - const req = { user: { id: 'user123' }, app }; - const res = {}; - - await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/); - }); - - test('initializes OpenAI client with proxy configuration', async () => { - process.env.ASSISTANTS_API_KEY = 'test-key'; - process.env.PROXY = 'http://proxy.server'; - - const req = { user: { id: 'user123' }, app }; - const res = {}; - - const { openai } = await initializeClient({ req, res }); - expect(openai.fetchOptions).toBeDefined(); - expect(openai.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent); - }); -}); diff --git a/api/server/services/Endpoints/assistants/title.js b/api/server/services/Endpoints/assistants/title.js index 223d3badc6..020549a1be 100644 --- a/api/server/services/Endpoints/assistants/title.js +++ b/api/server/services/Endpoints/assistants/title.js @@ -1,32 +1,84 @@ -const { isEnabled } = require('@librechat/api'); +const { isEnabled, sanitizeTitle } = require('@librechat/api'); +const { logger } = require('@librechat/data-schemas'); const { CacheKeys } = require('librechat-data-provider'); const { saveConvo } = require('~/models/Conversation'); const getLogStores = require('~/cache/getLogStores'); +const initializeClient = require('./initalize'); -const addTitle = async (req, { text, responseText, conversationId, client }) => { +/** + * Generates a conversation title using OpenAI SDK + * @param {Object} params + * @param {OpenAI} params.openai - The OpenAI SDK client instance + * @param {string} params.text - User's message text + * @param {string} params.responseText - Assistant's response text + * @returns {Promise} + */ +const generateTitle = async ({ openai, text, responseText }) => { + const titlePrompt = `Please generate a concise title (max 40 characters) for a conversation that starts with: +User: ${text} +Assistant: ${responseText} + +Title:`; + + const completion = await openai.chat.completions.create({ + model: 'gpt-3.5-turbo', + messages: [ + 
{ + role: 'user', + content: titlePrompt, + }, + ], + temperature: 0.7, + max_tokens: 20, + }); + + const title = completion.choices[0]?.message?.content?.trim() || 'New conversation'; + return sanitizeTitle(title); +}; + +/** + * Adds a title to a conversation asynchronously + * @param {ServerRequest} req + * @param {Object} params + * @param {string} params.text - User's message text + * @param {string} params.responseText - Assistant's response text + * @param {string} params.conversationId - Conversation ID + */ +const addTitle = async (req, { text, responseText, conversationId }) => { const { TITLE_CONVO = 'true' } = process.env ?? {}; if (!isEnabled(TITLE_CONVO)) { return; } - if (client.options.titleConvo === false) { - return; - } - const titleCache = getLogStores(CacheKeys.GEN_TITLE); const key = `${req.user.id}-${conversationId}`; - const title = await client.titleConvo({ text, conversationId, responseText }); - await titleCache.set(key, title, 120000); + try { + const { openai } = await initializeClient({ req }); + const title = await generateTitle({ openai, text, responseText }); + await titleCache.set(key, title, 120000); - await saveConvo( - req, - { - conversationId, - title, - }, - { context: 'api/server/services/Endpoints/assistants/addTitle.js' }, - ); + await saveConvo( + req, + { + conversationId, + title, + }, + { context: 'api/server/services/Endpoints/assistants/addTitle.js' }, + ); + } catch (error) { + logger.error('[addTitle] Error generating title:', error); + const fallbackTitle = text.length > 40 ? text.substring(0, 37) + '...' : text; + await titleCache.set(key, fallbackTitle, 120000); + await saveConvo( + req, + { + conversationId, + title: fallbackTitle, + }, + { context: 'api/server/services/Endpoints/assistants/addTitle.js' }, + ); + } }; module.exports = addTitle; diff --git a/api/server/services/Endpoints/azureAssistants/initialize.js b/api/server/services/Endpoints/azureAssistants/initialize.js index a6fb3e85f7..85f77b60de 100644 --- a/api/server/services/Endpoints/azureAssistants/initialize.js +++ b/api/server/services/Endpoints/azureAssistants/initialize.js @@ -7,7 +7,6 @@ const { getUserKeyValues, getUserKeyExpiry, } = require('~/server/services/UserService'); -const OAIClient = require('~/app/clients/OpenAIClient'); class Files { constructor(client) { @@ -184,15 +183,6 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie openai.locals = { ...(openai.locals ?? 
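// A minimal sketch of the rewritten title flow above (all values are made-up examples):
//
//   await addTitle(req, {
//     text: 'How do I rotate a PDF?',
//     responseText: 'You can rotate it with ...',
//     conversationId: 'convo-123',
//   });
//
// The SDK call caps output at max_tokens: 20 while the prompt asks for at most 40
// characters; the result is cached for 120000 ms per user/conversation and saved to
// the conversation, and any failure falls back to a truncated copy of the user's text.
// One trade-off visible in this diff: the per-client titleConvo toggle is gone, so the
// TITLE_CONVO environment flag alone now gates title generation.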
{}), azureOptions }; } - if (endpointOption && initAppClient) { - const client = new OAIClient(apiKey, clientOptions); - return { - client, - openai, - openAIApiKey: apiKey, - }; - } - return { openai, openAIApiKey: apiKey, diff --git a/api/server/services/Endpoints/azureAssistants/initialize.spec.js b/api/server/services/Endpoints/azureAssistants/initialize.spec.js deleted file mode 100644 index d74373ae1b..0000000000 --- a/api/server/services/Endpoints/azureAssistants/initialize.spec.js +++ /dev/null @@ -1,134 +0,0 @@ -// const OpenAI = require('openai'); -const { ProxyAgent } = require('undici'); -const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider'); -const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService'); -const initializeClient = require('./initialize'); -// const { OpenAIClient } = require('~/app'); - -jest.mock('~/server/services/UserService', () => ({ - getUserKey: jest.fn(), - getUserKeyExpiry: jest.fn(), - getUserKeyValues: jest.fn(), - checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry, -})); - -// Config is now passed via req.config, not getAppConfig - -const today = new Date(); -const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10)); -const isoString = tenDaysFromToday.toISOString(); - -describe('initializeClient', () => { - // Set up environment variables - const originalEnvironment = process.env; - const app = { - locals: {}, - }; - - beforeEach(() => { - jest.resetModules(); // Clears the cache - process.env = { ...originalEnvironment }; // Make a copy - }); - - afterAll(() => { - process.env = originalEnvironment; // Restore original env vars - }); - - test('initializes OpenAI client with default API key and URL', async () => { - process.env.AZURE_ASSISTANTS_API_KEY = 'default-api-key'; - process.env.AZURE_ASSISTANTS_BASE_URL = 'https://default.api.url'; - - // Assuming 'isUserProvided' to return false for this test case - jest.mock('~/server/utils', () => ({ - isUserProvided: jest.fn().mockReturnValueOnce(false), - })); - - const req = { - user: { id: 'user123' }, - app, - config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } }, - }; - const res = {}; - - const { openai, openAIApiKey } = await initializeClient({ req, res }); - expect(openai.apiKey).toBe('default-api-key'); - expect(openAIApiKey).toBe('default-api-key'); - expect(openai.baseURL).toBe('https://default.api.url'); - }); - - test('initializes OpenAI client with user-provided API key and URL', async () => { - process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided'; - process.env.AZURE_ASSISTANTS_BASE_URL = 'user_provided'; - - getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' }); - getUserKeyExpiry.mockResolvedValue(isoString); - - const req = { - user: { id: 'user123' }, - app, - config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } }, - }; - const res = {}; - - const { openai, openAIApiKey } = await initializeClient({ req, res }); - expect(openAIApiKey).toBe('user-api-key'); - expect(openai.apiKey).toBe('user-api-key'); - expect(openai.baseURL).toBe('https://user.api.url'); - }); - - test('throws error for invalid JSON in user-provided values', async () => { - process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided'; - getUserKey.mockResolvedValue('invalid-json'); - getUserKeyExpiry.mockResolvedValue(isoString); - getUserKeyValues.mockImplementation(() => { - let userValues = getUserKey(); - try { - userValues = JSON.parse(userValues); - } 
catch { - throw new Error( - JSON.stringify({ - type: ErrorTypes.INVALID_USER_KEY, - }), - ); - } - return userValues; - }); - - const req = { - user: { id: 'user123' }, - config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } }, - }; - const res = {}; - - await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/); - }); - - test('throws error if API key is not provided', async () => { - delete process.env.AZURE_ASSISTANTS_API_KEY; // Simulate missing API key - - const req = { - user: { id: 'user123' }, - app, - config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } }, - }; - const res = {}; - - await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/); - }); - - test('initializes OpenAI client with proxy configuration', async () => { - process.env.AZURE_ASSISTANTS_API_KEY = 'test-key'; - process.env.PROXY = 'http://proxy.server'; - - const req = { - user: { id: 'user123' }, - app, - config: { endpoints: { [EModelEndpoint.azureOpenAI]: {} } }, - }; - const res = {}; - - const { openai } = await initializeClient({ req, res }); - expect(openai.fetchOptions).toBeDefined(); - expect(openai.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent); - }); -}); diff --git a/api/server/services/Endpoints/custom/initialize.js b/api/server/services/Endpoints/custom/initialize.js index 5aa8b08a92..e0527d7d8a 100644 --- a/api/server/services/Endpoints/custom/initialize.js +++ b/api/server/services/Endpoints/custom/initialize.js @@ -8,12 +8,11 @@ const { } = require('librechat-data-provider'); const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); const { fetchModels } = require('~/server/services/ModelService'); -const OpenAIClient = require('~/app/clients/OpenAIClient'); const getLogStores = require('~/cache/getLogStores'); const { PROXY } = process.env; -const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => { +const initializeClient = async ({ req, endpointOption, overrideEndpoint }) => { const appConfig = req.config; const { key: expiresAt } = req.body; const endpoint = overrideEndpoint ?? req.body.endpoint; @@ -120,38 +119,27 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid let clientOptions = { reverseProxyUrl: baseURL ?? null, proxy: PROXY ?? null, - req, - res, ...customOptions, ...endpointOption, }; - if (optionsOnly) { - const modelOptions = endpointOption?.model_parameters ?? {}; - clientOptions = Object.assign( - { - modelOptions, - }, - clientOptions, - ); - clientOptions.modelOptions.user = req.user.id; - const options = getOpenAIConfig(apiKey, clientOptions, endpoint); - if (options != null) { - options.useLegacyContent = true; - options.endpointTokenConfig = endpointTokenConfig; - } - if (!clientOptions.streamRate) { - return options; - } - options.llmConfig._lc_stream_delay = clientOptions.streamRate; - return options; + const modelOptions = endpointOption?.model_parameters ?? 
{}; + clientOptions = Object.assign( + { + modelOptions, + }, + clientOptions, + ); + clientOptions.modelOptions.user = req.user.id; + const options = getOpenAIConfig(apiKey, clientOptions, endpoint); + if (options != null) { + options.useLegacyContent = true; + options.endpointTokenConfig = endpointTokenConfig; } - - const client = new OpenAIClient(apiKey, clientOptions); - return { - client, - openAIApiKey: apiKey, - }; + if (clientOptions.streamRate) { + options.llmConfig._lc_stream_delay = clientOptions.streamRate; + } + return options; }; module.exports = initializeClient; diff --git a/api/server/services/Endpoints/custom/initialize.spec.js b/api/server/services/Endpoints/custom/initialize.spec.js deleted file mode 100644 index d12906df9a..0000000000 --- a/api/server/services/Endpoints/custom/initialize.spec.js +++ /dev/null @@ -1,106 +0,0 @@ -const initializeClient = require('./initialize'); - -jest.mock('@librechat/api', () => ({ - ...jest.requireActual('@librechat/api'), - resolveHeaders: jest.fn(), - getOpenAIConfig: jest.fn(), - getCustomEndpointConfig: jest.fn().mockReturnValue({ - apiKey: 'test-key', - baseURL: 'https://test.com', - headers: { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' }, - models: { default: ['test-model'] }, - }), -})); - -jest.mock('~/server/services/UserService', () => ({ - getUserKeyValues: jest.fn(), - checkUserKeyExpiry: jest.fn(), -})); - -// Config is now passed via req.config, not getAppConfig - -jest.mock('~/server/services/ModelService', () => ({ - fetchModels: jest.fn(), -})); - -jest.mock('~/app/clients/OpenAIClient', () => { - return jest.fn().mockImplementation(() => ({ - options: {}, - })); -}); - -jest.mock('~/cache/getLogStores', () => - jest.fn().mockReturnValue({ - get: jest.fn(), - }), -); - -describe('custom/initializeClient', () => { - const mockRequest = { - body: { endpoint: 'test-endpoint' }, - user: { id: 'user-123', email: 'test@example.com', role: 'user' }, - app: { locals: {} }, - config: { - endpoints: { - all: { - streamRate: 25, - }, - }, - }, - }; - const mockResponse = {}; - - beforeEach(() => { - jest.clearAllMocks(); - const { getCustomEndpointConfig, resolveHeaders, getOpenAIConfig } = require('@librechat/api'); - getCustomEndpointConfig.mockReturnValue({ - apiKey: 'test-key', - baseURL: 'https://test.com', - headers: { 'x-user': '{{LIBRECHAT_USER_ID}}', 'x-email': '{{LIBRECHAT_USER_EMAIL}}' }, - models: { default: ['test-model'] }, - }); - resolveHeaders.mockReturnValue({ 'x-user': 'user-123', 'x-email': 'test@example.com' }); - getOpenAIConfig.mockReturnValue({ - useLegacyContent: true, - endpointTokenConfig: null, - llmConfig: { - callbacks: [], - }, - }); - }); - - it('stores original template headers for deferred resolution', async () => { - /** - * Note: Request-based Header Resolution is deferred until right before LLM request is made - * in the OpenAIClient or AgentClient, not during initialization. - * This test verifies that the initialize function completes successfully with optionsOnly flag, - * and that headers are passed through to be resolved later during the actual LLM request. 
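// A minimal sketch of the options object the custom-endpoint initializer above now
// always returns; the shape is inferred from the assignments in this diff, and the
// field values are illustrative:
//
//   const options = await initializeClient({ req, endpointOption });
//   // options.useLegacyContent === true
//   // options.endpointTokenConfig holds the endpoint's token config (possibly null)
//   // options.llmConfig._lc_stream_delay === clientOptions.streamRate, when set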
- */ - const result = await initializeClient({ - req: mockRequest, - res: mockResponse, - optionsOnly: true, - }); - // Verify that options are returned for later use - expect(result).toBeDefined(); - expect(result).toHaveProperty('useLegacyContent', true); - }); - - it('throws if endpoint config is missing', async () => { - const { getCustomEndpointConfig } = require('@librechat/api'); - getCustomEndpointConfig.mockReturnValueOnce(null); - await expect( - initializeClient({ req: mockRequest, res: mockResponse, optionsOnly: true }), - ).rejects.toThrow('Config not found for the test-endpoint custom endpoint.'); - }); - - it('throws if user is missing', async () => { - await expect( - initializeClient({ - req: { ...mockRequest, user: undefined }, - res: mockResponse, - optionsOnly: true, - }), - ).rejects.toThrow("Cannot read properties of undefined (reading 'id')"); - }); -}); diff --git a/api/server/services/Endpoints/google/initialize.js b/api/server/services/Endpoints/google/initialize.js index 9a685d679a..de4cf74ae2 100644 --- a/api/server/services/Endpoints/google/initialize.js +++ b/api/server/services/Endpoints/google/initialize.js @@ -2,9 +2,8 @@ const path = require('path'); const { EModelEndpoint, AuthKeys } = require('librechat-data-provider'); const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); -const { GoogleClient } = require('~/app'); -const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => { +const initializeClient = async ({ req, endpointOption, overrideModel }) => { const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env; const isUserProvided = GOOGLE_KEY === 'user_provided'; const { key: expiresAt } = req.body; @@ -62,8 +61,6 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio } clientOptions = { - req, - res, reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? null, authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? null, proxy: PROXY ?? null, @@ -71,25 +68,16 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio ...endpointOption, }; - if (optionsOnly) { - clientOptions = Object.assign( - { - modelOptions: endpointOption?.model_parameters ?? {}, - }, - clientOptions, - ); - if (overrideModel) { - clientOptions.modelOptions.model = overrideModel; - } - return getGoogleConfig(credentials, clientOptions); + clientOptions = Object.assign( + { + modelOptions: endpointOption?.model_parameters ?? 
{}, + }, + clientOptions, + ); + if (overrideModel) { + clientOptions.modelOptions.model = overrideModel; } - - const client = new GoogleClient(credentials, clientOptions); - - return { - client, - credentials, - }; + return getGoogleConfig(credentials, clientOptions); }; module.exports = initializeClient; diff --git a/api/server/services/Endpoints/google/initialize.spec.js b/api/server/services/Endpoints/google/initialize.spec.js deleted file mode 100644 index aa8a61e9c2..0000000000 --- a/api/server/services/Endpoints/google/initialize.spec.js +++ /dev/null @@ -1,101 +0,0 @@ -// file deepcode ignore HardcodedNonCryptoSecret: No hardcoded secrets -const { getUserKey } = require('~/server/services/UserService'); -const initializeClient = require('./initialize'); -const { GoogleClient } = require('~/app'); - -jest.mock('~/server/services/UserService', () => ({ - checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry, - getUserKey: jest.fn().mockImplementation(() => ({})), -})); - -// Config is now passed via req.config, not getAppConfig - -const app = { locals: {} }; - -describe('google/initializeClient', () => { - afterEach(() => { - jest.clearAllMocks(); - }); - - test('should initialize GoogleClient with user-provided credentials', async () => { - process.env.GOOGLE_KEY = 'user_provided'; - process.env.GOOGLE_REVERSE_PROXY = 'http://reverse.proxy'; - process.env.PROXY = 'http://proxy'; - - const expiresAt = new Date(Date.now() + 60000).toISOString(); - - const req = { - body: { key: expiresAt }, - user: { id: '123' }, - app, - config: { - endpoints: { - all: {}, - google: {}, - }, - }, - }; - const res = {}; - const endpointOption = { modelOptions: { model: 'default-model' } }; - - const { client, credentials } = await initializeClient({ req, res, endpointOption }); - - expect(getUserKey).toHaveBeenCalledWith({ userId: '123', name: 'google' }); - expect(client).toBeInstanceOf(GoogleClient); - expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy'); - expect(client.options.proxy).toBe('http://proxy'); - expect(credentials).toEqual({}); - }); - - test('should initialize GoogleClient with service key credentials', async () => { - process.env.GOOGLE_KEY = 'service_key'; - process.env.GOOGLE_REVERSE_PROXY = 'http://reverse.proxy'; - process.env.PROXY = 'http://proxy'; - - const req = { - body: { key: null }, - user: { id: '123' }, - app, - config: { - endpoints: { - all: {}, - google: {}, - }, - }, - }; - const res = {}; - const endpointOption = { modelOptions: { model: 'default-model' } }; - - const { client, credentials } = await initializeClient({ req, res, endpointOption }); - - expect(client).toBeInstanceOf(GoogleClient); - expect(client.options.reverseProxyUrl).toBe('http://reverse.proxy'); - expect(client.options.proxy).toBe('http://proxy'); - expect(credentials).toEqual({ - GOOGLE_SERVICE_KEY: {}, - GOOGLE_API_KEY: 'service_key', - }); - }); - - test('should handle expired user-provided key', async () => { - process.env.GOOGLE_KEY = 'user_provided'; - - const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired - const req = { - body: { key: expiresAt }, - user: { id: '123' }, - app, - config: { - endpoints: { - all: {}, - google: {}, - }, - }, - }; - const res = {}; - const endpointOption = { modelOptions: { model: 'default-model' } }; - await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow( - /expired_user_key/, - ); - }); -}); diff --git a/api/server/services/Endpoints/openAI/initialize.js 
b/api/server/services/Endpoints/openAI/initialize.js index cd691c6240..c6eccd5716 100644 --- a/api/server/services/Endpoints/openAI/initialize.js +++ b/api/server/services/Endpoints/openAI/initialize.js @@ -7,16 +7,8 @@ const { getAzureCredentials, } = require('@librechat/api'); const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); -const OpenAIClient = require('~/app/clients/OpenAIClient'); -const initializeClient = async ({ - req, - res, - endpointOption, - optionsOnly, - overrideEndpoint, - overrideModel, -}) => { +const initializeClient = async ({ req, endpointOption, overrideEndpoint, overrideModel }) => { const appConfig = req.config; const { PROXY, @@ -137,28 +129,19 @@ const initializeClient = async ({ throw new Error(`${endpoint} API Key not provided.`); } - if (optionsOnly) { - const modelOptions = endpointOption?.model_parameters ?? {}; - modelOptions.model = modelName; - clientOptions = Object.assign({ modelOptions }, clientOptions); - clientOptions.modelOptions.user = req.user.id; - const options = getOpenAIConfig(apiKey, clientOptions, endpoint); - if (options != null && serverless === true) { - options.useLegacyContent = true; - } - const streamRate = clientOptions.streamRate; - if (!streamRate) { - return options; - } - options.llmConfig._lc_stream_delay = streamRate; - return options; + const modelOptions = endpointOption?.model_parameters ?? {}; + modelOptions.model = modelName; + clientOptions = Object.assign({ modelOptions }, clientOptions); + clientOptions.modelOptions.user = req.user.id; + const options = getOpenAIConfig(apiKey, clientOptions, endpoint); + if (options != null && serverless === true) { + options.useLegacyContent = true; } - - const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions)); - return { - client, - openAIApiKey: apiKey, - }; + const streamRate = clientOptions.streamRate; + if (streamRate) { + options.llmConfig._lc_stream_delay = streamRate; + } + return options; }; module.exports = initializeClient; diff --git a/api/server/services/Endpoints/openAI/initialize.spec.js b/api/server/services/Endpoints/openAI/initialize.spec.js deleted file mode 100644 index d51300aafe..0000000000 --- a/api/server/services/Endpoints/openAI/initialize.spec.js +++ /dev/null @@ -1,431 +0,0 @@ -jest.mock('~/cache/getLogStores', () => ({ - getLogStores: jest.fn().mockReturnValue({ - get: jest.fn().mockResolvedValue({ - openAI: { apiKey: 'test-key' }, - }), - set: jest.fn(), - delete: jest.fn(), - }), -})); - -const { EModelEndpoint, ErrorTypes, validateAzureGroups } = require('librechat-data-provider'); -const { getUserKey, getUserKeyValues } = require('~/server/services/UserService'); -const initializeClient = require('./initialize'); -const { OpenAIClient } = require('~/app'); - -// Mock getUserKey since it's the only function we want to mock -jest.mock('~/server/services/UserService', () => ({ - getUserKey: jest.fn(), - getUserKeyValues: jest.fn(), - checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry, -})); - -const mockAppConfig = { - endpoints: { - openAI: { - apiKey: 'test-key', - }, - azureOpenAI: { - apiKey: 'test-azure-key', - modelNames: ['gpt-4-vision-preview', 'gpt-3.5-turbo', 'gpt-4'], - modelGroupMap: { - 'gpt-4-vision-preview': { - group: 'librechat-westus', - deploymentName: 'gpt-4-vision-preview', - version: '2024-02-15-preview', - }, - }, - groupMap: { - 'librechat-westus': { - apiKey: 'WESTUS_API_KEY', - instanceName: 'librechat-westus', - version: 
'2023-12-01-preview', - models: { - 'gpt-4-vision-preview': { - deploymentName: 'gpt-4-vision-preview', - version: '2024-02-15-preview', - }, - }, - }, - }, - }, - }, -}; - -describe('initializeClient', () => { - // Set up environment variables - const originalEnvironment = process.env; - const app = { - locals: {}, - }; - - const validAzureConfigs = [ - { - group: 'librechat-westus', - apiKey: 'WESTUS_API_KEY', - instanceName: 'librechat-westus', - version: '2023-12-01-preview', - models: { - 'gpt-4-vision-preview': { - deploymentName: 'gpt-4-vision-preview', - version: '2024-02-15-preview', - }, - 'gpt-3.5-turbo': { - deploymentName: 'gpt-35-turbo', - }, - 'gpt-3.5-turbo-1106': { - deploymentName: 'gpt-35-turbo-1106', - }, - 'gpt-4': { - deploymentName: 'gpt-4', - }, - 'gpt-4-1106-preview': { - deploymentName: 'gpt-4-1106-preview', - }, - }, - }, - { - group: 'librechat-eastus', - apiKey: 'EASTUS_API_KEY', - instanceName: 'librechat-eastus', - deploymentName: 'gpt-4-turbo', - version: '2024-02-15-preview', - models: { - 'gpt-4-turbo': true, - }, - baseURL: 'https://eastus.example.com', - additionalHeaders: { - 'x-api-key': 'x-api-key-value', - }, - }, - { - group: 'mistral-inference', - apiKey: 'AZURE_MISTRAL_API_KEY', - baseURL: - 'https://Mistral-large-vnpet-serverless.region.inference.ai.azure.com/v1/chat/completions', - serverless: true, - models: { - 'mistral-large': true, - }, - }, - { - group: 'llama-70b-chat', - apiKey: 'AZURE_LLAMA2_70B_API_KEY', - baseURL: - 'https://Llama-2-70b-chat-qmvyb-serverless.region.inference.ai.azure.com/v1/chat/completions', - serverless: true, - models: { - 'llama-70b-chat': true, - }, - }, - ]; - - const { modelNames } = validateAzureGroups(validAzureConfigs); - - beforeEach(() => { - jest.resetModules(); // Clears the cache - process.env = { ...originalEnvironment }; // Make a copy - }); - - afterAll(() => { - process.env = originalEnvironment; // Restore original env vars - }); - - test('should initialize client with OpenAI API key and default options', async () => { - process.env.OPENAI_API_KEY = 'test-openai-api-key'; - process.env.DEBUG_OPENAI = 'false'; - process.env.OPENAI_SUMMARIZE = 'false'; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const result = await initializeClient({ req, res, endpointOption }); - - expect(result.openAIApiKey).toBe('test-openai-api-key'); - expect(result.client).toBeInstanceOf(OpenAIClient); - }); - - test('should initialize client with Azure credentials when endpoint is azureOpenAI', async () => { - process.env.AZURE_API_KEY = 'test-azure-api-key'; - (process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'), - (process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'), - (process.env.AZURE_OPENAI_API_VERSION = 'some-value'), - (process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'), - (process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'), - (process.env.OPENAI_API_KEY = 'test-openai-api-key'); - process.env.DEBUG_OPENAI = 'false'; - process.env.OPENAI_SUMMARIZE = 'false'; - - const req = { - body: { - key: null, - endpoint: 'azureOpenAI', - model: 'gpt-4-vision-preview', - }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - - expect(client.openAIApiKey).toBe('WESTUS_API_KEY'); - 
expect(client.client).toBeInstanceOf(OpenAIClient); - }); - - test('should use the debug option when DEBUG_OPENAI is enabled', async () => { - process.env.OPENAI_API_KEY = 'test-openai-api-key'; - process.env.DEBUG_OPENAI = 'true'; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - - expect(client.client.options.debug).toBe(true); - }); - - test('should set contextStrategy to summarize when OPENAI_SUMMARIZE is enabled', async () => { - process.env.OPENAI_API_KEY = 'test-openai-api-key'; - process.env.OPENAI_SUMMARIZE = 'true'; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - - expect(client.client.options.contextStrategy).toBe('summarize'); - }); - - test('should set reverseProxyUrl and proxy when they are provided in the environment', async () => { - process.env.OPENAI_API_KEY = 'test-openai-api-key'; - process.env.OPENAI_REVERSE_PROXY = 'http://reverse.proxy'; - process.env.PROXY = 'http://proxy'; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - - expect(client.client.options.reverseProxyUrl).toBe('http://reverse.proxy'); - expect(client.client.options.proxy).toBe('http://proxy'); - }); - - test('should throw an error if the user-provided key has expired', async () => { - process.env.OPENAI_API_KEY = 'user_provided'; - process.env.AZURE_API_KEY = 'user_provided'; - process.env.DEBUG_OPENAI = 'false'; - process.env.OPENAI_SUMMARIZE = 'false'; - - const expiresAt = new Date(Date.now() - 10000).toISOString(); // Expired - const req = { - body: { key: expiresAt, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow( - /expired_user_key/, - ); - }); - - test('should throw an error if no API keys are provided in the environment', async () => { - // Clear the environment variables for API keys - delete process.env.OPENAI_API_KEY; - delete process.env.AZURE_API_KEY; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow( - `${EModelEndpoint.openAI} API Key not provided.`, - ); - }); - - it('should handle user-provided keys and check expiry', async () => { - // Set up the req.body to simulate user-provided key scenario - const req = { - body: { - key: new Date(Date.now() + 10000).toISOString(), - endpoint: EModelEndpoint.openAI, - }, - user: { - id: '123', - }, - app, - config: mockAppConfig, - }; - - const res = {}; - const endpointOption = {}; - - // Ensure the environment variable is set to 'user_provided' to match the isUserProvided condition - process.env.OPENAI_API_KEY = 'user_provided'; - - // Mock getUserKey to return the expected key - getUserKeyValues.mockResolvedValue({ apiKey: 
'test-user-provided-openai-api-key' }); - - // Call the initializeClient function - const result = await initializeClient({ req, res, endpointOption }); - - // Assertions - expect(result.openAIApiKey).toBe('test-user-provided-openai-api-key'); - }); - - test('should throw an error if the user-provided key is invalid', async () => { - const invalidKey = new Date(Date.now() - 100000).toISOString(); - const req = { - body: { key: invalidKey, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - // Ensure the environment variable is set to 'user_provided' to match the isUserProvided condition - process.env.OPENAI_API_KEY = 'user_provided'; - - // Mock getUserKey to return an invalid key - getUserKey.mockResolvedValue(invalidKey); - - await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow( - /expired_user_key/, - ); - }); - - test('should throw an error when user-provided values are not valid JSON', async () => { - process.env.OPENAI_API_KEY = 'user_provided'; - const req = { - body: { key: new Date(Date.now() + 10000).toISOString(), endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - // Mock getUserKey to return a non-JSON string - getUserKey.mockResolvedValue('not-a-json'); - getUserKeyValues.mockImplementation(() => { - let userValues = getUserKey(); - try { - userValues = JSON.parse(userValues); - } catch { - throw new Error( - JSON.stringify({ - type: ErrorTypes.INVALID_USER_KEY, - }), - ); - } - return userValues; - }); - - await expect(initializeClient({ req, res, endpointOption })).rejects.toThrow( - /invalid_user_key/, - ); - }); - - test('should initialize client correctly for Azure OpenAI with valid configuration', async () => { - // Set up Azure environment variables - process.env.WESTUS_API_KEY = 'test-westus-key'; - - const req = { - body: { - key: null, - endpoint: EModelEndpoint.azureOpenAI, - model: modelNames[0], - }, - user: { id: '123' }, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - expect(client.client.options.azure).toBeDefined(); - }); - - test('should initialize client with default options when certain env vars are not set', async () => { - delete process.env.DEBUG_OPENAI; - delete process.env.OPENAI_SUMMARIZE; - process.env.OPENAI_API_KEY = 'some-api-key'; - - const req = { - body: { key: null, endpoint: EModelEndpoint.openAI }, - user: { id: '123' }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - const client = await initializeClient({ req, res, endpointOption }); - - expect(client.client.options.debug).toBe(false); - expect(client.client.options.contextStrategy).toBe(null); - }); - - test('should correctly use user-provided apiKey and baseURL when provided', async () => { - process.env.OPENAI_API_KEY = 'user_provided'; - process.env.OPENAI_REVERSE_PROXY = 'user_provided'; - const req = { - body: { - key: new Date(Date.now() + 10000).toISOString(), - endpoint: EModelEndpoint.openAI, - }, - user: { - id: '123', - }, - app, - config: mockAppConfig, - }; - const res = {}; - const endpointOption = {}; - - getUserKeyValues.mockResolvedValue({ - apiKey: 'test', - baseURL: 'https://user-provided-url.com', - }); - - const result = await initializeClient({ req, res, endpointOption }); - - 
expect(result.openAIApiKey).toBe('test'); - expect(result.client.options.reverseProxyUrl).toBe('https://user-provided-url.com'); - }); -}); diff --git a/api/server/services/ModelService.js b/api/server/services/ModelService.js index d5b36558ba..88a14f1c2c 100644 --- a/api/server/services/ModelService.js +++ b/api/server/services/ModelService.js @@ -9,6 +9,7 @@ const { EModelEndpoint, } = require('librechat-data-provider'); const { OllamaClient } = require('~/app/clients/OllamaClient'); +const { config } = require('./Config/EndpointService'); const getLogStores = require('~/cache/getLogStores'); const { extractBaseURL } = require('~/utils'); @@ -27,8 +28,6 @@ const splitAndTrim = (input) => { .filter(Boolean); }; -const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config; - /** * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration. * @@ -138,11 +137,11 @@ const fetchModels = async ({ * @param {string} opts.user - The user ID to send to the API. * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure. * @param {boolean} [opts.assistants=false] - Whether to fetch models from Azure. - * @param {boolean} [opts.plugins=false] - Whether to fetch models from the plugins. * @param {string[]} [_models=[]] - The models to use as a fallback. */ const fetchOpenAIModels = async (opts, _models = []) => { let models = _models.slice() ?? []; + const { openAIApiKey } = config; let apiKey = openAIApiKey; const openaiBaseURL = 'https://api.openai.com/v1'; let baseURL = openaiBaseURL; @@ -204,7 +203,6 @@ const fetchOpenAIModels = async (opts, _models = []) => { * @param {object} opts - The options for fetching the models. * @param {string} opts.user - The user ID to send to the API. * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure. - * @param {boolean} [opts.plugins=false] - Whether to fetch models for the plugins endpoint. * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint. */ const getOpenAIModels = async (opts) => { @@ -216,24 +214,11 @@ const getOpenAIModels = async (opts) => { models = defaultModels[EModelEndpoint.azureAssistants]; } - if (opts.plugins) { - models = models.filter( - (model) => - !model.includes('text-davinci') && - !model.includes('instruct') && - !model.includes('0613') && - !model.includes('0314') && - !model.includes('0301'), - ); - } - let key; if (opts.assistants) { key = 'ASSISTANTS_MODELS'; } else if (opts.azure) { key = 'AZURE_OPENAI_MODELS'; - } else if (opts.plugins) { - key = 'PLUGIN_MODELS'; } else { key = 'OPENAI_MODELS'; } @@ -243,22 +228,13 @@ const getOpenAIModels = async (opts) => { return models; } - if (userProvidedOpenAI) { + if (config.userProvidedOpenAI) { return models; } return await fetchOpenAIModels(opts, models); }; -const getChatGPTBrowserModels = () => { - let models = ['text-davinci-002-render-sha', 'gpt-4']; - if (process.env.CHATGPT_MODELS) { - models = splitAndTrim(process.env.CHATGPT_MODELS); - } - - return models; -}; - /** * Fetches models from the Anthropic API. 
* @async @@ -348,8 +324,7 @@ module.exports = { fetchModels, splitAndTrim, getOpenAIModels, - getBedrockModels, - getChatGPTBrowserModels, - getAnthropicModels, getGoogleModels, + getBedrockModels, + getAnthropicModels, }; diff --git a/api/server/services/ModelService.spec.js b/api/server/services/ModelService.spec.js index a7aeb92181..8880768c91 100644 --- a/api/server/services/ModelService.spec.js +++ b/api/server/services/ModelService.spec.js @@ -216,12 +216,6 @@ describe('getOpenAIModels', () => { expect(models).toEqual(expect.arrayContaining(['azure-model', 'azure-model-2'])); }); - it('returns `PLUGIN_MODELS` with `plugins` flag (and fetch fails)', async () => { - process.env.PLUGIN_MODELS = 'plugins-model,plugins-model-2'; - const models = await getOpenAIModels({ plugins: true }); - expect(models).toEqual(expect.arrayContaining(['plugins-model', 'plugins-model-2'])); - }); - it('returns `OPENAI_MODELS` with no flags (and fetch fails)', async () => { process.env.OPENAI_MODELS = 'openai-model,openai-model-2'; const models = await getOpenAIModels({}); diff --git a/api/server/utils/handleText.js b/api/server/utils/handleText.js index a798dc99bd..cce96feff4 100644 --- a/api/server/utils/handleText.js +++ b/api/server/utils/handleText.js @@ -1,3 +1,4 @@ +const partialRight = require('lodash/partialRight'); const { Capabilities, EModelEndpoint, @@ -7,8 +8,7 @@ const { defaultAssistantsVersion, defaultAgentCapabilities, } = require('librechat-data-provider'); -const { sendEvent } = require('@librechat/api'); -const partialRight = require('lodash/partialRight'); +const { sendEvent, isUserProvided } = require('@librechat/api'); const addSpaceIfNeeded = (text) => (text.length > 0 && !text.endsWith(' ') ? text + ' ' : text); @@ -117,14 +117,6 @@ function formatAction(action) { return formattedAction; } -/** - * Checks if the provided value is 'user_provided'. - * - * @param {string} value - The value to check. - * @returns {boolean} - Returns true if the value is 'user_provided', otherwise false. - */ -const isUserProvided = (value) => value === 'user_provided'; - /** * Generate the configuration for a given key and base URL. 
* @param {string} key @@ -174,7 +166,6 @@ module.exports = { handleText, formatSteps, formatAction, - isUserProvided, generateConfig, addSpaceIfNeeded, createOnProgress, diff --git a/api/test/__mocks__/fetchEventSource.js b/api/test/__mocks__/fetchEventSource.js deleted file mode 100644 index 8f6d3cc575..0000000000 --- a/api/test/__mocks__/fetchEventSource.js +++ /dev/null @@ -1,27 +0,0 @@ -jest.mock('@waylaidwanderer/fetch-event-source', () => ({ - fetchEventSource: jest - .fn() - .mockImplementation((url, { onopen, onmessage, onclose, onerror, error }) => { - // Simulating the onopen event - onopen && onopen({ status: 200 }); - - // Simulating a few onmessage events - onmessage && - onmessage({ data: JSON.stringify({ message: 'First message' }), event: 'message' }); - onmessage && - onmessage({ data: JSON.stringify({ message: 'Second message' }), event: 'message' }); - onmessage && - onmessage({ data: JSON.stringify({ message: 'Third message' }), event: 'message' }); - - // Simulate the onclose event - onclose && onclose(); - - if (error) { - // Simulate the onerror event - onerror && onerror({ status: 500 }); - } - - // Return a Promise that resolves to simulate async behavior - return Promise.resolve(); - }), -})); diff --git a/api/typedefs.js b/api/typedefs.js index b6385c69a9..5200e7fd0e 100644 --- a/api/typedefs.js +++ b/api/typedefs.js @@ -1264,12 +1264,6 @@ * @memberof typedefs */ -/** - * @exports OpenAISpecClient - * @typedef {import('./app/clients/OpenAIClient')} OpenAISpecClient - * @memberof typedefs - */ - /** * @exports TAgentClient * @typedef {import('./server/controllers/agents/client')} TAgentClient @@ -1498,13 +1492,11 @@ * @typedef {Object} EndpointServiceConfig * @property {string} openAIApiKey - The API key for OpenAI. * @property {string} azureOpenAIApiKey - The API key for Azure OpenAI. - * @property {boolean} useAzurePlugins - Flag to indicate if Azure plugins are used. * @property {boolean} userProvidedOpenAI - Flag to indicate if OpenAI API key is user provided. * @property {string} googleKey - The Palm key. * @property {boolean|{userProvide: boolean}} [openAI] - Flag to indicate if OpenAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [assistant] - Flag to indicate if Assistant endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [azureOpenAI] - Flag to indicate if Azure OpenAI endpoint is user provided, or its configuration. - * @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration. @@ -1519,23 +1511,12 @@ * @memberof typedefs */ -/** - * @exports GptPlugins - * @typedef {Object} GptPlugins - * @property {Plugin[]} plugins - An array of plugins available. - * @property {string[]} availableAgents - Available agents, 'classic' or 'functions'. - * @property {boolean} userProvide - A flag indicating if the user has provided the data. - * @property {boolean} azure - A flag indicating if azure plugins are used. 
- * @memberof typedefs - */ - /** * @exports DefaultConfig * @typedef {Object} DefaultConfig * @property {boolean|{userProvide: boolean}} [openAI] - Flag to indicate if OpenAI endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [assistant] - Flag to indicate if Assistant endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [azureOpenAI] - Flag to indicate if Azure OpenAI endpoint is user provided, or its configuration. - * @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration. diff --git a/client/src/common/types.ts b/client/src/common/types.ts index 8f865ce7a8..bb3bdcfa6d 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -323,10 +323,6 @@ export type TSetOptionsPayload = { setExample: TSetExample; addExample: () => void; removeExample: () => void; - setAgentOption: TSetOption; - // getConversation: () => t.TConversation | t.TPreset | null; - checkPluginSelection: (value: string) => boolean; - setTools: (newValue: string, remove?: boolean) => void; setOptions?: TSetOptions; }; @@ -447,7 +443,7 @@ export type TDialogProps = { onOpenChange: (open: boolean) => void; }; -export type TPluginStoreDialogProps = { +export type ToolDialogProps = { isOpen: boolean; setIsOpen: (open: boolean) => void; }; @@ -602,7 +598,6 @@ export type NewConversationParams = { export type ConvoGenerator = (params: NewConversationParams) => void | t.TConversation; export type TBaseResData = { - plugin?: t.TResPlugin; final?: boolean; initial?: boolean; previousMessages?: t.TMessage[]; diff --git a/client/src/components/Chat/Input/ConversationStarters.tsx b/client/src/components/Chat/Input/ConversationStarters.tsx index 12408b55a6..bd78f4b2b4 100644 --- a/client/src/components/Chat/Input/ConversationStarters.tsx +++ b/client/src/components/Chat/Input/ConversationStarters.tsx @@ -13,13 +13,7 @@ const ConversationStarters = () => { const endpointType = useMemo(() => { let ep = conversation?.endpoint ?? 
''; - if ( - [ - EModelEndpoint.chatGPTBrowser, - EModelEndpoint.azureOpenAI, - EModelEndpoint.gptPlugins, - ].includes(ep as EModelEndpoint) - ) { + if (ep === EModelEndpoint.azureOpenAI) { ep = EModelEndpoint.openAI; } return getIconEndpoint({ diff --git a/client/src/components/Chat/Input/PopoverButtons.tsx b/client/src/components/Chat/Input/PopoverButtons.tsx index 31bb6ee2f6..ca448b1824 100644 --- a/client/src/components/Chat/Input/PopoverButtons.tsx +++ b/client/src/components/Chat/Input/PopoverButtons.tsx @@ -1,10 +1,10 @@ import { useRecoilState } from 'recoil'; import { EModelEndpoint, SettingsViews } from 'librechat-data-provider'; -import { Button, MessagesSquared, GPTIcon, AssistantIcon, DataIcon } from '@librechat/client'; +import { Button, MessagesSquared, AssistantIcon, DataIcon } from '@librechat/client'; import type { ReactNode } from 'react'; import { useChatContext } from '~/Providers'; import { useLocalize } from '~/hooks'; -import { cn } from '~/utils/'; +import { cn } from '~/utils'; import store from '~/store'; type TPopoverButton = { @@ -28,14 +28,8 @@ export default function PopoverButtons({ endpointType?: EModelEndpoint | string | null; model?: string | null; }) { - const { - conversation, - optionSettings, - setOptionSettings, - showAgentSettings, - setShowAgentSettings, - } = useChatContext(); const localize = useLocalize(); + const { conversation, optionSettings, setOptionSettings } = useChatContext(); const [settingsView, setSettingsView] = useRecoilState(store.currentSettingsView); const { model: _model, endpoint: _endpoint, endpointType } = conversation ?? {}; @@ -64,19 +58,6 @@ export default function PopoverButtons({ icon: , }, ], - [EModelEndpoint.gptPlugins]: [ - { - label: localize( - showAgentSettings ? 'com_show_completion_settings' : 'com_show_agent_settings', - ), - buttonClass: '', - handler: () => { - setSettingsView(SettingsViews.default); - setShowAgentSettings((prev) => !prev); - }, - icon: , - }, - ], }; if (!endpoint) { diff --git a/client/src/components/Chat/Landing.tsx b/client/src/components/Chat/Landing.tsx index c0148a4373..7707fe7066 100644 --- a/client/src/components/Chat/Landing.tsx +++ b/client/src/components/Chat/Landing.tsx @@ -43,13 +43,7 @@ export default function Landing({ centerFormOnLanding }: { centerFormOnLanding: const endpointType = useMemo(() => { let ep = conversation?.endpoint ?? 
''; - if ( - [ - EModelEndpoint.chatGPTBrowser, - EModelEndpoint.azureOpenAI, - EModelEndpoint.gptPlugins, - ].includes(ep as EModelEndpoint) - ) { + if (ep === EModelEndpoint.azureOpenAI) { ep = EModelEndpoint.openAI; } return getIconEndpoint({ diff --git a/client/src/components/Chat/Menus/Endpoints/components/EndpointItem.tsx b/client/src/components/Chat/Menus/Endpoints/components/EndpointItem.tsx index ad5d224bb8..ffdbdb3ddb 100644 --- a/client/src/components/Chat/Menus/Endpoints/components/EndpointItem.tsx +++ b/client/src/components/Chat/Menus/Endpoints/components/EndpointItem.tsx @@ -1,6 +1,6 @@ import { useMemo } from 'react'; import { SettingsIcon } from 'lucide-react'; -import { TooltipAnchor, Spinner } from '@librechat/client'; +import { Spinner } from '@librechat/client'; import { EModelEndpoint, isAgentsEndpoint, isAssistantsEndpoint } from 'librechat-data-provider'; import type { TModelSpec } from 'librechat-data-provider'; import type { Endpoint } from '~/common'; @@ -82,7 +82,10 @@ export function EndpointItem({ endpoint }: EndpointItemProps) { }, [modelSpecs, endpoint.value]); const searchValue = endpointSearchValues[endpoint.value] || ''; - const isUserProvided = useMemo(() => endpointRequiresUserKey(endpoint.value), [endpoint.value]); + const isUserProvided = useMemo( + () => endpointRequiresUserKey(endpoint.value), + [endpointRequiresUserKey, endpoint.value], + ); const renderIconLabel = () => (
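The hunk above memoizes `isUserProvided` and now lists `endpointRequiresUserKey` itself in the useMemo dependency array. A minimal sketch of the pattern (the hook and parameter names below are hypothetical, not LibreChat's API) showing why a function read inside useMemo belongs in its deps:

import { useMemo } from 'react';

// Sketch: `check` is any function whose identity can change (for example, it
// is recreated when an endpoints-config query refetches). Listing it in the
// dependency array makes the memo recompute with the fresh closure instead of
// pinning a result computed from stale config.
export function useRequiresUserKey(
  check: (endpoint: string) => boolean,
  endpoint: string,
): boolean {
  return useMemo(() => check(endpoint), [check, endpoint]);
}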
@@ -99,18 +102,6 @@ export function EndpointItem({ endpoint }: EndpointItemProps) {
           >
             {endpoint.label}
-          {/* TODO: remove this after deprecation */}
-          {endpoint.value === 'gptPlugins' && (
-            {/* [markup lost in extraction] a TooltipAnchor badge rendering
-                {localize('com_endpoint_deprecated')} for the deprecated plugins endpoint */}
-          )}
); diff --git a/client/src/components/Chat/Menus/Presets/EditPresetDialog.tsx b/client/src/components/Chat/Menus/Presets/EditPresetDialog.tsx index 7914820805..886bf1e63d 100644 --- a/client/src/components/Chat/Menus/Presets/EditPresetDialog.tsx +++ b/client/src/components/Chat/Menus/Presets/EditPresetDialog.tsx @@ -35,7 +35,7 @@ const EditPresetDialog = ({ const localize = useLocalize(); const queryClient = useQueryClient(); const { preset, setPreset } = useChatContext(); - const { setOption, setOptions, setAgentOption } = useSetIndexOptions(preset); + const { setOption, setOptions } = useSetIndexOptions(preset); const [onTitleChange, title] = useDebouncedInput({ setOption, optionKey: 'title', @@ -87,20 +87,7 @@ const EditPresetDialog = ({ console.log('setting model', models[0]); setOption('model')(models[0]); } - - if (preset.agentOptions?.model === models[0]) { - return; - } - - if ( - preset.agentOptions?.model != null && - preset.agentOptions.model && - !models.includes(preset.agentOptions.model) - ) { - console.log('setting agent model', models[0]); - setAgentOption('model')(models[0]); - } - }, [preset, queryClient, setOption, setAgentOption]); + }, [preset, queryClient, setOption]); const switchEndpoint = useCallback( (newEndpoint: string) => { diff --git a/client/src/components/Chat/Messages/ui/MessageRender.tsx b/client/src/components/Chat/Messages/ui/MessageRender.tsx index 179da5942d..48fea51d2b 100644 --- a/client/src/components/Chat/Messages/ui/MessageRender.tsx +++ b/client/src/components/Chat/Messages/ui/MessageRender.tsx @@ -8,7 +8,6 @@ import PlaceholderRow from '~/components/Chat/Messages/ui/PlaceholderRow'; import SiblingSwitch from '~/components/Chat/Messages/SiblingSwitch'; import HoverButtons from '~/components/Chat/Messages/HoverButtons'; import MessageIcon from '~/components/Chat/Messages/MessageIcon'; -import { Plugin } from '~/components/Messages/Content'; import SubRow from '~/components/Chat/Messages/SubRow'; import { fontSizeAtom } from '~/store/fontSize'; import { MessageContext } from '~/Providers'; @@ -178,7 +177,6 @@ const MessageRender = memo( isLatestMessage, }} > - {msg.plugin && } = (props) => { - const { - error, - button, - iconURL = '', - endpoint, - size = 30, - model = '', - assistantName, - agentName, - } = props; + const { error, iconURL = '', endpoint, size = 30, model = '', assistantName, agentName } = props; const assistantsIcon = { icon: iconURL ? ( @@ -142,11 +133,6 @@ const MessageEndpointIcon: React.FC = (props) => { bg: getOpenAIColor(model), name: 'ChatGPT', }, - [EModelEndpoint.gptPlugins]: { - icon: , - bg: `rgba(69, 89, 164, ${button === true ? 
0.75 : 1})`, - name: 'Plugins', - }, [EModelEndpoint.google]: { icon: getGoogleIcon(model, size), name: getGoogleModelName(model), diff --git a/client/src/components/Endpoints/MinimalIcon.tsx b/client/src/components/Endpoints/MinimalIcon.tsx index b0f1fb22b9..4a85eb09ab 100644 --- a/client/src/components/Endpoints/MinimalIcon.tsx +++ b/client/src/components/Endpoints/MinimalIcon.tsx @@ -1,15 +1,13 @@ import { Feather } from 'lucide-react'; import { EModelEndpoint, alternateName } from 'librechat-data-provider'; import { + Sparkles, + BedrockIcon, + AnthropicIcon, AzureMinimalIcon, OpenAIMinimalIcon, - LightningIcon, - MinimalPlugin, GoogleMinimalIcon, CustomMinimalIcon, - AnthropicIcon, - BedrockIcon, - Sparkles, } from '@librechat/client'; import UnknownIcon from '~/hooks/Endpoint/UnknownIcon'; import { IconProps } from '~/common'; @@ -33,7 +31,6 @@ const MinimalIcon: React.FC = (props) => { icon: , name: props.chatGptLabel ?? 'ChatGPT', }, - [EModelEndpoint.gptPlugins]: { icon: , name: 'Plugins' }, [EModelEndpoint.google]: { icon: , name: props.modelLabel ?? 'Google' }, [EModelEndpoint.anthropic]: { icon: , @@ -43,7 +40,6 @@ const MinimalIcon: React.FC = (props) => { icon: , name: 'Custom', }, - [EModelEndpoint.chatGPTBrowser]: { icon: , name: 'ChatGPT' }, [EModelEndpoint.assistants]: { icon: , name: 'Assistant' }, [EModelEndpoint.azureAssistants]: { icon: , name: 'Assistant' }, [EModelEndpoint.agents]: { diff --git a/client/src/components/Endpoints/Settings/AgentSettings.tsx b/client/src/components/Endpoints/Settings/AgentSettings.tsx deleted file mode 100644 index f4425a4db4..0000000000 --- a/client/src/components/Endpoints/Settings/AgentSettings.tsx +++ /dev/null @@ -1,248 +0,0 @@ -import { - Switch, - Label, - Slider, - HoverCard, - InputNumber, - SelectDropDown, - HoverCardTrigger, -} from '@librechat/client'; -import type { TModelSelectProps } from '~/common'; -import { cn, optionText, defaultTextProps, removeFocusRings } from '~/utils'; -import OptionHover from './OptionHover'; -import { useLocalize } from '~/hooks'; -import { ESide } from '~/common'; - -export default function Settings({ conversation, setOption, models, readonly }: TModelSelectProps) { - const localize = useLocalize(); - if (!conversation) { - return null; - } - const { agent, skipCompletion, model, temperature } = conversation.agentOptions ?? {}; - - const setModel = setOption('model'); - const setTemperature = setOption('temperature'); - const setAgent = setOption('agent'); - const setSkipCompletion = setOption('skipCompletion'); - const onCheckedChangeAgent = (checked: boolean) => { - setAgent(checked ? 'functions' : 'classic'); - }; - - const onCheckedChangeSkip = (checked: boolean) => { - setSkipCompletion(checked); - }; - - return ( -
-        {/* [markup lost in extraction] settings panel body: a SelectDropDown bound to
-            setModel over `models`; an InputNumber + Slider pair driving setTemperature
-            (range 0-2, step 0.01, double-click resets to 1, aria-labelledby "temp-int")
-            with an OptionHover tooltip; Switch toggles wired to onCheckedChangeAgent
-            ('functions' vs. 'classic') and onCheckedChangeSkip (skipCompletion); and a
-            commented-out block of top_p / frequency_penalty / presence_penalty
-            InputNumber + Slider pairs using setTopP, setFreqP and setPresP */}
- ); -} diff --git a/client/src/components/Endpoints/Settings/MultiView/PluginSettings.tsx b/client/src/components/Endpoints/Settings/MultiView/PluginSettings.tsx deleted file mode 100644 index 24f1ade440..0000000000 --- a/client/src/components/Endpoints/Settings/MultiView/PluginSettings.tsx +++ /dev/null @@ -1,26 +0,0 @@ -import Settings from '../Plugins'; -import AgentSettings from '../AgentSettings'; -import { useSetIndexOptions } from '~/hooks'; -import { useChatContext } from '~/Providers'; - -export default function PluginsView({ conversation, models, isPreset = false }) { - const { showAgentSettings } = useChatContext(); - const { setOption, setTools, setAgentOption, checkPluginSelection } = useSetIndexOptions( - isPreset ? conversation : null, - ); - if (!conversation) { - return null; - } - - return showAgentSettings ? ( - - ) : ( - - ); -} diff --git a/client/src/components/Endpoints/Settings/MultiView/index.ts b/client/src/components/Endpoints/Settings/MultiView/index.ts index f144aacc28..7289c867ca 100644 --- a/client/src/components/Endpoints/Settings/MultiView/index.ts +++ b/client/src/components/Endpoints/Settings/MultiView/index.ts @@ -1,2 +1 @@ export { default as GoogleSettings } from './GoogleSettings'; -export { default as PluginSettings } from './PluginSettings'; diff --git a/client/src/components/Endpoints/Settings/OptionHover.tsx b/client/src/components/Endpoints/Settings/OptionHover.tsx index 3a526e0b16..219cdfd6d7 100644 --- a/client/src/components/Endpoints/Settings/OptionHover.tsx +++ b/client/src/components/Endpoints/Settings/OptionHover.tsx @@ -36,11 +36,6 @@ const types = { }, openAI, azureOpenAI: openAI, - gptPlugins: { - func: 'com_endpoint_func_hover', - skip: 'com_endpoint_skip_hover', - ...openAI, - }, }; function OptionHover({ endpoint, type, side }: TOptionHoverProps) { diff --git a/client/src/components/Endpoints/Settings/Plugins.tsx b/client/src/components/Endpoints/Settings/Plugins.tsx deleted file mode 100644 index e2a9321b5f..0000000000 --- a/client/src/components/Endpoints/Settings/Plugins.tsx +++ /dev/null @@ -1,392 +0,0 @@ -import { useMemo } from 'react'; -import { useRecoilValue } from 'recoil'; -import TextareaAutosize from 'react-textarea-autosize'; -import { useAvailablePluginsQuery } from 'librechat-data-provider/react-query'; -import { - Input, - Label, - Slider, - HoverCard, - InputNumber, - SelectDropDown, - HoverCardTrigger, -} from '@librechat/client'; -import type { TModelSelectProps, OnInputNumberChange } from '~/common'; -import type { TPlugin } from 'librechat-data-provider'; -import { - removeFocusOutlines, - defaultTextProps, - removeFocusRings, - processPlugins, - selectPlugins, - optionText, - cn, -} from '~/utils'; -import OptionHoverAlt from '~/components/SidePanel/Parameters/OptionHover'; -import MultiSelectDropDown from '~/components/Input/ModelSelect/MultiSelectDropDown'; -import { useLocalize, useDebouncedInput } from '~/hooks'; -import OptionHover from './OptionHover'; -import { ESide } from '~/common'; -import store from '~/store'; - -export default function Settings({ - conversation, - setOption, - setTools, - checkPluginSelection, - models, - readonly, -}: TModelSelectProps & { - setTools: (newValue: string, remove?: boolean | undefined) => void; - checkPluginSelection: (value: string) => boolean; -}) { - const localize = useLocalize(); - const availableTools = useRecoilValue(store.availableTools); - const { data: allPlugins } = useAvailablePluginsQuery({ - select: selectPlugins, - }); - - const conversationTools: 
TPlugin[] = useMemo(() => { - if (!conversation?.tools) { - return []; - } - return processPlugins(conversation.tools, allPlugins?.map); - }, [conversation, allPlugins]); - - const availablePlugins = useMemo(() => { - if (!availableTools) { - return []; - } - - return Object.values(availableTools); - }, [availableTools]); - - const { - model, - modelLabel, - chatGptLabel, - promptPrefix, - temperature, - top_p: topP, - frequency_penalty: freqP, - presence_penalty: presP, - maxContextTokens, - } = conversation ?? {}; - - const [setChatGptLabel, chatGptLabelValue] = useDebouncedInput({ - setOption, - optionKey: 'chatGptLabel', - initialValue: modelLabel ?? chatGptLabel, - }); - const [setPromptPrefix, promptPrefixValue] = useDebouncedInput({ - setOption, - optionKey: 'promptPrefix', - initialValue: promptPrefix, - }); - const [setTemperature, temperatureValue] = useDebouncedInput({ - setOption, - optionKey: 'temperature', - initialValue: temperature, - }); - const [setTopP, topPValue] = useDebouncedInput({ - setOption, - optionKey: 'top_p', - initialValue: topP, - }); - const [setFreqP, freqPValue] = useDebouncedInput({ - setOption, - optionKey: 'frequency_penalty', - initialValue: freqP, - }); - const [setPresP, presPValue] = useDebouncedInput({ - setOption, - optionKey: 'presence_penalty', - initialValue: presP, - }); - const [setMaxContextTokens, maxContextTokensValue] = useDebouncedInput( - { - setOption, - optionKey: 'maxContextTokens', - initialValue: maxContextTokens, - }, - ); - - const setModel = setOption('model'); - - if (!conversation) { - return null; - } - - return ( -
-        {/* [markup lost in extraction; composition inferred from surviving handlers]
-            settings panel body: a custom-name Input bound to setChatGptLabel with the
-            com_endpoint_openai_custom_name_placeholder placeholder; a promptPrefix
-            TextareaAutosize using
-            com_endpoint_plug_set_custom_instructions_for_gpt_placeholder; a model
-            SelectDropDown bound to setModel; a plugin MultiSelectDropDown over
-            availablePlugins / conversationTools wired to setTools and
-            checkPluginSelection; and InputNumber + Slider pairs for temperature
-            (double-click resets to 0.8, aria "temp-int"), top_p (resets to 1,
-            "top-p-int"), frequency_penalty (resets to 0, "freq-penalty-int") and
-            presence_penalty (resets to 0, "pres-penalty-int"), each with OptionHover
-            tooltips */}
- ); -} diff --git a/client/src/components/Endpoints/Settings/index.ts b/client/src/components/Endpoints/Settings/index.ts index fd4b1e7957..5804d8d466 100644 --- a/client/src/components/Endpoints/Settings/index.ts +++ b/client/src/components/Endpoints/Settings/index.ts @@ -3,8 +3,6 @@ export { default as AssistantsSettings } from './Assistants'; export { default as BedrockSettings } from './Bedrock'; export { default as OpenAISettings } from './OpenAI'; export { default as GoogleSettings } from './Google'; -export { default as PluginsSettings } from './Plugins'; export { default as Examples } from './Examples'; -export { default as AgentSettings } from './AgentSettings'; export { default as AnthropicSettings } from './Anthropic'; export * from './settings'; diff --git a/client/src/components/Endpoints/Settings/settings.ts b/client/src/components/Endpoints/Settings/settings.ts index 1dad4301e8..21a04ff3ff 100644 --- a/client/src/components/Endpoints/Settings/settings.ts +++ b/client/src/components/Endpoints/Settings/settings.ts @@ -1,8 +1,8 @@ import { EModelEndpoint } from 'librechat-data-provider'; import type { FC } from 'react'; import type { TModelSelectProps } from '~/common'; -import { GoogleSettings, PluginSettings } from './MultiView'; import AssistantsSettings from './Assistants'; +import { GoogleSettings } from './MultiView'; import AnthropicSettings from './Anthropic'; import BedrockSettings from './Bedrock'; import OpenAISettings from './OpenAI'; @@ -23,7 +23,6 @@ export const getSettings = () => { settings, multiViewSettings: { [EModelEndpoint.google]: GoogleSettings, - [EModelEndpoint.gptPlugins]: PluginSettings, }, }; }; diff --git a/client/src/components/Input/ModelSelect/PluginsByIndex.tsx b/client/src/components/Input/ModelSelect/PluginsByIndex.tsx deleted file mode 100644 index 6e11a87fe0..0000000000 --- a/client/src/components/Input/ModelSelect/PluginsByIndex.tsx +++ /dev/null @@ -1,110 +0,0 @@ -import { useRecoilValue } from 'recoil'; -import { ChevronDownIcon } from 'lucide-react'; -import { useState, useEffect, useMemo } from 'react'; -import { useAvailablePluginsQuery } from 'librechat-data-provider/react-query'; -import { - Button, - SelectDropDown, - SelectDropDownPop, - MultiSelectDropDown, - useMediaQuery, -} from '@librechat/client'; -import type { TPlugin } from 'librechat-data-provider'; -import type { TModelSelectProps } from '~/common'; -import { useSetIndexOptions, useAuthContext, useLocalize } from '~/hooks'; -import { cn, cardStyle, selectPlugins, processPlugins } from '~/utils'; -import MultiSelectPop from './MultiSelectPop'; -import store from '~/store'; - -export default function PluginsByIndex({ - conversation, - setOption, - models, - showAbove, - popover = false, -}: TModelSelectProps) { - const localize = useLocalize(); - const { user } = useAuthContext(); - const [visible, setVisibility] = useState(true); - const isSmallScreen = useMediaQuery('(max-width: 640px)'); - const availableTools = useRecoilValue(store.availableTools); - const { checkPluginSelection, setTools } = useSetIndexOptions(); - - const { data: allPlugins } = useAvailablePluginsQuery({ - enabled: !!user?.plugins, - select: selectPlugins, - }); - - useEffect(() => { - if (isSmallScreen) { - setVisibility(false); - } - }, [isSmallScreen]); - - const conversationTools: TPlugin[] = useMemo(() => { - if (!conversation?.tools) { - return []; - } - return processPlugins(conversation.tools, allPlugins?.map); - }, [conversation, allPlugins]); - - const availablePlugins = useMemo(() => { - 
if (!availableTools) { - return []; - } - - return Object.values(availableTools); - }, [availableTools]); - - if (!conversation) { - return null; - } - - const Menu = popover ? SelectDropDownPop : SelectDropDown; - const PluginsMenu = popover ? MultiSelectPop : MultiSelectDropDown; - - return ( - <> - - {visible && ( - <> - - - - )} - - ); -} diff --git a/client/src/components/Input/ModelSelect/options.ts b/client/src/components/Input/ModelSelect/options.ts index b93e2e7e2a..318da5b066 100644 --- a/client/src/components/Input/ModelSelect/options.ts +++ b/client/src/components/Input/ModelSelect/options.ts @@ -4,9 +4,7 @@ import type { FC } from 'react'; import OpenAI from './OpenAI'; import Google from './Google'; -import ChatGPT from './ChatGPT'; import Anthropic from './Anthropic'; -import PluginsByIndex from './PluginsByIndex'; export const options: { [key: string]: FC } = { [EModelEndpoint.openAI]: OpenAI, @@ -15,10 +13,8 @@ export const options: { [key: string]: FC } = { [EModelEndpoint.azureOpenAI]: OpenAI, [EModelEndpoint.google]: Google, [EModelEndpoint.anthropic]: Anthropic, - [EModelEndpoint.chatGPTBrowser]: ChatGPT, }; export const multiChatOptions = { ...options, - [EModelEndpoint.gptPlugins]: PluginsByIndex, }; diff --git a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx index 9ea4249129..7fec25e4a5 100644 --- a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx +++ b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx @@ -1,25 +1,24 @@ import React, { useState } from 'react'; import { useForm, FormProvider } from 'react-hook-form'; -import { - OGDialog, - OGDialogContent, - OGDialogHeader, - OGDialogTitle, - OGDialogFooter, - Dropdown, - useToastContext, - Button, - Label, - OGDialogTrigger, - Spinner, -} from '@librechat/client'; import { EModelEndpoint, alternateName, isAssistantsEndpoint } from 'librechat-data-provider'; import { - useRevokeAllUserKeysMutation, useRevokeUserKeyMutation, + useRevokeAllUserKeysMutation, } from 'librechat-data-provider/react-query'; +import { + Label, + Button, + Spinner, + OGDialog, + Dropdown, + OGDialogTitle, + OGDialogHeader, + OGDialogFooter, + OGDialogContent, + useToastContext, + OGDialogTrigger, +} from '@librechat/client'; import type { TDialogProps } from '~/common'; -import { useGetEndpointsQuery } from '~/data-provider'; import { useUserKey, useLocalize } from '~/hooks'; import { NotificationSeverity } from '~/common'; import CustomConfig from './CustomEndpoint'; @@ -34,7 +33,6 @@ const endpointComponents = { [EModelEndpoint.openAI]: OpenAIConfig, [EModelEndpoint.custom]: CustomConfig, [EModelEndpoint.azureOpenAI]: OpenAIConfig, - [EModelEndpoint.gptPlugins]: OpenAIConfig, [EModelEndpoint.assistants]: OpenAIConfig, [EModelEndpoint.azureAssistants]: OpenAIConfig, default: OtherConfig, @@ -44,7 +42,6 @@ const formSet: Set = new Set([ EModelEndpoint.openAI, EModelEndpoint.custom, EModelEndpoint.azureOpenAI, - EModelEndpoint.gptPlugins, EModelEndpoint.assistants, EModelEndpoint.azureAssistants, ]); @@ -174,7 +171,6 @@ const SetKeyDialog = ({ }); const [userKey, setUserKey] = useState(''); - const { data: endpointsConfig } = useGetEndpointsQuery(); const [expiresAtLabel, setExpiresAtLabel] = useState(EXPIRY.TWELVE_HOURS.label); const { getExpiry, saveUserKey } = useUserKey(endpoint); const { showToast } = useToastContext(); @@ -218,10 +214,7 @@ const SetKeyDialog = ({ methods.handleSubmit((data) => { const isAzure = endpoint === 
EModelEndpoint.azureOpenAI; const isOpenAIBase = - isAzure || - endpoint === EModelEndpoint.openAI || - endpoint === EModelEndpoint.gptPlugins || - isAssistantsEndpoint(endpoint); + isAzure || endpoint === EModelEndpoint.openAI || isAssistantsEndpoint(endpoint); if (isAzure) { data.apiKey = 'n/a'; } @@ -280,7 +273,6 @@ const SetKeyDialog = ({ const EndpointComponent = endpointComponents[endpointType ?? endpoint] ?? endpointComponents['default']; const expiryTime = getExpiry(); - const config = endpointsConfig?.[endpoint]; return ( @@ -310,12 +302,8 @@ const SetKeyDialog = ({ diff --git a/client/src/components/Messages/Content/Plugin.tsx b/client/src/components/Messages/Content/Plugin.tsx deleted file mode 100644 index 0703843806..0000000000 --- a/client/src/components/Messages/Content/Plugin.tsx +++ /dev/null @@ -1,130 +0,0 @@ -import { useCallback, memo, ReactNode } from 'react'; -import { Spinner } from '@librechat/client'; -import { ChevronDownIcon, LucideProps } from 'lucide-react'; -import { Disclosure, DisclosureButton, DisclosurePanel } from '@headlessui/react'; -import type { TResPlugin, TInput } from 'librechat-data-provider'; -import { useGetEndpointsQuery } from '~/data-provider'; -import { useShareContext } from '~/Providers'; -import { cn, formatJSON } from '~/utils'; -import CodeBlock from './CodeBlock'; - -type PluginIconProps = LucideProps & { - className?: string; -}; - -function formatInputs(inputs: TInput[]) { - let output = ''; - - for (let i = 0; i < inputs.length; i++) { - const input = formatJSON(`${inputs[i]?.inputStr ?? inputs[i]}`); - output += input; - - if (inputs.length > 1 && i !== inputs.length - 1) { - output += ',\n'; - } - } - - return output; -} - -type PluginProps = { - plugin: TResPlugin; -}; - -const Plugin: React.FC = ({ plugin }) => { - const { isSharedConvo } = useShareContext(); - const { data: plugins = {} } = useGetEndpointsQuery({ - enabled: !isSharedConvo, - select: (data) => data?.gptPlugins?.plugins, - }); - - const getPluginName = useCallback( - (pluginKey: string) => { - if (!pluginKey) { - return null; - } - - if (pluginKey === 'n/a' || pluginKey === 'self reflection') { - return pluginKey; - } - return plugins[pluginKey] ?? 'self reflection'; - }, - [plugins], - ); - - if (!plugin || !plugin.latest) { - return null; - } - - const latestPlugin = getPluginName(plugin.latest); - - if (!latestPlugin || (latestPlugin && latestPlugin === 'n/a')) { - return null; - } - - const generateStatus = (): ReactNode => { - if (!plugin.loading && latestPlugin === 'self reflection') { - return 'Finished'; - } else if (latestPlugin === 'self reflection') { - return "I'm thinking..."; - } else { - return ( - <> - {plugin.loading ? 'Using' : 'Used'} {latestPlugin} - {plugin.loading ? '...' : ''} - - ); - } - }; - - return ( -
-      <Disclosure>
-        {({ open }) => {
-          const iconProps: PluginIconProps = {
-            className: cn(open ? 'rotate-180 transform' : '', 'h-4 w-4'),
-          };
-          return (
-            {/* [markup lost in extraction; Disclosure tags inferred from the
-                @headlessui/react imports] a status row rendering {generateStatus()},
-                a Spinner while plugin.loading, a DisclosureButton using iconProps,
-                and a DisclosurePanel whose CodeBlock shows the formatted plugin
-                inputs and, when plugin.outputs && plugin.outputs.length > 0, a
-                CodeBlock for the outputs */}
-          );
-        }}
-      </Disclosure>
- ); -}; - -export default memo(Plugin); diff --git a/client/src/components/Messages/Content/index.ts b/client/src/components/Messages/Content/index.ts index a558d09db6..73238fe6bc 100644 --- a/client/src/components/Messages/Content/index.ts +++ b/client/src/components/Messages/Content/index.ts @@ -1,2 +1 @@ export { default as SubRow } from './SubRow'; -export { default as Plugin } from './Plugin'; diff --git a/client/src/components/Nav/Bookmarks/BookmarkNav.tsx b/client/src/components/Nav/Bookmarks/BookmarkNav.tsx index 423d1f6a5f..7163b142b0 100644 --- a/client/src/components/Nav/Bookmarks/BookmarkNav.tsx +++ b/client/src/components/Nav/Bookmarks/BookmarkNav.tsx @@ -12,10 +12,9 @@ import { cn } from '~/utils'; type BookmarkNavProps = { tags: string[]; setTags: (tags: string[]) => void; - isSmallScreen: boolean; }; -const BookmarkNav: FC = ({ tags, setTags, isSmallScreen }: BookmarkNavProps) => { +const BookmarkNav: FC = ({ tags, setTags }: BookmarkNavProps) => { const localize = useLocalize(); const { data } = useGetConversationTags(); const label = useMemo( diff --git a/client/src/components/Nav/MobileNav.tsx b/client/src/components/Nav/MobileNav.tsx index 7a508a28eb..5542f2830f 100644 --- a/client/src/components/Nav/MobileNav.tsx +++ b/client/src/components/Nav/MobileNav.tsx @@ -1,8 +1,7 @@ import React from 'react'; import { useRecoilValue } from 'recoil'; +import { QueryKeys } from 'librechat-data-provider'; import { useQueryClient } from '@tanstack/react-query'; -import { QueryKeys, Constants } from 'librechat-data-provider'; -import type { TMessage } from 'librechat-data-provider'; import type { Dispatch, SetStateAction } from 'react'; import { useLocalize, useNewConvo } from '~/hooks'; import { clearMessagesCache } from '~/utils'; diff --git a/client/src/components/Nav/NewChat.tsx b/client/src/components/Nav/NewChat.tsx index 74e35a4360..2ab30721a8 100644 --- a/client/src/components/Nav/NewChat.tsx +++ b/client/src/components/Nav/NewChat.tsx @@ -1,9 +1,8 @@ import React, { useCallback } from 'react'; import { useNavigate } from 'react-router-dom'; +import { QueryKeys } from 'librechat-data-provider'; import { useQueryClient } from '@tanstack/react-query'; -import { QueryKeys, Constants } from 'librechat-data-provider'; import { TooltipAnchor, NewChatIcon, MobileSidebar, Sidebar, Button } from '@librechat/client'; -import type { TMessage } from 'librechat-data-provider'; import { useLocalize, useNewConvo } from '~/hooks'; import { clearMessagesCache } from '~/utils'; import store from '~/store'; diff --git a/client/src/components/Nav/SettingsTabs/General/ArchivedChatsTable.tsx b/client/src/components/Nav/SettingsTabs/General/ArchivedChatsTable.tsx index 7f0637749f..e276a9c9fb 100644 --- a/client/src/components/Nav/SettingsTabs/General/ArchivedChatsTable.tsx +++ b/client/src/components/Nav/SettingsTabs/General/ArchivedChatsTable.tsx @@ -5,27 +5,27 @@ import { useRecoilValue } from 'recoil'; import { TrashIcon, ArchiveRestore, ArrowUp, ArrowDown, ArrowUpDown } from 'lucide-react'; import { Button, - OGDialog, - OGDialogContent, - OGDialogHeader, - OGDialogTitle, Label, - TooltipAnchor, Spinner, + OGDialog, DataTable, - useToastContext, + TooltipAnchor, useMediaQuery, + OGDialogTitle, + OGDialogHeader, + useToastContext, + OGDialogContent, } from '@librechat/client'; import type { ConversationListParams, TConversation } from 'librechat-data-provider'; import { - useArchiveConvoMutation, useConversationsInfiniteQuery, useDeleteConversationMutation, + useArchiveConvoMutation, } from 
'~/data-provider'; import { MinimalIcon } from '~/components/Endpoints'; import { NotificationSeverity } from '~/common'; +import { formatDate, logger } from '~/utils'; import { useLocalize } from '~/hooks'; -import { formatDate } from '~/utils'; import store from '~/store'; const DEFAULT_PARAMS: ConversationListParams = { @@ -43,7 +43,7 @@ export default function ArchivedChatsTable({ const localize = useLocalize(); const isSmallScreen = useMediaQuery('(max-width: 768px)'); const { showToast } = useToastContext(); - const isSearchEnabled = useRecoilValue(store.search); + const searchState = useRecoilValue(store.search); const [isDeleteOpen, setIsDeleteOpen] = useState(false); const [queryParams, setQueryParams] = useState(DEFAULT_PARAMS); const [deleteConversation, setDeleteConversation] = useState(null); @@ -101,6 +101,7 @@ export default function ArchivedChatsTable({ }); }, onError: (error: unknown) => { + logger.error('Error deleting archived conversation:', error); showToast({ message: localize('com_ui_archive_delete_error') as string, severity: NotificationSeverity.ERROR, @@ -113,6 +114,7 @@ export default function ArchivedChatsTable({ await refetch(); }, onError: (error: unknown) => { + logger.error('Error unarchiving conversation', error); showToast({ message: localize('com_ui_unarchive_error') as string, severity: NotificationSeverity.ERROR, @@ -283,7 +285,7 @@ export default function ArchivedChatsTable({ isFetchingNextPage={isFetchingNextPage} isLoading={isLoading} showCheckboxes={false} - enableSearch={isSearchEnabled} + enableSearch={searchState.enabled === true} /> diff --git a/client/src/components/Plugins/Store/PluginAuthForm.tsx b/client/src/components/Plugins/Store/PluginAuthForm.tsx index 68fb41f7a2..f6eec0a6af 100644 --- a/client/src/components/Plugins/Store/PluginAuthForm.tsx +++ b/client/src/components/Plugins/Store/PluginAuthForm.tsx @@ -70,7 +70,7 @@ function PluginAuthForm({ plugin, onSubmit, isEntityTool }: TPluginAuthFormProps {errors[authField] && ( - {errors[authField].message as string} + {errors?.[authField]?.message ?? 
''} )} diff --git a/client/src/components/Plugins/Store/PluginStoreDialog.tsx b/client/src/components/Plugins/Store/PluginStoreDialog.tsx deleted file mode 100644 index 2b8e820700..0000000000 --- a/client/src/components/Plugins/Store/PluginStoreDialog.tsx +++ /dev/null @@ -1,245 +0,0 @@ -import { Search, X } from 'lucide-react'; -import { Dialog, DialogPanel, DialogTitle } from '@headlessui/react'; -import { useState, useEffect, useCallback } from 'react'; -import { useAvailablePluginsQuery } from 'librechat-data-provider/react-query'; -import type { TError, TPlugin, TPluginAction } from 'librechat-data-provider'; -import type { TPluginStoreDialogProps } from '~/common/types'; -import { - usePluginDialogHelpers, - useSetIndexOptions, - usePluginInstall, - useAuthContext, - useLocalize, -} from '~/hooks'; -import PluginPagination from './PluginPagination'; -import PluginStoreItem from './PluginStoreItem'; -import PluginAuthForm from './PluginAuthForm'; - -function PluginStoreDialog({ isOpen, setIsOpen }: TPluginStoreDialogProps) { - const localize = useLocalize(); - const { user } = useAuthContext(); - const { data: availablePlugins } = useAvailablePluginsQuery(); - const { setTools } = useSetIndexOptions(); - - const [userPlugins, setUserPlugins] = useState([]); - - const { - maxPage, - setMaxPage, - currentPage, - setCurrentPage, - itemsPerPage, - searchChanged, - setSearchChanged, - searchValue, - setSearchValue, - gridRef, - handleSearch, - handleChangePage, - error, - setError, - errorMessage, - setErrorMessage, - showPluginAuthForm, - setShowPluginAuthForm, - selectedPlugin, - setSelectedPlugin, - } = usePluginDialogHelpers(); - - const handleInstallError = useCallback( - (error: TError) => { - setError(true); - if (error.response?.data?.message) { - setErrorMessage(error.response.data.message); - } - setTimeout(() => { - setError(false); - setErrorMessage(''); - }, 5000); - }, - [setError, setErrorMessage], - ); - - const { installPlugin, uninstallPlugin } = usePluginInstall({ - onInstallError: handleInstallError, - onUninstallError: handleInstallError, - onUninstallSuccess: (_data, variables) => { - setTools(variables.pluginKey, true); - }, - }); - - const handleInstall = (pluginAction: TPluginAction, plugin?: TPlugin) => { - if (!plugin) { - return; - } - installPlugin(pluginAction, plugin); - setShowPluginAuthForm(false); - }; - - const onPluginInstall = (pluginKey: string) => { - const plugin = availablePlugins?.find((p) => p.pluginKey === pluginKey); - if (!plugin) { - return; - } - setSelectedPlugin(plugin); - - const { authConfig, authenticated } = plugin ?? 
{}; - - if (authConfig && authConfig.length > 0 && !authenticated) { - setShowPluginAuthForm(true); - } else { - handleInstall({ pluginKey, action: 'install', auth: null }, plugin); - } - }; - - const filteredPlugins = availablePlugins?.filter((plugin) => - plugin.name.toLowerCase().includes(searchValue.toLowerCase()), - ); - - useEffect(() => { - if (user && user.plugins) { - setUserPlugins(user.plugins); - } - - if (filteredPlugins) { - setMaxPage(Math.ceil(filteredPlugins.length / itemsPerPage)); - if (searchChanged) { - setCurrentPage(1); - setSearchChanged(false); - } - } - }, [ - availablePlugins, - itemsPerPage, - user, - searchValue, - filteredPlugins, - searchChanged, - setMaxPage, - setCurrentPage, - setSearchChanged, - ]); - - return ( - { - setIsOpen(false); - setCurrentPage(1); - setSearchValue(''); - }} - className="relative z-[102]" - > - {/* The backdrop, rendered as a fixed sibling to the panel container */} -
-      {/* [markup lost in extraction] dialog body: a fixed backdrop plus a
-          full-screen container centering the DialogPanel; a DialogTitle rendering
-          {localize('com_nav_plugin_store')} with a close (X) button; a Search input
-          wired to handleSearch; an error banner rendering
-          {localize('com_nav_plugin_auth_error')} {errorMessage} when `error` is set;
-          a conditional PluginAuthForm with plugin={selectedPlugin} and
-          onSubmit={(action) => handleInstall(action, selectedPlugin)} while
-          showPluginAuthForm is true; a grid (ref={gridRef}) of PluginStoreItem
-          entries mapped from filteredPlugins.slice((currentPage - 1) * itemsPerPage,
-          currentPage * itemsPerPage) with isInstalled lookups against userPlugins,
-          onInstall={() => onPluginInstall(plugin.pluginKey)} and
-          onUninstall={() => uninstallPlugin(plugin.pluginKey)}; a PluginPagination
-          block when maxPage > 0; and a commented-out "API not yet implemented"
-          section of PluginStoreLinkButton stubs */}
- ); -} - -export default PluginStoreDialog; diff --git a/client/src/components/Plugins/Store/PluginStoreItem.tsx b/client/src/components/Plugins/Store/PluginStoreItem.tsx deleted file mode 100644 index 83e169f528..0000000000 --- a/client/src/components/Plugins/Store/PluginStoreItem.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import { TPlugin } from 'librechat-data-provider'; -import { XCircle, DownloadCloud } from 'lucide-react'; -import { useLocalize } from '~/hooks'; - -type TPluginStoreItemProps = { - plugin: TPlugin; - onInstall: () => void; - onUninstall: () => void; - isInstalled?: boolean; -}; - -function PluginStoreItem({ plugin, onInstall, onUninstall, isInstalled }: TPluginStoreItemProps) { - const localize = useLocalize(); - const handleClick = () => { - if (isInstalled) { - onUninstall(); - } else { - onInstall(); - } - }; - - return ( - <> -
-      <div>
-        <div>
-          <img
-            src={plugin.icon}
-            alt={`${plugin.name} icon`}
-          />
-          <div>{plugin.name}</div>
-          {!isInstalled ? (
-            <button
-              type="button"
-              onClick={handleClick}
-              aria-label={`${localize('com_nav_plugin_install')} ${plugin.name}`}
-            >
-              {localize('com_nav_plugin_install')}
-              <DownloadCloud />
-            </button>
-          ) : (
-            <button
-              type="button"
-              onClick={handleClick}
-              aria-label={`${localize('com_nav_plugin_uninstall')} ${plugin.name}`}
-            >
-              {localize('com_nav_plugin_uninstall')}
-              <XCircle />
-            </button>
-          )}
-        </div>
-        <div>{plugin.description}</div>
-      </div>
-    </>
- - ); -} - -export default PluginStoreItem; diff --git a/client/src/components/Plugins/Store/PluginStoreLinkButton.tsx b/client/src/components/Plugins/Store/PluginStoreLinkButton.tsx deleted file mode 100644 index fba9b6da61..0000000000 --- a/client/src/components/Plugins/Store/PluginStoreLinkButton.tsx +++ /dev/null @@ -1,18 +0,0 @@ -type TPluginStoreLinkButtonProps = { - onClick: () => void; - label: string; -}; - -function PluginStoreLinkButton({ onClick, label }: TPluginStoreLinkButtonProps) { - return ( -
-    <button type="button" onClick={onClick}>
-      {label}
-    </button>
-  );
-}
-
-export default PluginStoreLinkButton;
diff --git a/client/src/components/Plugins/Store/PluginTooltip.tsx b/client/src/components/Plugins/Store/PluginTooltip.tsx
index a383496f2f..378896fdec 100644
--- a/client/src/components/Plugins/Store/PluginTooltip.tsx
+++ b/client/src/components/Plugins/Store/PluginTooltip.tsx
@@ -1,5 +1,4 @@
 import { HoverCardPortal, HoverCardContent } from '@librechat/client';
-import './styles.module.css';
 
 type TPluginTooltipProps = {
   content: string;
@@ -9,11 +8,9 @@ function PluginTooltip({ content, position }: TPluginTooltipProps) {
   return (
-    <HoverCardPortal>
-      <HoverCardContent side={position}>
-        <div>
-          {content}
-        </div>
-      </HoverCardContent>
-    </HoverCardPortal>
+    <HoverCardPortal>
+      <HoverCardContent side={position}>
+        {content}
+      </HoverCardContent>
+    </HoverCardPortal>
   );
 }
 
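
Both the store dialog deleted above and the tool dialogs this patch keeps (ToolSelectDialog, MCPToolSelectDialog, and AssistantToolsDialog, further below) drive their paging, search, and auth-form state through the shared usePluginDialogHelpers hook. A minimal sketch of that contract, inferred only from the destructuring at the top of the deleted PluginStoreDialog; the real hook may type these fields differently:

    import type { ChangeEvent, RefObject } from 'react';
    import type { TPlugin } from 'librechat-data-provider';

    // Sketch: field list taken from the call site above; setters are assumed
    // to be plain state setters rather than React.Dispatch types.
    interface PluginDialogHelpers {
      currentPage: number;
      maxPage: number;
      itemsPerPage: number;
      searchValue: string;
      searchChanged: boolean;
      error: boolean;
      errorMessage: string;
      showPluginAuthForm: boolean;
      selectedPlugin?: TPlugin;
      gridRef: RefObject<HTMLDivElement>;
      setCurrentPage: (page: number) => void;
      setMaxPage: (page: number) => void;
      setSearchValue: (value: string) => void;
      setSearchChanged: (changed: boolean) => void;
      setError: (hasError: boolean) => void;
      setErrorMessage: (message: string) => void;
      setShowPluginAuthForm: (show: boolean) => void;
      setSelectedPlugin: (plugin?: TPlugin) => void;
      handleSearch: (event: ChangeEvent<HTMLInputElement>) => void;
      handleChangePage: (page: number) => void;
    }
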
diff --git a/client/src/components/Plugins/Store/__tests__/PluginStoreDialog.spec.tsx b/client/src/components/Plugins/Store/__tests__/PluginStoreDialog.spec.tsx deleted file mode 100644 index 16b38661ec..0000000000 --- a/client/src/components/Plugins/Store/__tests__/PluginStoreDialog.spec.tsx +++ /dev/null @@ -1,223 +0,0 @@ -import { render, screen, fireEvent } from 'test/layout-test-utils'; -import PluginStoreDialog from '../PluginStoreDialog'; -import userEvent from '@testing-library/user-event'; -import * as mockDataProvider from 'librechat-data-provider/react-query'; -import * as authMutations from '~/data-provider/Auth/mutations'; -import * as authQueries from '~/data-provider/Auth/queries'; - -jest.mock('librechat-data-provider/react-query'); - -class ResizeObserver { - observe() { - // do nothing - } - unobserve() { - // do nothing - } - disconnect() { - // do nothing - } -} - -window.ResizeObserver = ResizeObserver; - -const pluginsQueryResult = [ - { - name: 'Google', - pluginKey: 'google', - description: 'Use Google Search to find information', - icon: 'https://i.imgur.com/SMmVkNB.png', - authConfig: [ - { - authField: 'GOOGLE_CSE_ID', - label: 'Google CSE ID', - description: 'This is your Google Custom Search Engine ID.', - }, - ], - }, - { - name: 'Wolfram', - pluginKey: 'wolfram', - description: - 'Access computation, math, curated knowledge & real-time data through Wolfram|Alpha and Wolfram Language.', - icon: 'https://www.wolframcdn.com/images/icons/Wolfram.png', - authConfig: [ - { - authField: 'WOLFRAM_APP_ID', - label: 'Wolfram App ID', - description: 'An AppID must be supplied in all calls to the Wolfram|Alpha API.', - }, - ], - }, - { - name: 'Calculator', - pluginKey: 'calculator', - description: 'A simple calculator plugin', - icon: 'https://i.imgur.com/SMmVkNB.png', - authConfig: [], - }, - { - name: 'Plugin 1', - pluginKey: 'plugin1', - description: 'description for Plugin 1.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 2', - pluginKey: 'plugin2', - description: 'description for Plugin 2.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 3', - pluginKey: 'plugin3', - description: 'description for Plugin 3.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 4', - pluginKey: 'plugin4', - description: 'description for Plugin 4.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 5', - pluginKey: 'plugin5', - description: 'description for Plugin 5.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 6', - pluginKey: 'plugin6', - description: 'description for Plugin 6.', - icon: 'mock-icon', - authConfig: [], - }, - { - name: 'Plugin 7', - pluginKey: 'plugin7', - description: 'description for Plugin 7.', - icon: 'mock-icon', - authConfig: [], - }, -]; - -const setup = ({ - useGetUserQueryReturnValue = { - isLoading: false, - isError: false, - data: { - plugins: ['wolfram'], - }, - }, - useRefreshTokenMutationReturnValue = { - isLoading: false, - isError: false, - mutate: jest.fn(), - data: { - token: 'mock-token', - user: {}, - }, - }, - useAvailablePluginsQueryReturnValue = { - isLoading: false, - isError: false, - data: pluginsQueryResult, - }, - useUpdateUserPluginsMutationReturnValue = { - isLoading: false, - isError: false, - mutate: jest.fn(), - data: {}, - }, -} = {}) => { - const mockUseAvailablePluginsQuery = jest - .spyOn(mockDataProvider, 'useAvailablePluginsQuery') - //@ts-ignore - we don't need all parameters of the QueryObserverSuccessResult - 
.mockReturnValue(useAvailablePluginsQueryReturnValue); - const mockUseUpdateUserPluginsMutation = jest - .spyOn(mockDataProvider, 'useUpdateUserPluginsMutation') - //@ts-ignore - we don't need all parameters of the QueryObserverSuccessResult - .mockReturnValue(useUpdateUserPluginsMutationReturnValue); - const mockUseGetUserQuery = jest - .spyOn(authQueries, 'useGetUserQuery') - //@ts-ignore - we don't need all parameters of the QueryObserverSuccessResult - .mockReturnValue(useGetUserQueryReturnValue); - const mockUseRefreshTokenMutation = jest - .spyOn(authMutations, 'useRefreshTokenMutation') - //@ts-ignore - we don't need all parameters of the QueryObserverSuccessResult - .mockReturnValue(useRefreshTokenMutationReturnValue); - const mockSetIsOpen = jest.fn(); - const renderResult = render(); - - return { - ...renderResult, - mockUseGetUserQuery, - mockUseAvailablePluginsQuery, - mockUseUpdateUserPluginsMutation, - mockUseRefreshTokenMutation, - mockSetIsOpen, - }; -}; - -test('renders plugin store dialog with plugins from the available plugins query and shows install/uninstall buttons based on user plugins', () => { - const { getByText, getByRole } = setup(); - expect(getByText(/Plugin Store/i)).toBeInTheDocument(); - expect(getByText(/Use Google Search to find information/i)).toBeInTheDocument(); - expect(getByRole('button', { name: 'Install Google' })).toBeInTheDocument(); - expect(getByRole('button', { name: 'Uninstall Wolfram' })).toBeInTheDocument(); -}); - -test('Displays the plugin auth form when installing a plugin with auth', async () => { - const { getByRole, getByText } = setup(); - const googleButton = getByRole('button', { name: 'Install Google' }); - await userEvent.click(googleButton); - expect(getByText(/Google CSE ID/i)).toBeInTheDocument(); - expect(getByRole('button', { name: 'Save' })).toBeInTheDocument(); -}); - -test('allows the user to navigate between pages', async () => { - const { getByRole, getByText } = setup(); - - expect(getByText('Google')).toBeInTheDocument(); - expect(getByText('Wolfram')).toBeInTheDocument(); - expect(getByText('Plugin 1')).toBeInTheDocument(); - - const nextPageButton = getByRole('button', { name: 'Next page' }); - await userEvent.click(nextPageButton); - - expect(getByText('Plugin 6')).toBeInTheDocument(); - expect(getByText('Plugin 7')).toBeInTheDocument(); - // expect(getByText('Plugin 3')).toBeInTheDocument(); - // expect(getByText('Plugin 4')).toBeInTheDocument(); - // expect(getByText('Plugin 5')).toBeInTheDocument(); - - const previousPageButton = getByRole('button', { name: 'Previous page' }); - await userEvent.click(previousPageButton); - - expect(getByText('Google')).toBeInTheDocument(); - expect(getByText('Wolfram')).toBeInTheDocument(); - expect(getByText('Plugin 1')).toBeInTheDocument(); -}); - -test('allows the user to search for plugins', async () => { - setup(); - - const searchInput = screen.getByPlaceholderText('Search plugins'); - fireEvent.change(searchInput, { target: { value: 'Google' } }); - - expect(screen.getByText('Google')).toBeInTheDocument(); - expect(screen.queryByText('Wolfram')).not.toBeInTheDocument(); - expect(screen.queryByText('Plugin 1')).not.toBeInTheDocument(); - - fireEvent.change(searchInput, { target: { value: 'Plugin 1' } }); - - expect(screen.getByText('Plugin 1')).toBeInTheDocument(); - expect(screen.queryByText('Google')).not.toBeInTheDocument(); - expect(screen.queryByText('Wolfram')).not.toBeInTheDocument(); -}); diff --git 
a/client/src/components/Plugins/Store/__tests__/PluginStoreItem.spec.tsx b/client/src/components/Plugins/Store/__tests__/PluginStoreItem.spec.tsx deleted file mode 100644 index ef2a861c97..0000000000 --- a/client/src/components/Plugins/Store/__tests__/PluginStoreItem.spec.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import 'test/matchMedia.mock'; -import { render, screen } from 'test/layout-test-utils'; -import userEvent from '@testing-library/user-event'; -import { TPlugin } from 'librechat-data-provider'; -import PluginStoreItem from '../PluginStoreItem'; - -const mockPlugin = { - name: 'Test Plugin', - description: 'This is a test plugin', - icon: 'test-icon.png', -}; - -describe('PluginStoreItem', () => { - it('renders the plugin name and description', () => { - render( - { - return; - }} - onUninstall={() => { - return; - }} - />, - ); - expect(screen.getByText('Test Plugin')).toBeInTheDocument(); - expect(screen.getByText('This is a test plugin')).toBeInTheDocument(); - }); - - it('calls onInstall when the install button is clicked', async () => { - const onInstall = jest.fn(); - render( - { - return; - }} - />, - ); - await userEvent.click(screen.getByText('Install')); - expect(onInstall).toHaveBeenCalled(); - }); - - it('calls onUninstall when the uninstall button is clicked', async () => { - const onUninstall = jest.fn(); - render( - { - return; - }} - onUninstall={onUninstall} - isInstalled - />, - ); - await userEvent.click(screen.getByText('Uninstall')); - expect(onUninstall).toHaveBeenCalled(); - }); -}); diff --git a/client/src/components/Plugins/Store/index.ts b/client/src/components/Plugins/Store/index.ts index 2f9a1d4807..53a8c86fae 100644 --- a/client/src/components/Plugins/Store/index.ts +++ b/client/src/components/Plugins/Store/index.ts @@ -1,6 +1,3 @@ -export { default as PluginStoreDialog } from './PluginStoreDialog'; -export { default as PluginStoreItem } from './PluginStoreItem'; export { default as PluginPagination } from './PluginPagination'; -export { default as PluginStoreLinkButton } from './PluginStoreLinkButton'; export { default as PluginAuthForm } from './PluginAuthForm'; export { default as PluginTooltip } from './PluginTooltip'; diff --git a/client/src/components/Plugins/Store/styles.module.css b/client/src/components/Plugins/Store/styles.module.css deleted file mode 100644 index acd6ab3c01..0000000000 --- a/client/src/components/Plugins/Store/styles.module.css +++ /dev/null @@ -1,4 +0,0 @@ -a { - text-decoration: underline; - color: white; -} diff --git a/client/src/components/Plugins/index.ts b/client/src/components/Plugins/index.ts deleted file mode 100644 index 47e0805c13..0000000000 --- a/client/src/components/Plugins/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './Store'; diff --git a/client/src/components/Prompts/PromptVersions.tsx b/client/src/components/Prompts/PromptVersions.tsx index 600d30fe8e..d21562edcd 100644 --- a/client/src/components/Prompts/PromptVersions.tsx +++ b/client/src/components/Prompts/PromptVersions.tsx @@ -125,7 +125,9 @@ const VersionCard = ({
{authorName && ( - + )} {tags.length > 0 && } diff --git a/client/src/components/Share/Message.tsx b/client/src/components/Share/Message.tsx index e556145481..99d46954a8 100644 --- a/client/src/components/Share/Message.tsx +++ b/client/src/components/Share/Message.tsx @@ -4,7 +4,6 @@ import MinimalHoverButtons from '~/components/Chat/Messages/MinimalHoverButtons' import MessageContent from '~/components/Chat/Messages/Content/MessageContent'; import SearchContent from '~/components/Chat/Messages/Content/SearchContent'; import SiblingSwitch from '~/components/Chat/Messages/SiblingSwitch'; -import { Plugin } from '~/components/Messages/Content'; import SubRow from '~/components/Chat/Messages/SubRow'; import { fontSizeAtom } from '~/store/fontSize'; import { MessageContext } from '~/Providers'; @@ -80,8 +79,6 @@ export default function Message(props: TMessageProps) { isLatestMessage: false, // No concept of latest message in share view }} > - {/* Legacy Plugins */} - {message.plugin && } {message.content ? ( { }, EModelEndpoint: actualModule.EModelEndpoint || { agents: 'agents', - chatGPTBrowser: 'chatGPTBrowser', - gptPlugins: 'gptPlugins', }, ResourceType: actualModule.ResourceType || { AGENT: 'agent', diff --git a/client/src/components/SidePanel/Agents/AgentPanel.tsx b/client/src/components/SidePanel/Agents/AgentPanel.tsx index 867f4466ff..729eea7ebd 100644 --- a/client/src/components/SidePanel/Agents/AgentPanel.tsx +++ b/client/src/components/SidePanel/Agents/AgentPanel.tsx @@ -306,9 +306,7 @@ export default function AgentPanel() { (key) => !isAssistantsEndpoint(key) && (allowedProviders.size > 0 ? allowedProviders.has(key) : true) && - key !== EModelEndpoint.agents && - key !== EModelEndpoint.chatGPTBrowser && - key !== EModelEndpoint.gptPlugins, + key !== EModelEndpoint.agents, ) .map((provider) => createProviderOption(provider)), [endpointsConfig, allowedProviders], diff --git a/client/src/components/Tools/AssistantToolsDialog.tsx b/client/src/components/Tools/AssistantToolsDialog.tsx index ce013af135..98e8043ebe 100644 --- a/client/src/components/Tools/AssistantToolsDialog.tsx +++ b/client/src/components/Tools/AssistantToolsDialog.tsx @@ -10,7 +10,7 @@ import type { TPluginAction, TError, } from 'librechat-data-provider'; -import type { TPluginStoreDialogProps } from '~/common/types'; +import type { ToolDialogProps } from '~/common/types'; import { PluginPagination, PluginAuthForm } from '~/components/Plugins/Store'; import { useLocalize, usePluginDialogHelpers } from '~/hooks'; import { useAvailableToolsQuery } from '~/data-provider'; @@ -20,7 +20,7 @@ function AssistantToolsDialog({ isOpen, endpoint, setIsOpen, -}: TPluginStoreDialogProps & { +}: ToolDialogProps & { endpoint: AssistantsEndpoint | EModelEndpoint.agents; }) { const localize = useLocalize(); diff --git a/client/src/components/Tools/MCPToolSelectDialog.tsx b/client/src/components/Tools/MCPToolSelectDialog.tsx index 6e3b75f099..b628f6a41a 100644 --- a/client/src/components/Tools/MCPToolSelectDialog.tsx +++ b/client/src/components/Tools/MCPToolSelectDialog.tsx @@ -6,7 +6,7 @@ import { Constants, EModelEndpoint, QueryKeys } from 'librechat-data-provider'; import { Dialog, DialogPanel, DialogTitle, Description } from '@headlessui/react'; import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-query'; import type { TError, AgentToolType } from 'librechat-data-provider'; -import type { AgentForm, TPluginStoreDialogProps } from '~/common'; +import type { AgentForm, ToolDialogProps } from '~/common'; import { 
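/*
 * The TPluginStoreDialogProps -> ToolDialogProps change here and in the sibling
 * dialogs is a rename; assuming the shape carried over unchanged, the type in
 * client/src/common is roughly:
 *
 *   type ToolDialogProps = {
 *     isOpen: boolean;
 *     setIsOpen: (open: boolean) => void;
 *   };
 *
 * Each dialog then intersects it with its own extras, e.g.
 * ToolDialogProps & { agentId: string } in the hunk below.
 */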
usePluginDialogHelpers, useMCPServerManager, @@ -24,7 +24,7 @@ function MCPToolSelectDialog({ agentId, setIsOpen, mcpServerNames, -}: TPluginStoreDialogProps & { +}: ToolDialogProps & { agentId: string; mcpServerNames?: string[]; endpoint: EModelEndpoint.agents; diff --git a/client/src/components/Tools/ToolSelectDialog.tsx b/client/src/components/Tools/ToolSelectDialog.tsx index 14bab50603..1fb28c7413 100644 --- a/client/src/components/Tools/ToolSelectDialog.tsx +++ b/client/src/components/Tools/ToolSelectDialog.tsx @@ -11,7 +11,7 @@ import type { TPlugin, TError, } from 'librechat-data-provider'; -import type { AgentForm, TPluginStoreDialogProps } from '~/common'; +import type { AgentForm, ToolDialogProps } from '~/common'; import { PluginPagination, PluginAuthForm } from '~/components/Plugins/Store'; import { useAgentPanelContext } from '~/Providers/AgentPanelContext'; import { useLocalize, usePluginDialogHelpers } from '~/hooks'; @@ -21,7 +21,7 @@ function ToolSelectDialog({ isOpen, endpoint, setIsOpen, -}: TPluginStoreDialogProps & { +}: ToolDialogProps & { endpoint: AssistantsEndpoint | EModelEndpoint.agents; }) { const localize = useLocalize(); diff --git a/client/src/hooks/Chat/useChatHelpers.ts b/client/src/hooks/Chat/useChatHelpers.ts index a2d93ee375..b5ab9aee27 100644 --- a/client/src/hooks/Chat/useChatHelpers.ts +++ b/client/src/hooks/Chat/useChatHelpers.ts @@ -130,13 +130,10 @@ export default function useChatHelpers(index = 0, paramId?: string) { setSiblingIdx(0); }; + const [preset, setPreset] = useRecoilState(store.presetByIndex(index)); const [showPopover, setShowPopover] = useRecoilState(store.showPopoverFamily(index)); const [abortScroll, setAbortScroll] = useRecoilState(store.abortScrollFamily(index)); - const [preset, setPreset] = useRecoilState(store.presetByIndex(index)); const [optionSettings, setOptionSettings] = useRecoilState(store.optionSettingsFamily(index)); - const [showAgentSettings, setShowAgentSettings] = useRecoilState( - store.showAgentSettingsFamily(index), - ); return { newConversation, @@ -167,8 +164,6 @@ export default function useChatHelpers(index = 0, paramId?: string) { setPreset, optionSettings, setOptionSettings, - showAgentSettings, - setShowAgentSettings, files, setFiles, filesLoading, diff --git a/client/src/hooks/Config/useAppStartup.ts b/client/src/hooks/Config/useAppStartup.ts index bdac6d616f..d6324f7e71 100644 --- a/client/src/hooks/Config/useAppStartup.ts +++ b/client/src/hooks/Config/useAppStartup.ts @@ -1,25 +1,13 @@ import { useEffect } from 'react'; +import { useRecoilState } from 'recoil'; import TagManager from 'react-gtm-module'; -import { useRecoilState, useSetRecoilState } from 'recoil'; import { LocalStorageKeys } from 'librechat-data-provider'; -import { useAvailablePluginsQuery } from 'librechat-data-provider/react-query'; -import type { TStartupConfig, TPlugin, TUser } from 'librechat-data-provider'; -import { mapPlugins, selectPlugins, processPlugins } from '~/utils'; +import type { TStartupConfig, TUser } from 'librechat-data-provider'; import { cleanupTimestampedStorage } from '~/utils/timestamps'; import useSpeechSettingsInit from './useSpeechSettingsInit'; import { useMCPToolsQuery } from '~/data-provider'; import store from '~/store'; -const pluginStore: TPlugin = { - name: 'Plugin store', - pluginKey: 'pluginStore', - isButton: true, - description: '', - icon: '', - authConfig: [], - authenticated: false, -}; - export default function useAppStartup({ startupConfig, user, @@ -27,12 +15,7 @@ export default function 
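/*
 * Note on the hunk below: startup no longer seeds the availableTools atom with
 * a synthetic `pluginStore` entry. Tool lists are fetched where they are used;
 * a sketch, assuming the data-provider hook keeps its current signature:
 *
 *   const { data: tools } = useAvailableToolsQuery(endpoint);
 *
 * as AssistantToolsDialog does elsewhere in this patch.
 */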
useAppStartup({ startupConfig?: TStartupConfig; user?: TUser; }) { - const setAvailableTools = useSetRecoilState(store.availableTools); const [defaultPreset, setDefaultPreset] = useRecoilState(store.defaultPreset); - const { data: allPlugins } = useAvailablePluginsQuery({ - enabled: !!user?.plugins, - select: selectPlugins, - }); useSpeechSettingsInit(!!user); @@ -80,43 +63,6 @@ export default function useAppStartup({ }); }, [defaultPreset, setDefaultPreset, startupConfig?.modelSpecs?.list]); - /** Set the available Plugins */ - useEffect(() => { - if (!user) { - return; - } - - if (!allPlugins) { - return; - } - - const userPlugins = user.plugins ?? []; - - if (userPlugins.length === 0) { - setAvailableTools({ pluginStore }); - return; - } - - const tools = [...userPlugins] - .map((el) => allPlugins.map[el]) - .filter((el: TPlugin | undefined): el is TPlugin => el !== undefined); - - /* Filter Last Selected Tools */ - const localStorageItem = localStorage.getItem(LocalStorageKeys.LAST_TOOLS) ?? ''; - if (!localStorageItem) { - return setAvailableTools({ pluginStore, ...mapPlugins(tools) }); - } - const lastSelectedTools = processPlugins(JSON.parse(localStorageItem) ?? [], allPlugins.map); - const filteredTools = lastSelectedTools - .filter((tool: TPlugin) => - tools.some((existingTool) => existingTool.pluginKey === tool.pluginKey), - ) - .filter((tool: TPlugin | undefined) => !!tool); - localStorage.setItem(LocalStorageKeys.LAST_TOOLS, JSON.stringify(filteredTools)); - - setAvailableTools({ pluginStore, ...mapPlugins(tools) }); - }, [allPlugins, user, setAvailableTools]); - useEffect(() => { if (startupConfig?.analyticsGtmId != null && typeof window.google_tag_manager === 'undefined') { const tagManagerArgs = { diff --git a/client/src/hooks/Config/useClearStates.ts b/client/src/hooks/Config/useClearStates.ts index 354b92e4ed..140f83e924 100644 --- a/client/src/hooks/Config/useClearStates.ts +++ b/client/src/hooks/Config/useClearStates.ts @@ -28,7 +28,6 @@ export default function useClearStates() { reset(store.abortScrollFamily(key)); reset(store.isSubmittingFamily(key)); reset(store.optionSettingsFamily(key)); - reset(store.showAgentSettingsFamily(key)); reset(store.showPopoverFamily(key)); reset(store.showMentionPopoverFamily(key)); reset(store.showPlusPopoverFamily(key)); diff --git a/client/src/hooks/Conversations/usePresetIndexOptions.ts b/client/src/hooks/Conversations/usePresetIndexOptions.ts index 45a6e36af9..9824adef8c 100644 --- a/client/src/hooks/Conversations/usePresetIndexOptions.ts +++ b/client/src/hooks/Conversations/usePresetIndexOptions.ts @@ -1,15 +1,11 @@ -import { useRecoilValue, useSetRecoilState } from 'recoil'; -import type { TPreset, TPlugin } from 'librechat-data-provider'; +import type { TPreset } from 'librechat-data-provider'; import type { TSetOptionsPayload, TSetExample, TSetOption, TSetOptions } from '~/common'; import { useChatContext } from '~/Providers/ChatContext'; import { cleanupPreset } from '~/utils'; -import store from '~/store'; type TUsePresetOptions = (preset?: TPreset | boolean | null) => TSetOptionsPayload | boolean; const usePresetIndexOptions: TUsePresetOptions = (_preset) => { - const setShowPluginStoreDialog = useSetRecoilState(store.showPluginStoreDialog); - const availableTools = useRecoilValue(store.availableTools); const { preset, setPreset } = useChatContext(); if (!_preset) { @@ -101,68 +97,6 @@ const usePresetIndexOptions: TUsePresetOptions = (_preset) => { ); }; - const setAgentOption: TSetOption = (param) => (newValue) => { - const 
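/*
 * The setAgentOption removed below deep-clones the preset with
 * JSON.parse(JSON.stringify(...)), which drops undefined values and functions.
 * A modern equivalent for plain data would be (sketch, not part of the patch):
 *
 *   const editablePreset = structuredClone(_preset) as TPreset;
 */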
editablePreset = JSON.parse(JSON.stringify(_preset)); - const { agentOptions } = editablePreset; - agentOptions[param] = newValue; - setPreset((prevState) => - cleanupPreset({ - preset: { - ...prevState, - agentOptions, - }, - }), - ); - }; - - function checkPluginSelection(value: string) { - if (!preset?.tools) { - return false; - } - return preset.tools.find((el) => { - if (typeof el === 'string') { - return el === value; - } - return el.pluginKey === value; - }) - ? true - : false; - } - - const setTools: (newValue: string, remove?: boolean) => void = (newValue, remove) => { - if (newValue === 'pluginStore') { - setShowPluginStoreDialog(true); - return; - } - - const update = {}; - const current = - preset?.tools - ?.map((tool: string | TPlugin) => { - if (typeof tool === 'string') { - return availableTools[tool]; - } - return tool; - }) - .filter((el) => !!el) || []; - const isSelected = checkPluginSelection(newValue); - const tool = availableTools[newValue]; - if (isSelected || remove) { - update['tools'] = current.filter((el) => el.pluginKey !== newValue); - } else { - update['tools'] = [...current, tool]; - } - - setPreset((prevState) => - cleanupPreset({ - preset: { - ...prevState, - ...update, - }, - }), - ); - }; - return { setOption, setExample, @@ -170,9 +104,6 @@ const usePresetIndexOptions: TUsePresetOptions = (_preset) => { setOptions, removeExample, getConversation, - checkPluginSelection, - setAgentOption, - setTools, }; }; diff --git a/client/src/hooks/Conversations/useSetIndexOptions.ts b/client/src/hooks/Conversations/useSetIndexOptions.ts index 5b326777ac..72b25d8035 100644 --- a/client/src/hooks/Conversations/useSetIndexOptions.ts +++ b/client/src/hooks/Conversations/useSetIndexOptions.ts @@ -1,21 +1,16 @@ -import { useRecoilValue, useSetRecoilState } from 'recoil'; import { TPreset, - TPlugin, TConversation, - tConvoUpdateSchema, EModelEndpoint, + tConvoUpdateSchema, } from 'librechat-data-provider'; import type { TSetExample, TSetOption, TSetOptionsPayload } from '~/common'; import usePresetIndexOptions from './usePresetIndexOptions'; import { useChatContext } from '~/Providers/ChatContext'; -import store from '~/store'; type TUseSetOptions = (preset?: TPreset | boolean | null) => TSetOptionsPayload; const useSetIndexOptions: TUseSetOptions = (preset = false) => { - const setShowPluginStoreDialog = useSetRecoilState(store.showPluginStoreDialog); - const availableTools = useRecoilValue(store.availableTools); const { conversation, setConversation } = useChatContext(); const result = usePresetIndexOptions(preset); @@ -116,76 +111,11 @@ const useSetIndexOptions: TUseSetOptions = (preset = false) => { ); }; - function checkPluginSelection(value: string) { - if (!conversation?.tools) { - return false; - } - return conversation.tools.find((el) => { - if (typeof el === 'string') { - return el === value; - } - return el.pluginKey === value; - }) - ? 
true - : false; - } - - const setAgentOption: TSetOption = (param) => (newValue) => { - const editableConvo = JSON.stringify(conversation); - const convo = JSON.parse(editableConvo); - const { agentOptions } = convo; - agentOptions[param] = newValue; - - setConversation( - (prevState) => - tConvoUpdateSchema.parse({ - ...prevState, - agentOptions, - }) as TConversation, - ); - }; - - const setTools: (newValue: string, remove?: boolean) => void = (newValue, remove) => { - if (newValue === 'pluginStore') { - setShowPluginStoreDialog(true); - return; - } - - const update = {}; - const current = - conversation?.tools - ?.map((tool: string | TPlugin) => { - if (typeof tool === 'string') { - return availableTools[tool]; - } - return tool; - }) - .filter((el) => !!el) || []; - const isSelected = checkPluginSelection(newValue); - const tool = availableTools[newValue]; - if (isSelected || remove) { - update['tools'] = current.filter((el) => el.pluginKey !== newValue); - } else { - update['tools'] = [...current, tool]; - } - - setConversation( - (prevState) => - tConvoUpdateSchema.parse({ - ...prevState, - ...update, - }) as TConversation, - ); - }; - return { - setTools, setOption, setExample, addExample, removeExample, - setAgentOption, - checkPluginSelection, }; }; diff --git a/client/src/hooks/Endpoint/Icons.tsx b/client/src/hooks/Endpoint/Icons.tsx index f5280cd7f3..6849374a25 100644 --- a/client/src/hooks/Endpoint/Icons.tsx +++ b/client/src/hooks/Endpoint/Icons.tsx @@ -1,16 +1,14 @@ import { Feather } from 'lucide-react'; import { EModelEndpoint } from 'librechat-data-provider'; import { - MinimalPlugin, GPTIcon, + Sparkles, + BedrockIcon, + AssistantIcon, AnthropicIcon, AzureMinimalIcon, GoogleMinimalIcon, CustomMinimalIcon, - AssistantIcon, - LightningIcon, - BedrockIcon, - Sparkles, } from '@librechat/client'; import type { IconMapProps, AgentIconMapProps, IconsRecord } from '~/common'; import UnknownIcon from './UnknownIcon'; @@ -63,9 +61,7 @@ const Bedrock = ({ className = '' }: IconMapProps) => { export const icons: IconsRecord = { [EModelEndpoint.azureOpenAI]: AzureMinimalIcon, [EModelEndpoint.openAI]: GPTIcon, - [EModelEndpoint.gptPlugins]: MinimalPlugin, [EModelEndpoint.anthropic]: AnthropicIcon, - [EModelEndpoint.chatGPTBrowser]: LightningIcon, [EModelEndpoint.google]: GoogleMinimalIcon, [EModelEndpoint.custom]: CustomMinimalIcon, [EModelEndpoint.assistants]: AssistantAvatar, diff --git a/client/src/hooks/Input/useUserKey.ts b/client/src/hooks/Input/useUserKey.ts index 124fdabc93..158a0d0039 100644 --- a/client/src/hooks/Input/useUserKey.ts +++ b/client/src/hooks/Input/useUserKey.ts @@ -12,8 +12,6 @@ const useUserKey = (endpoint: string) => { if (azure) { keyName = EModelEndpoint.azureOpenAI; - } else if (keyName === EModelEndpoint.gptPlugins) { - keyName = EModelEndpoint.openAI; } const updateKey = useUpdateUserKeysMutation(); diff --git a/client/src/hooks/Plugins/index.ts b/client/src/hooks/Plugins/index.ts index 17644f6af5..9262503016 100644 --- a/client/src/hooks/Plugins/index.ts +++ b/client/src/hooks/Plugins/index.ts @@ -1,6 +1,5 @@ export * from './useToolToggle'; export { default as useAuthCodeTool } from './useAuthCodeTool'; -export { default as usePluginInstall } from './usePluginInstall'; export { default as useCodeApiKeyForm } from './useCodeApiKeyForm'; export { default as useSearchApiKeyForm } from './useSearchApiKeyForm'; export { default as usePluginDialogHelpers } from './usePluginDialogHelpers'; diff --git a/client/src/hooks/Plugins/usePluginInstall.ts 
b/client/src/hooks/Plugins/usePluginInstall.ts deleted file mode 100644 index f42580199f..0000000000 --- a/client/src/hooks/Plugins/usePluginInstall.ts +++ /dev/null @@ -1,77 +0,0 @@ -// hooks/Plugins/usePluginInstall.ts -import { useCallback } from 'react'; -import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-query'; -import type { - TError, - TUser, - TUpdateUserPlugins, - TPlugin, - TPluginAction, -} from 'librechat-data-provider'; -import { useSetRecoilState } from 'recoil'; -import store from '~/store'; - -interface PluginStoreHandlers { - onInstallError?: (error: TError) => void; - onUninstallError?: (error: TError) => void; - onInstallSuccess?: (data: TUser, variables: TUpdateUserPlugins, context: unknown) => void; - onUninstallSuccess?: (data: TUser, variables: TUpdateUserPlugins, context: unknown) => void; -} - -export default function usePluginInstall(handlers: PluginStoreHandlers = {}) { - const setAvailableTools = useSetRecoilState(store.availableTools); - const { onInstallError, onInstallSuccess, onUninstallError, onUninstallSuccess } = handlers; - const updateUserPlugins = useUpdateUserPluginsMutation(); - - const installPlugin = useCallback( - (pluginAction: TPluginAction, plugin: TPlugin) => { - updateUserPlugins.mutate(pluginAction, { - onError: (error: unknown) => { - if (onInstallError) { - onInstallError(error as TError); - } - }, - onSuccess: (...rest) => { - setAvailableTools((prev) => { - return { ...prev, [plugin.pluginKey]: plugin }; - }); - if (onInstallSuccess) { - onInstallSuccess(...rest); - } - }, - }); - }, - [updateUserPlugins, onInstallError, onInstallSuccess, setAvailableTools], - ); - - const uninstallPlugin = useCallback( - (plugin: string) => { - updateUserPlugins.mutate( - { pluginKey: plugin, action: 'uninstall', auth: null }, - { - onError: (error: unknown) => { - if (onUninstallError) { - onUninstallError(error as TError); - } - }, - onSuccess: (...rest) => { - setAvailableTools((prev) => { - const newAvailableTools = { ...prev }; - delete newAvailableTools[plugin]; - return newAvailableTools; - }); - if (onUninstallSuccess) { - onUninstallSuccess(...rest); - } - }, - }, - ); - }, - [updateUserPlugins, onUninstallError, onUninstallSuccess, setAvailableTools], - ); - - return { - installPlugin, - uninstallPlugin, - }; -} diff --git a/client/src/hooks/SSE/useEventHandlers.ts b/client/src/hooks/SSE/useEventHandlers.ts index bde0319695..199482998f 100644 --- a/client/src/hooks/SSE/useEventHandlers.ts +++ b/client/src/hooks/SSE/useEventHandlers.ts @@ -201,14 +201,7 @@ export default function useEventHandlers({ const messageHandler = useCallback( (data: string | undefined, submission: EventSubmission) => { - const { - messages, - userMessage, - plugin, - plugins, - initialResponse, - isRegenerate = false, - } = submission; + const { messages, userMessage, initialResponse, isRegenerate = false } = submission; const text = data ?? ''; setIsSubmitting(true); @@ -224,8 +217,6 @@ export default function useEventHandlers({ { ...initialResponse, text, - plugin: plugin ?? null, - plugins: plugins ?? [], }, ]); } else { @@ -235,8 +226,6 @@ export default function useEventHandlers({ { ...initialResponse, text, - plugin: plugin ?? null, - plugins: plugins ?? 
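/*
 * With plugin/plugins gone from the submission, the optimistic update reduces
 * to appending the streamed text; the surviving pattern, in miniature:
 *
 *   setMessages([...messages, userMessage, { ...initialResponse, text }]);
 */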
[], }, ]); } diff --git a/client/src/hooks/SSE/useSSE.ts b/client/src/hooks/SSE/useSSE.ts index 945c13b449..4a6115e9b2 100644 --- a/client/src/hooks/SSE/useSSE.ts +++ b/client/src/hooks/SSE/useSSE.ts @@ -123,9 +123,8 @@ export default function useSSE( if (data.final != null) { clearDraft(submission.conversation?.conversationId); - const { plugins } = data; try { - finalHandler(data, { ...submission, plugins } as EventSubmission); + finalHandler(data, submission as EventSubmission); } catch (error) { console.error('Error in finalHandler:', error); setIsSubmitting(false); @@ -160,7 +159,6 @@ export default function useSSE( contentHandler({ data, submission: submission as EventSubmission }); } else { const text = data.text ?? data.response; - const { plugin, plugins } = data; const initialResponse = { ...(submission.initialResponse as TMessage), @@ -169,7 +167,7 @@ export default function useSSE( }; if (data.message != null) { - messageHandler(text, { ...submission, plugin, plugins, userMessage, initialResponse }); + messageHandler(text, { ...submission, userMessage, initialResponse }); } } }); diff --git a/client/src/hooks/useGenerationsByLatest.ts b/client/src/hooks/useGenerationsByLatest.ts index a70ffdd9cc..ddedc3ec15 100644 --- a/client/src/hooks/useGenerationsByLatest.ts +++ b/client/src/hooks/useGenerationsByLatest.ts @@ -31,7 +31,6 @@ export default function useGenerationsByLatest({ EModelEndpoint.agents, EModelEndpoint.bedrock, EModelEndpoint.anthropic, - EModelEndpoint.gptPlugins, EModelEndpoint.azureOpenAI, ].find((e) => e === endpoint), ); @@ -51,9 +50,7 @@ export default function useGenerationsByLatest({ EModelEndpoint.custom, EModelEndpoint.agents, EModelEndpoint.bedrock, - EModelEndpoint.chatGPTBrowser, EModelEndpoint.google, - EModelEndpoint.gptPlugins, EModelEndpoint.anthropic, ].find((e) => e === endpoint), ); diff --git a/client/src/locales/en/translation.json b/client/src/locales/en/translation.json index dabe319d5f..011a19978a 100644 --- a/client/src/locales/en/translation.json +++ b/client/src/locales/en/translation.json @@ -222,7 +222,6 @@ "com_download_expires": "(click here to download - expires {{0}})", "com_endpoint": "Endpoint", "com_endpoint_agent": "Agent", - "com_endpoint_agent_model": "Agent Model (Recommended: GPT-3.5)", "com_endpoint_agent_placeholder": "Please select an Agent", "com_endpoint_ai": "AI", "com_endpoint_anthropic_maxoutputtokens": "Maximum number of tokens that can be generated in the response. Specify a lower value for shorter responses and a higher value for longer responses. 
Note: models may stop before reaching this maximum.", @@ -236,8 +235,6 @@ "com_endpoint_assistant": "Assistant", "com_endpoint_assistant_model": "Assistant Model", "com_endpoint_assistant_placeholder": "Please select an Assistant from the right-hand Side Panel", - "com_endpoint_completion": "Completion", - "com_endpoint_completion_model": "Completion Model (Recommended: GPT-4)", "com_endpoint_config_click_here": "Click Here", "com_endpoint_config_google_api_info": "To get your Generative Language API key (for Gemini),", "com_endpoint_config_google_api_key": "Google API Key", @@ -265,18 +262,13 @@ "com_endpoint_custom_name": "Custom Name", "com_endpoint_default": "default", "com_endpoint_default_blank": "default: blank", - "com_endpoint_default_empty": "default: empty", "com_endpoint_default_with_num": "default: {{0}}", - "com_endpoint_deprecated": "Deprecated", - "com_endpoint_deprecated_info": "This endpoint is deprecated and may be removed in future versions, please use the agent endpoint instead", - "com_endpoint_deprecated_info_a11y": "The plugin endpoint is deprecated and may be removed in future versions, please use the agent endpoint instead", "com_endpoint_disable_streaming": "Disable streaming responses and receive the complete response at once. Useful for models like o3 that require organization verification for streaming", "com_endpoint_disable_streaming_label": "Disable Streaming", "com_endpoint_examples": " Presets", "com_endpoint_export": "Export", "com_endpoint_export_share": "Export/Share", "com_endpoint_frequency_penalty": "Frequency Penalty", - "com_endpoint_func_hover": "Enable use of Plugins as OpenAI Functions", "com_endpoint_google_custom_name_placeholder": "Set a custom name for Google", "com_endpoint_google_maxoutputtokens": "Maximum number of tokens that can be generated in the response. Specify a lower value for shorter responses and a higher value for longer responses. Note: models may stop before reaching this maximum.", "com_endpoint_google_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.", @@ -314,9 +306,6 @@ "com_endpoint_output": "Output", "com_endpoint_plug_image_detail": "Image Detail", "com_endpoint_plug_resend_files": "Resend Files", - "com_endpoint_plug_set_custom_instructions_for_gpt_placeholder": "Set custom instructions to include in System Message. Default: none", - "com_endpoint_plug_skip_completion": "Skip Completion", - "com_endpoint_plug_use_functions": "Use Functions", "com_endpoint_presence_penalty": "Presence Penalty", "com_endpoint_preset": "preset", "com_endpoint_preset_custom_name_placeholder": "something needs to go here. was empty", @@ -348,7 +337,6 @@ "com_endpoint_search_models": "Search models...", "com_endpoint_search_var": "Search {{0}}...", "com_endpoint_set_custom_name": "Set a custom name, in case you can find this preset", - "com_endpoint_skip_hover": "Enable skipping the completion step, which reviews the final answer and generated steps", "com_endpoint_stop": "Stop Sequences", "com_endpoint_stop_placeholder": "Separate values by pressing `Enter`", "com_endpoint_temperature": "Temperature", @@ -550,10 +538,6 @@ "com_nav_open_sidebar": "Open sidebar", "com_nav_playback_rate": "Audio Playback Rate", "com_nav_plugin_auth_error": "There was an error attempting to authenticate this plugin. 
Please try again.", - "com_nav_plugin_install": "Install", - "com_nav_plugin_search": "Search plugins", - "com_nav_plugin_store": "Plugin store", - "com_nav_plugin_uninstall": "Uninstall", "com_nav_plus_command": "+-Command", "com_nav_plus_command_description": "Toggle command \"+\" for adding a multi-response setting", "com_nav_profile_picture": "Profile Picture", @@ -594,8 +578,6 @@ "com_nav_user_msg_markdown": "Render user messages as markdown", "com_nav_user_name_display": "Display username in messages", "com_nav_voice_select": "Voice", - "com_show_agent_settings": "Show Agent Settings", - "com_show_completion_settings": "Show Completion Settings", "com_show_examples": "Show Examples", "com_sidepanel_agent_builder": "Agent Builder", "com_sidepanel_assistant_builder": "Assistant Builder", @@ -771,6 +753,7 @@ "com_ui_bookmarks_title": "Title", "com_ui_bookmarks_update_error": "There was an error updating the bookmark", "com_ui_bookmarks_update_success": "Bookmark updated successfully", + "com_ui_by_author": "by {{0}}", "com_ui_bulk_delete_error": "Failed to delete shared links", "com_ui_callback_url": "Callback URL", "com_ui_cancel": "Cancel", @@ -1223,7 +1206,6 @@ "com_ui_select_provider_first": "Select a provider first", "com_ui_select_region": "Select a region", "com_ui_select_search_model": "Search model by name", - "com_ui_select_search_plugin": "Search plugin by name", "com_ui_select_search_provider": "Search provider by name", "com_ui_select_search_region": "Search region by name", "com_ui_set": "Set", diff --git a/client/src/store/endpoints.ts b/client/src/store/endpoints.ts index 4d01997184..9815b7dde1 100644 --- a/client/src/store/endpoints.ts +++ b/client/src/store/endpoints.ts @@ -8,8 +8,6 @@ const defaultConfig: TEndpointsConfig = { [EModelEndpoint.assistants]: null, [EModelEndpoint.agents]: null, [EModelEndpoint.openAI]: null, - [EModelEndpoint.chatGPTBrowser]: null, - [EModelEndpoint.gptPlugins]: null, [EModelEndpoint.google]: null, [EModelEndpoint.anthropic]: null, [EModelEndpoint.custom]: null, @@ -25,14 +23,6 @@ const endpointsQueryEnabled = atom({ default: true, }); -const plugins = selector({ - key: 'plugins', - get: ({ get }) => { - const config = get(endpointsConfig) || {}; - return config.gptPlugins?.plugins || {}; - }, -}); - const endpointsFilter = selector({ key: 'endpointsFilter', get: ({ get }) => { @@ -47,7 +37,6 @@ const endpointsFilter = selector({ }); export default { - plugins, endpointsConfig, endpointsFilter, defaultConfig, diff --git a/client/src/store/families.ts b/client/src/store/families.ts index f97f3d3a09..0d630296db 100644 --- a/client/src/store/families.ts +++ b/client/src/store/families.ts @@ -203,11 +203,6 @@ const optionSettingsFamily = atomFamily({ default: {}, }); -const showAgentSettingsFamily = atomFamily({ - key: 'showAgentSettingsByIndex', - default: false, -}); - const showPopoverFamily = atomFamily({ key: 'showPopoverByIndex', default: false, @@ -403,7 +398,6 @@ export default { abortScrollFamily, isSubmittingFamily, optionSettingsFamily, - showAgentSettingsFamily, showPopoverFamily, latestMessageFamily, messagesSiblingIdxFamily, diff --git a/client/src/store/settings.ts b/client/src/store/settings.ts index ed83c8a4b1..50c1ce3d54 100644 --- a/client/src/store/settings.ts +++ b/client/src/store/settings.ts @@ -8,8 +8,6 @@ const staticAtoms = { abortScroll: atom({ key: 'abortScroll', default: false }), showFiles: atom({ key: 'showFiles', default: false }), optionSettings: atom({ key: 'optionSettings', default: {} }), - 
showPluginStoreDialog: atom({ key: 'showPluginStoreDialog', default: false }), - showAgentSettings: atom({ key: 'showAgentSettings', default: false }), currentSettingsView: atom({ key: 'currentSettingsView', default: SettingsViews.default, diff --git a/client/src/utils/buildDefaultConvo.ts b/client/src/utils/buildDefaultConvo.ts index dd1e356032..497013e763 100644 --- a/client/src/utils/buildDefaultConvo.ts +++ b/client/src/utils/buildDefaultConvo.ts @@ -31,12 +31,8 @@ const buildDefaultConvo = ({ const availableModels = models; const model = lastConversationSetup?.model ?? lastSelectedModel?.[endpoint] ?? ''; - const secondaryModel: string | null = - endpoint === EModelEndpoint.gptPlugins - ? (lastConversationSetup?.agentOptions?.model ?? lastSelectedModel?.secondaryModel ?? null) - : null; - let possibleModels: string[], secondaryModels: string[]; + let possibleModels: string[]; if (availableModels.includes(model)) { possibleModels = [model, ...availableModels]; @@ -44,19 +40,12 @@ const buildDefaultConvo = ({ possibleModels = [...availableModels]; } - if (secondaryModel != null && secondaryModel !== '' && availableModels.includes(secondaryModel)) { - secondaryModels = [secondaryModel, ...availableModels]; - } else { - secondaryModels = [...availableModels]; - } - const convo = parseConvo({ endpoint: endpoint as EndpointSchemaKey, endpointType: endpointType as EndpointSchemaKey, conversation: lastConversationSetup, possibleValues: { models: possibleModels, - secondaryModels, }, }); diff --git a/client/src/utils/convos.spec.ts b/client/src/utils/convos.spec.ts index 2bcc95e999..7bf94c33c6 100644 --- a/client/src/utils/convos.spec.ts +++ b/client/src/utils/convos.spec.ts @@ -539,19 +539,6 @@ describe('Conversation Utilities', () => { expect([undefined, 'gpt-3']).toContain(stored.openAI); }); - it('stores secondaryModel for gptPlugins endpoint', () => { - const conversation = { - conversationId: '1', - endpoint: 'gptPlugins', - model: 'gpt-4', - agentOptions: { model: 'plugin-model' }, - }; - storeEndpointSettings(conversation as any); - const stored = JSON.parse(localStorage.getItem('lastModel') || '{}'); - expect([undefined, 'gpt-4']).toContain(stored.gptPlugins); - expect([undefined, 'plugin-model']).toContain(stored.secondaryModel); - }); - it('does nothing if conversation is null', () => { storeEndpointSettings(null); expect(localStorage.getItem('lastModel')).toBeNull(); diff --git a/client/src/utils/convos.ts b/client/src/utils/convos.ts index f5746c7d3e..f60fab40a8 100644 --- a/client/src/utils/convos.ts +++ b/client/src/utils/convos.ts @@ -1,3 +1,5 @@ +import { QueryClient } from '@tanstack/react-query'; +import { LocalStorageKeys, QueryKeys } from 'librechat-data-provider'; import { format, isToday, @@ -8,8 +10,6 @@ import { startOfYear, isWithinInterval, } from 'date-fns'; -import { QueryClient } from '@tanstack/react-query'; -import { EModelEndpoint, LocalStorageKeys, QueryKeys } from 'librechat-data-provider'; import type { TConversation, GroupedConversations } from 'librechat-data-provider'; import type { InfiniteData } from '@tanstack/react-query'; @@ -306,15 +306,12 @@ export function storeEndpointSettings(conversation: TConversation | null) { if (!conversation) { return; } - const { endpoint, model, agentOptions } = conversation; + const { endpoint, model } = conversation; if (!endpoint) { return; } const lastModel = JSON.parse(localStorage.getItem(LocalStorageKeys.LAST_MODEL) ?? 
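/*
 * After this hunk the stored map is plain endpoint -> last model, with no
 * gptPlugins secondary model. Illustrative stored value (example only):
 *
 *   localStorage[LocalStorageKeys.LAST_MODEL]
 *   // '{"openAI":"gpt-4o","anthropic":"claude-3-5-sonnet"}'
 */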
'{}'); lastModel[endpoint] = model; - if (endpoint === EModelEndpoint.gptPlugins) { - lastModel.secondaryModel = agentOptions?.model ?? model ?? ''; - } localStorage.setItem(LocalStorageKeys.LAST_MODEL, JSON.stringify(lastModel)); } diff --git a/e2e/specs/keys.spec.ts b/e2e/specs/keys.spec.ts index 5b0c3a1fc4..2daff9e461 100644 --- a/e2e/specs/keys.spec.ts +++ b/e2e/specs/keys.spec.ts @@ -14,7 +14,7 @@ test.describe('Key suite', () => { // npx playwright test --config=e2e/playwright.config.local.ts --headed e2e/specs/keys.spec.ts test('Test Setting and Revoking Keys', async ({ page }) => { await page.goto('http://localhost:3080/', { timeout: 5000 }); - const endpoint = 'chatGPTBrowser'; + const endpoint = 'openAI'; const newTopicButton = page.getByTestId('new-conversation-menu'); await newTopicButton.click(); diff --git a/e2e/specs/messages.spec.ts b/e2e/specs/messages.spec.ts index a19295bcda..91131701c8 100644 --- a/e2e/specs/messages.spec.ts +++ b/e2e/specs/messages.spec.ts @@ -3,7 +3,7 @@ import type { Response, Page, BrowserContext } from '@playwright/test'; const basePath = 'http://localhost:3080/c/'; const initialUrl = `${basePath}new`; -const endpoints = ['google', 'openAI', 'azureOpenAI', 'chatGPTBrowser', 'gptPlugins']; +const endpoints = ['google', 'openAI', 'azureOpenAI']; const endpoint = endpoints[1]; function isUUID(uuid: string) { diff --git a/helm/librechat/values.yaml b/helm/librechat/values.yaml index 169604bfe4..a15b681de2 100755 --- a/helm/librechat/values.yaml +++ b/helm/librechat/values.yaml @@ -34,8 +34,6 @@ global: librechat: configEnv: - PLUGIN_MODELS: gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613 - DEBUG_PLUGINS: "true" # IMPORTANT -- GENERATE your own: openssl rand -hex 32 and openssl rand -hex 16 for CREDS_IV. Best Practise: Put into Secret. 
See global.librechat.existingSecretName CREDS_KEY: 9e95d9894da7e68dd69c0046caf5343c8b1e80c89609b5a1e40e6568b5b23ce6 CREDS_IV: ac028c86ba23f4cd48165e0ca9f2c683 diff --git a/package-lock.json b/package-lock.json index cb4976164f..7cfc0e4d50 100644 --- a/package-lock.json +++ b/package-lock.json @@ -48,26 +48,20 @@ "version": "v0.8.1", "license": "ISC", "dependencies": { - "@anthropic-ai/sdk": "^0.52.0", "@aws-sdk/client-s3": "^3.758.0", "@aws-sdk/s3-request-presigner": "^3.758.0", "@azure/identity": "^4.7.0", "@azure/search-documents": "^12.0.0", "@azure/storage-blob": "^12.27.0", - "@google/generative-ai": "^0.24.0", "@googleapis/youtube": "^20.0.0", "@keyv/redis": "^4.3.3", "@langchain/core": "^0.3.79", - "@langchain/google-genai": "^0.2.13", - "@langchain/google-vertexai": "^0.2.13", - "@langchain/textsplitters": "^0.1.0", "@librechat/agents": "^3.0.50", "@librechat/api": "*", "@librechat/data-schemas": "*", "@microsoft/microsoft-graph-client": "^3.0.7", "@modelcontextprotocol/sdk": "^1.21.0", "@node-saml/passport-saml": "^5.1.0", - "@waylaidwanderer/fetch-event-source": "^3.0.1", "axios": "^1.12.1", "bcryptjs": "^2.4.3", "compression": "^1.8.1", @@ -86,7 +80,6 @@ "file-type": "^18.7.0", "firebase": "^11.0.2", "form-data": "^4.0.4", - "googleapis": "^126.0.1", "handlebars": "^4.7.7", "https-proxy-agent": "^7.0.6", "ioredis": "^5.3.2", @@ -137,15 +130,6 @@ "supertest": "^7.1.0" } }, - "api/node_modules/@anthropic-ai/sdk": { - "version": "0.52.0", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.52.0.tgz", - "integrity": "sha512-d4c+fg+xy9e46c8+YnrrgIQR45CZlAi7PwdzIfDXDM6ACxEZli1/fxhURsq30ZpMZy6LvSkr41jGq5aF5TD7rQ==", - "license": "MIT", - "bin": { - "anthropic-ai-sdk": "bin/cli" - } - }, "api/node_modules/@node-saml/node-saml": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/@node-saml/node-saml/-/node-saml-5.1.0.tgz", @@ -25279,15 +25263,6 @@ "win32" ] }, - "node_modules/@waylaidwanderer/fetch-event-source": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@waylaidwanderer/fetch-event-source/-/fetch-event-source-3.0.1.tgz", - "integrity": "sha512-gkc7vmBW9uulRj7tY30/1D8iBrpcgphBpI+e7LP744x/hAzaQxUuyF+n4O5dctKx+dE3i4BFuCWMEz9fAx2jlQ==", - "license": "MIT", - "engines": { - "node": ">=16.15" - } - }, "node_modules/@webassemblyjs/ast": { "version": "1.12.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", @@ -30996,18 +30971,6 @@ "node": ">=14" } }, - "node_modules/googleapis": { - "version": "126.0.1", - "resolved": "https://registry.npmjs.org/googleapis/-/googleapis-126.0.1.tgz", - "integrity": "sha512-4N8LLi+hj6ytK3PhE52KcM8iSGhJjtXnCDYB4fp6l+GdLbYz4FoDmx074WqMbl7iYMDN87vqD/8drJkhxW92mQ==", - "dependencies": { - "google-auth-library": "^9.0.0", - "googleapis-common": "^7.0.0" - }, - "engines": { - "node": ">=14.0.0" - } - }, "node_modules/googleapis-common": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/googleapis-common/-/googleapis-common-7.0.1.tgz", diff --git a/packages/data-provider/src/config.ts b/packages/data-provider/src/config.ts index be8cfe0d62..1604e8588b 100644 --- a/packages/data-provider/src/config.ts +++ b/packages/data-provider/src/config.ts @@ -860,7 +860,6 @@ export const configSchema = z.object({ [EModelEndpoint.openAI]: baseEndpointSchema.optional(), [EModelEndpoint.google]: baseEndpointSchema.optional(), [EModelEndpoint.anthropic]: baseEndpointSchema.optional(), - [EModelEndpoint.gptPlugins]: baseEndpointSchema.optional(), [EModelEndpoint.azureOpenAI]: 
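/*
 * Every surviving endpoints key stays optional, so a config declaring a single
 * provider still parses; a sketch, assuming `version` remains the only other
 * required top-level field:
 *
 *   configSchema.parse({ version: '1.0.0', endpoints: { openAI: {} } });
 */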
azureEndpointSchema.optional(), [EModelEndpoint.azureAssistants]: assistantEndpointSchema.optional(), [EModelEndpoint.assistants]: assistantEndpointSchema.optional(), @@ -936,8 +935,6 @@ export const defaultEndpoints: EModelEndpoint[] = [ EModelEndpoint.azureAssistants, EModelEndpoint.azureOpenAI, EModelEndpoint.agents, - EModelEndpoint.chatGPTBrowser, - EModelEndpoint.gptPlugins, EModelEndpoint.google, EModelEndpoint.anthropic, EModelEndpoint.custom, @@ -950,8 +947,6 @@ export const alternateName = { [EModelEndpoint.agents]: 'My Agents', [EModelEndpoint.azureAssistants]: 'Azure Assistants', [EModelEndpoint.azureOpenAI]: 'Azure OpenAI', - [EModelEndpoint.chatGPTBrowser]: 'ChatGPT', - [EModelEndpoint.gptPlugins]: 'Plugins', [EModelEndpoint.google]: 'Google', [EModelEndpoint.anthropic]: 'Anthropic', [EModelEndpoint.custom]: 'Custom', @@ -1098,9 +1093,7 @@ export const initialModelsConfig: TModelsConfig = { [EModelEndpoint.openAI]: openAIModels, [EModelEndpoint.assistants]: openAIModels.filter(fitlerAssistantModels), [EModelEndpoint.agents]: openAIModels, // TODO: Add agent models (agentsModels) - [EModelEndpoint.gptPlugins]: openAIModels, [EModelEndpoint.azureOpenAI]: openAIModels, - [EModelEndpoint.chatGPTBrowser]: ['text-davinci-002-render-sha'], [EModelEndpoint.google]: defaultModels[EModelEndpoint.google], [EModelEndpoint.anthropic]: defaultModels[EModelEndpoint.anthropic], [EModelEndpoint.bedrock]: defaultModels[EModelEndpoint.bedrock], @@ -1113,7 +1106,6 @@ export const EndpointURLs = { } as const; export const modularEndpoints = new Set([ - EModelEndpoint.gptPlugins, EModelEndpoint.anthropic, EModelEndpoint.google, EModelEndpoint.openAI, @@ -1127,7 +1119,6 @@ export const supportsBalanceCheck = { [EModelEndpoint.custom]: true, [EModelEndpoint.openAI]: true, [EModelEndpoint.anthropic]: true, - [EModelEndpoint.gptPlugins]: true, [EModelEndpoint.assistants]: true, [EModelEndpoint.agents]: true, [EModelEndpoint.azureAssistants]: true, @@ -1243,10 +1234,6 @@ export enum CacheKeys { * Key for the roles cache. */ ROLES = 'ROLES', - /** - * Key for the plugins cache. - */ - PLUGINS = 'PLUGINS', /** * Key for the title generation cache. 
*/ diff --git a/packages/data-provider/src/parsers.ts b/packages/data-provider/src/parsers.ts index c4fea469ee..6fee90e925 100644 --- a/packages/data-provider/src/parsers.ts +++ b/packages/data-provider/src/parsers.ts @@ -10,11 +10,9 @@ import { EModelEndpoint, anthropicSchema, assistantSchema, - gptPluginsSchema, // agentsSchema, compactAgentsSchema, compactGoogleSchema, - compactPluginsSchema, compactAssistantSchema, } from './schemas'; import { bedrockInputSchema } from './bedrock'; @@ -24,12 +22,11 @@ type EndpointSchema = | typeof openAISchema | typeof googleSchema | typeof anthropicSchema - | typeof gptPluginsSchema | typeof assistantSchema | typeof compactAgentsSchema | typeof bedrockInputSchema; -export type EndpointSchemaKey = Exclude; +export type EndpointSchemaKey = EModelEndpoint; const endpointSchemas: Record = { [EModelEndpoint.openAI]: openAISchema, @@ -37,7 +34,6 @@ const endpointSchemas: Record = { [EModelEndpoint.custom]: openAISchema, [EModelEndpoint.google]: googleSchema, [EModelEndpoint.anthropic]: anthropicSchema, - [EModelEndpoint.gptPlugins]: gptPluginsSchema, [EModelEndpoint.assistants]: assistantSchema, [EModelEndpoint.azureAssistants]: assistantSchema, [EModelEndpoint.agents]: compactAgentsSchema, @@ -57,8 +53,6 @@ export function getEnabledEndpoints() { EModelEndpoint.azureAssistants, EModelEndpoint.azureOpenAI, EModelEndpoint.google, - EModelEndpoint.chatGPTBrowser, - EModelEndpoint.gptPlugins, EModelEndpoint.anthropic, EModelEndpoint.bedrock, ]; @@ -143,7 +137,6 @@ export function getNonEmptyValue(possibleValues: string[]) { export type TPossibleValues = { models: string[]; - secondaryModels?: string[]; }; export const parseConvo = ({ @@ -172,16 +165,12 @@ export const parseConvo = ({ // } const convo = schema?.parse(conversation) as s.TConversation | undefined; - const { models, secondaryModels } = possibleValues ?? {}; + const { models } = possibleValues ?? {}; if (models && convo) { convo.model = getFirstDefinedValue(models) ?? convo.model; } - if (secondaryModels && convo?.agentOptions) { - convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model; - } - return convo; }; @@ -225,13 +214,7 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string => const chatGptLabel = _cgl ?? ''; const modelLabel = _ml ?? ''; if ( - [ - EModelEndpoint.openAI, - EModelEndpoint.bedrock, - EModelEndpoint.gptPlugins, - EModelEndpoint.azureOpenAI, - EModelEndpoint.chatGPTBrowser, - ].includes(endpoint) + [EModelEndpoint.openAI, EModelEndpoint.bedrock, EModelEndpoint.azureOpenAI].includes(endpoint) ) { if (chatGptLabel) { return chatGptLabel; @@ -247,7 +230,7 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string => const gptVersion = extractGPTVersion(model); return gptVersion || 'GPT'; } - return (alternateName[endpoint] as string | undefined) ?? 'ChatGPT'; + return (alternateName[endpoint] as string | undefined) ?? 
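/*
 * The sender fallback chain is now roughly: chatGptLabel -> GPT version
 * extracted from the model name -> the endpoint's alternateName -> 'AI'
 * (previously 'ChatGPT'). Sketch:
 *
 *   getResponseSender({ endpoint: EModelEndpoint.openAI, model: 'gpt-4' });
 *   // -> 'GPT-4'; 'AI' only when nothing more specific matches
 */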
'AI'; } if (endpoint === EModelEndpoint.anthropic) { @@ -298,8 +281,7 @@ type CompactEndpointSchema = | typeof compactAgentsSchema | typeof compactGoogleSchema | typeof anthropicSchema - | typeof bedrockInputSchema - | typeof compactPluginsSchema; + | typeof bedrockInputSchema; const compactEndpointSchemas: Record = { [EModelEndpoint.openAI]: openAISchema, @@ -311,7 +293,6 @@ const compactEndpointSchemas: Record = [EModelEndpoint.google]: compactGoogleSchema, [EModelEndpoint.bedrock]: bedrockInputSchema, [EModelEndpoint.anthropic]: anthropicSchema, - [EModelEndpoint.gptPlugins]: compactPluginsSchema, }; export const parseCompactConvo = ({ @@ -348,17 +329,12 @@ export const parseCompactConvo = ({ const { iconURL: _clientIconURL, ...conversationWithoutIconURL } = conversation; const convo = schema.parse(conversationWithoutIconURL) as s.TConversation | null; - // const { models, secondaryModels } = possibleValues ?? {}; const { models } = possibleValues ?? {}; if (models && convo) { convo.model = getFirstDefinedValue(models) ?? convo.model; } - // if (secondaryModels && convo.agentOptions) { - // convo.agentOptionmodel = getFirstDefinedValue(secondaryModels) ?? convo.agentOptionmodel; - // } - return convo; }; diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index ecf0a925fc..7c9cb883b7 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -25,10 +25,6 @@ export enum EModelEndpoint { agents = 'agents', custom = 'custom', bedrock = 'bedrock', - /** @deprecated */ - chatGPTBrowser = 'chatGPTBrowser', - /** @deprecated */ - gptPlugins = 'gptPlugins', } /** Mirrors `@librechat/agents` providers */ @@ -529,16 +525,6 @@ export type TInput = { inputStr: string; }; -export type TResPlugin = { - plugin: string; - input: string; - thought: string; - loading?: boolean; - outputs?: string; - latest?: string; - inputs?: TInput[]; -}; - export const tExampleSchema = z.object({ input: z.object({ content: z.string(), @@ -550,39 +536,6 @@ export const tExampleSchema = z.object({ export type TExample = z.infer; -export enum EAgent { - functions = 'functions', - classic = 'classic', -} - -export const agentOptionSettings = { - model: { - default: 'gpt-4o-mini', - }, - temperature: { - min: 0, - max: 1, - step: 0.01, - default: 0, - }, - agent: { - default: EAgent.functions, - options: [EAgent.functions, EAgent.classic], - }, - skipCompletion: { - default: true, - }, -}; - -export const eAgentOptionsSchema = z.nativeEnum(EAgent); - -export const tAgentOptionsSchema = z.object({ - agent: z.string().default(EAgent.functions), - skipCompletion: z.boolean().default(agentOptionSettings.skipCompletion.default), - model: z.string(), - temperature: z.number().default(agentOptionSettings.temperature.default), -}); - export const tMessageSchema = z.object({ messageId: z.string(), endpoint: z.string().optional(), @@ -659,8 +612,6 @@ export type TAttachment = export type TMessage = z.input & { children?: TMessage[]; - plugin?: TResPlugin | null; - plugins?: TResPlugin[]; content?: TMessageContentParts[]; files?: Partial[]; depth?: number; @@ -775,8 +726,6 @@ export const tConversationSchema = z.object({ fileTokenLimit: coerceNumber.optional(), /** @deprecated */ resendImages: z.boolean().optional(), - /** @deprecated */ - agentOptions: tAgentOptionsSchema.nullable().optional(), /** @deprecated Prefer `modelLabel` over `chatGptLabel` */ chatGptLabel: z.string().nullable().optional(), }); @@ -982,75 +931,6 @@ export const 
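/*
 * The gptPluginsSchema removed below followed the same pick -> transform ->
 * catch shape as the schemas that remain; the pattern in miniature (sketch):
 *
 *   const exampleSchema = tConversationSchema
 *     .pick({ model: true, temperature: true })
 *     .transform((obj) => ({ ...obj, model: obj.model ?? 'gpt-3.5-turbo' }))
 *     .catch(() => ({ model: 'gpt-3.5-turbo' }));
 */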
googleGenConfigSchema = z .strip() .optional(); -const gptPluginsBaseSchema = tConversationSchema.pick({ - model: true, - modelLabel: true, - chatGptLabel: true, - promptPrefix: true, - temperature: true, - artifacts: true, - top_p: true, - presence_penalty: true, - frequency_penalty: true, - tools: true, - agentOptions: true, - iconURL: true, - greeting: true, - spec: true, - maxContextTokens: true, -}); - -export const gptPluginsSchema = gptPluginsBaseSchema - .transform((obj) => { - const result = { - ...obj, - model: obj.model ?? 'gpt-3.5-turbo', - chatGptLabel: obj.chatGptLabel ?? obj.modelLabel ?? null, - promptPrefix: obj.promptPrefix ?? null, - temperature: obj.temperature ?? 0.8, - top_p: obj.top_p ?? 1, - presence_penalty: obj.presence_penalty ?? 0, - frequency_penalty: obj.frequency_penalty ?? 0, - tools: obj.tools ?? [], - agentOptions: obj.agentOptions ?? { - agent: EAgent.functions, - skipCompletion: true, - model: 'gpt-3.5-turbo', - temperature: 0, - }, - iconURL: obj.iconURL ?? undefined, - greeting: obj.greeting ?? undefined, - spec: obj.spec ?? undefined, - maxContextTokens: obj.maxContextTokens ?? undefined, - }; - - if (obj.modelLabel != null && obj.modelLabel !== '') { - result.modelLabel = null; - } - - return result; - }) - .catch(() => ({ - model: 'gpt-3.5-turbo', - chatGptLabel: null, - promptPrefix: null, - temperature: 0.8, - top_p: 1, - presence_penalty: 0, - frequency_penalty: 0, - tools: [], - agentOptions: { - agent: EAgent.functions, - skipCompletion: true, - model: 'gpt-3.5-turbo', - temperature: 0, - }, - iconURL: undefined, - greeting: undefined, - spec: undefined, - maxContextTokens: undefined, - })); - export function removeNullishValues>( obj: T, removeEmptyStrings?: boolean, @@ -1251,48 +1131,6 @@ export const anthropicSchema = anthropicBaseSchema .transform((obj) => removeNullishValues(obj)) .catch(() => ({})); -export const compactPluginsSchema = gptPluginsBaseSchema - .transform((obj) => { - const newObj: Partial = { ...obj }; - if (newObj.modelLabel === null) { - delete newObj.modelLabel; - } - if (newObj.chatGptLabel === null) { - delete newObj.chatGptLabel; - } - if (newObj.promptPrefix === null) { - delete newObj.promptPrefix; - } - if (newObj.temperature === 0.8) { - delete newObj.temperature; - } - if (newObj.top_p === 1) { - delete newObj.top_p; - } - if (newObj.presence_penalty === 0) { - delete newObj.presence_penalty; - } - if (newObj.frequency_penalty === 0) { - delete newObj.frequency_penalty; - } - if (newObj.tools?.length === 0) { - delete newObj.tools; - } - - if ( - newObj.agentOptions && - newObj.agentOptions.agent === EAgent.functions && - newObj.agentOptions.skipCompletion === true && - newObj.agentOptions.model === 'gpt-3.5-turbo' && - newObj.agentOptions.temperature === 0 - ) { - delete newObj.agentOptions; - } - - return removeNullishValues(newObj); - }) - .catch(() => ({})); - export const tBannerSchema = z.object({ bannerId: z.string(), message: z.string(), diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index fd6190f50e..997e133be1 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -1,13 +1,12 @@ import type { InfiniteData } from '@tanstack/react-query'; import type { - TBanner, - TMessage, - TResPlugin, - TSharedLink, - TConversation, - EModelEndpoint, TConversationTag, + EModelEndpoint, + TConversation, + TSharedLink, TAttachment, + TMessage, + TBanner, } from './schemas'; import type { SettingDefinition } from './generate'; import type { 
TMinimalFeedback } from './feedback'; @@ -125,8 +124,6 @@ export type TEditedContent = }; export type TSubmission = { - plugin?: TResPlugin; - plugins?: TResPlugin[]; userMessage: TMessage; isEdited?: boolean; isContinued?: boolean; diff --git a/packages/data-schemas/src/models/plugins/mongoMeili.ts b/packages/data-schemas/src/models/plugins/mongoMeili.ts index eacb3f2b60..7c0086e2d1 100644 --- a/packages/data-schemas/src/models/plugins/mongoMeili.ts +++ b/packages/data-schemas/src/models/plugins/mongoMeili.ts @@ -1,5 +1,6 @@ import _ from 'lodash'; import { MeiliSearch } from 'meilisearch'; +import { parseTextParts } from 'librechat-data-provider'; import type { SearchResponse, SearchParams, Index } from 'meilisearch'; import type { CallbackWithoutResultAndOptionalError, @@ -28,11 +29,6 @@ interface MeiliIndexable { _meiliIndex?: boolean; } -interface ContentItem { - type: string; - text?: string; -} - interface SyncProgress { lastSyncedId?: string; totalProcessed: number; @@ -100,29 +96,6 @@ const getSyncConfig = () => ({ delayMs: parseInt(process.env.MEILI_SYNC_DELAY_MS || '100', 10), }); -/** - * Local implementation of parseTextParts to avoid dependency on librechat-data-provider - * Extracts text content from an array of content items - */ -const parseTextParts = (content: ContentItem[]): string => { - if (!Array.isArray(content)) { - return ''; - } - - return content - .filter((item) => item.type === 'text' && typeof item.text === 'string') - .map((item) => item.text) - .join(' ') - .trim(); -}; - -/** - * Local implementation to handle Bing convoId conversion - */ -const cleanUpPrimaryKeyValue = (value: string): string => { - return value.replace(/--/g, '|'); -}; - /** * Validates the required options for configuring the mongoMeili plugin. */ @@ -393,9 +366,7 @@ const createMeiliMongooseModel = ({ if (populate) { const query: Record = {}; - query[primaryKey] = _.map(data.hits, (hit) => - cleanUpPrimaryKeyValue(hit[primaryKey] as string), - ); + query[primaryKey] = _.map(data.hits, (hit) => hit[primaryKey]); const projection = Object.keys(this.schema.obj).reduce>( (results, key) => { diff --git a/packages/data-schemas/src/schema/convo.ts b/packages/data-schemas/src/schema/convo.ts index 4c3f09373f..2933f2e95b 100644 --- a/packages/data-schemas/src/schema/convo.ts +++ b/packages/data-schemas/src/schema/convo.ts @@ -22,9 +22,6 @@ const convoSchema: Schema = new Schema( meiliIndex: true, }, messages: [{ type: Schema.Types.ObjectId, ref: 'Message' }], - agentOptions: { - type: Schema.Types.Mixed, - }, ...conversationPreset, agent_id: { type: String, diff --git a/packages/data-schemas/src/schema/defaults.ts b/packages/data-schemas/src/schema/defaults.ts index 43ac33b34d..0ebc5e3665 100644 --- a/packages/data-schemas/src/schema/defaults.ts +++ b/packages/data-schemas/src/schema/defaults.ts @@ -2,7 +2,6 @@ import { Schema } from 'mongoose'; // @ts-ignore export const conversationPreset = { - // endpoint: [azureOpenAI, openAI, anthropic, chatGPTBrowser] endpoint: { type: String, default: null, @@ -11,7 +10,7 @@ export const conversationPreset = { endpointType: { type: String, }, - // for azureOpenAI, openAI, chatGPTBrowser only + // for azureOpenAI, openAI only model: { type: String, required: false, diff --git a/packages/data-schemas/src/schema/message.ts b/packages/data-schemas/src/schema/message.ts index f287f14ca4..5dece654d7 100644 --- a/packages/data-schemas/src/schema/message.ts +++ b/packages/data-schemas/src/schema/message.ts @@ -101,25 +101,6 @@ const messageSchema: Schema = new 
Schema( default: false, }, files: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined }, - plugin: { - type: { - latest: { - type: String, - required: false, - }, - inputs: { - type: [mongoose.Schema.Types.Mixed], - required: false, - default: undefined, - }, - outputs: { - type: String, - required: false, - }, - }, - default: undefined, - }, - plugins: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined }, content: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined, diff --git a/packages/data-schemas/src/schema/preset.ts b/packages/data-schemas/src/schema/preset.ts index 36e2c9e8b9..fb0b1bb7ff 100644 --- a/packages/data-schemas/src/schema/preset.ts +++ b/packages/data-schemas/src/schema/preset.ts @@ -1,4 +1,4 @@ -import mongoose, { Schema, Document } from 'mongoose'; +import { Schema, Document } from 'mongoose'; import { conversationPreset } from './defaults'; // @ts-ignore @@ -52,8 +52,6 @@ export interface IPreset extends Document { web_search?: boolean; disableStreaming?: boolean; fileTokenLimit?: number; - // end of additional fields - agentOptions?: unknown; } const presetSchema: Schema = new Schema( @@ -80,10 +78,6 @@ const presetSchema: Schema = new Schema( type: Number, }, ...conversationPreset, - agentOptions: { - type: mongoose.Schema.Types.Mixed, - default: null, - }, }, { timestamps: true }, ); diff --git a/packages/data-schemas/src/types/app.ts b/packages/data-schemas/src/types/app.ts index 324aa151e4..751e6a81d0 100644 --- a/packages/data-schemas/src/types/app.ts +++ b/packages/data-schemas/src/types/app.ts @@ -99,8 +99,6 @@ export interface AppConfig { bedrock?: Partial; /** Anthropic endpoint configuration */ anthropic?: Partial; - /** GPT plugins endpoint configuration */ - gptPlugins?: Partial; /** Azure OpenAI endpoint configuration */ azureOpenAI?: TAzureConfig; /** Assistants endpoint configuration */ diff --git a/packages/data-schemas/src/types/convo.ts b/packages/data-schemas/src/types/convo.ts index 9e77dc905b..d3af4ff48d 100644 --- a/packages/data-schemas/src/types/convo.ts +++ b/packages/data-schemas/src/types/convo.ts @@ -6,7 +6,6 @@ export interface IConversation extends Document { title?: string; user?: string; messages?: Types.ObjectId[]; - agentOptions?: unknown; // Fields provided by conversationPreset (adjust types as needed) endpoint?: string; endpointType?: string;