From c6020881782e14f756cfe76a2d8645a612e5beb2 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 15 Oct 2025 15:12:32 +0300 Subject: [PATCH 01/37] =?UTF-8?q?=F0=9F=93=B1=20fix:=20Improve=20Mobile=20?= =?UTF-8?q?Chat=20Focus=20Detection=20and=20Navigation=20(#10125)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/src/hooks/Chat/useFocusChatEffect.ts | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/client/src/hooks/Chat/useFocusChatEffect.ts b/client/src/hooks/Chat/useFocusChatEffect.ts index fefb2c9584..17c41fef01 100644 --- a/client/src/hooks/Chat/useFocusChatEffect.ts +++ b/client/src/hooks/Chat/useFocusChatEffect.ts @@ -12,12 +12,22 @@ export default function useFocusChatEffect(textAreaRef: React.RefObject Date: Wed, 15 Oct 2025 16:20:54 +0300 Subject: [PATCH 02/37] =?UTF-8?q?=F0=9F=AA=82=20refactor:=20OCR=20Fallback?= =?UTF-8?q?=20for=20"Upload=20as=20Text"=20File=20Process=20(#10126)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/server/services/Files/process.js | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/api/server/services/Files/process.js b/api/server/services/Files/process.js index 5e945f0e36..701412523d 100644 --- a/api/server/services/Files/process.js +++ b/api/server/services/Files/process.js @@ -598,11 +598,22 @@ const processAgentFileUpload = async ({ req, res, metadata }) => { if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) { throw new Error('OCR capability is not enabled for Agents'); } else if (shouldUseOCR) { - const { handleFileUpload: uploadOCR } = getStrategyFunctions( - appConfig?.ocr?.strategy ?? 
FileSources.mistral_ocr, - ); - const { text, bytes, filepath: ocrFileURL } = await uploadOCR({ req, file, loadAuthValues }); - return await createTextFile({ text, bytes, filepath: ocrFileURL }); + try { + const { handleFileUpload: uploadOCR } = getStrategyFunctions( + appConfig?.ocr?.strategy ?? FileSources.mistral_ocr, + ); + const { + text, + bytes, + filepath: ocrFileURL, + } = await uploadOCR({ req, file, loadAuthValues }); + return await createTextFile({ text, bytes, filepath: ocrFileURL }); + } catch (ocrError) { + logger.error( + `[processAgentFileUpload] OCR processing failed for file "${file.originalname}", falling back to text extraction:`, + ocrError, + ); + } } const shouldUseSTT = fileConfig.checkType( From f59daaeecc6299a3f432cf552d92a4159be5f278 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Thu, 16 Oct 2025 23:24:14 +0300 Subject: [PATCH 03/37] =?UTF-8?q?=F0=9F=93=84=20feat:=20Context=20Field=20?= =?UTF-8?q?for=20Anthropic=20Documents=20(PDF)=20(#10148)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: Remove ephemeral cache control from document encoding function * refactor: Improve document encoding types and add file context for anthropic messages api - Added AnthropicDocumentBlock interface to define the structure for documents from the Anthropic provider. - Updated encodeAndFormatDocuments function to utilize the new type and include optional context for filenames. - Refactored DocumentResult to use a union type for various document formats, improving type safety and clarity. 
--- packages/api/src/files/encode/document.ts | 13 +++-- packages/api/src/types/files.ts | 66 +++++++++++++++-------- 2 files changed, 53 insertions(+), 26 deletions(-) diff --git a/packages/api/src/files/encode/document.ts b/packages/api/src/files/encode/document.ts index bc1396958c..338a4ddea6 100644 --- a/packages/api/src/files/encode/document.ts +++ b/packages/api/src/files/encode/document.ts @@ -2,7 +2,7 @@ import { Providers } from '@librechat/agents'; import { isOpenAILikeProvider, isDocumentSupportedProvider } from 'librechat-data-provider'; import type { IMongoFile } from '@librechat/data-schemas'; import type { Request } from 'express'; -import type { StrategyFunctions, DocumentResult } from '~/types/files'; +import type { StrategyFunctions, DocumentResult, AnthropicDocumentBlock } from '~/types/files'; import { validatePdf } from '~/files/validation'; import { getFileStream } from './utils'; @@ -69,16 +69,21 @@ export async function encodeAndFormatDocuments( } if (provider === Providers.ANTHROPIC) { - result.documents.push({ + const document: AnthropicDocumentBlock = { type: 'document', source: { type: 'base64', media_type: 'application/pdf', data: content, }, - cache_control: { type: 'ephemeral' }, citations: { enabled: true }, - }); + }; + + if (file.filename) { + document.context = `File: "${file.filename}"`; + } + + result.documents.push(document); } else if (useResponsesApi) { result.documents.push({ type: 'input_file', diff --git a/packages/api/src/types/files.ts b/packages/api/src/types/files.ts index dc37410050..5b25f0b3e9 100644 --- a/packages/api/src/types/files.ts +++ b/packages/api/src/types/files.ts @@ -46,29 +46,51 @@ export interface VideoResult { }>; } +/** Anthropic document block format */ +export interface AnthropicDocumentBlock { + type: 'document'; + source: { + type: string; + media_type: string; + data: string; + }; + context?: string; + title?: string; + cache_control?: { type: string }; + citations?: { enabled: boolean }; +} + 
+/** Google document block format */ +export interface GoogleDocumentBlock { + type: 'document'; + mimeType: string; + data: string; +} + +/** OpenAI file block format */ +export interface OpenAIFileBlock { + type: 'file'; + file: { + filename: string; + file_data: string; + }; +} + +/** OpenAI Responses API file format */ +export interface OpenAIInputFileBlock { + type: 'input_file'; + filename: string; + file_data: string; +} + +export type DocumentBlock = + | AnthropicDocumentBlock + | GoogleDocumentBlock + | OpenAIFileBlock + | OpenAIInputFileBlock; + export interface DocumentResult { - documents: Array<{ - type: 'document' | 'file' | 'input_file'; - /** Anthropic File Format, `document` */ - source?: { - type: string; - media_type: string; - data: string; - }; - cache_control?: { type: string }; - citations?: { enabled: boolean }; - /** Google File Format, `document` */ - mimeType?: string; - data?: string; - /** OpenAI File Format, `file` */ - file?: { - filename?: string; - file_data?: string; - }; - /** OpenAI Responses API File Format, `input_file` */ - filename?: string; - file_data?: string; - }>; + documents: DocumentBlock[]; files: Array<{ file_id?: string; temp_file_id?: string; From 114deecc4ebbc002ac20e289a0058c6663e637bd Mon Sep 17 00:00:00 2001 From: Sean McGrath Date: Fri, 17 Oct 2025 09:26:14 +1300 Subject: [PATCH 04/37] =?UTF-8?q?=F0=9F=9B=A0=EF=B8=8F=20chore:=20Add=20`@?= =?UTF-8?q?radix-ui/react-tooltip`=20to=20Artifact=20Dependencies=20(#1011?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/src/utils/artifacts.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/client/src/utils/artifacts.ts b/client/src/utils/artifacts.ts index af14ec6dca..e441481848 100644 --- a/client/src/utils/artifacts.ts +++ b/client/src/utils/artifacts.ts @@ -89,6 +89,7 @@ const standardDependencies = { '@radix-ui/react-slot': '^1.1.0', '@radix-ui/react-toggle': '^1.1.0', 
'@radix-ui/react-toggle-group': '^1.1.0', + '@radix-ui/react-tooltip': '^1.2.8', 'embla-carousel-react': '^8.2.0', 'react-day-picker': '^9.0.8', 'dat.gui': '^0.7.9', From d41b07c0afc97ebfa434ec69d188529166aee5aa Mon Sep 17 00:00:00 2001 From: Marco Beretta <81851188+berry-13@users.noreply.github.com> Date: Sat, 18 Oct 2025 11:50:34 +0200 Subject: [PATCH 05/37] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20refactor:=20Replace?= =?UTF-8?q?=20`fontSize`=20Recoil=20atom=20with=20Jotai=20(#10171)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: reapply chat font size on load * refactor: streamline font size handling in localStorage * fix: update matchMedia mock to accurately reflect desktop and touchscreen capabilities * refactor: implement Jotai for font size management and initialize on app load - Replaced Recoil with Jotai for font size state management across components. - Added a new `fontSize` atom to handle font size changes and persist them in localStorage. - Implemented `initializeFontSize` function to apply saved font size on app load. - Updated relevant components to utilize the new font size atom. 
--------- Co-authored-by: ddooochii Co-authored-by: Danny Avila --- client/src/App.jsx | 6 +++ .../components/Chat/Messages/MessageParts.tsx | 4 +- .../components/Chat/Messages/MessagesView.tsx | 4 +- .../Chat/Messages/SearchMessage.tsx | 4 +- .../Chat/Messages/ui/MessageRender.tsx | 4 +- .../src/components/Messages/ContentRender.tsx | 6 ++- .../SettingsTabs/Chat/FontSizeSelector.tsx | 9 ++-- client/src/components/Share/Message.tsx | 6 +-- .../__tests__/useFocusChatEffect.spec.tsx | 8 +-- client/src/store/fontSize.ts | 54 +++++++++++++++++++ client/src/store/settings.ts | 1 - 11 files changed, 87 insertions(+), 19 deletions(-) create mode 100644 client/src/store/fontSize.ts diff --git a/client/src/App.jsx b/client/src/App.jsx index decad9392b..eda775bc71 100644 --- a/client/src/App.jsx +++ b/client/src/App.jsx @@ -1,3 +1,4 @@ +import { useEffect } from 'react'; import { RecoilRoot } from 'recoil'; import { DndProvider } from 'react-dnd'; import { RouterProvider } from 'react-router-dom'; @@ -8,6 +9,7 @@ import { Toast, ThemeProvider, ToastProvider } from '@librechat/client'; import { QueryClient, QueryClientProvider, QueryCache } from '@tanstack/react-query'; import { ScreenshotProvider, useApiErrorBoundary } from './hooks'; import { getThemeFromEnv } from './utils/getThemeFromEnv'; +import { initializeFontSize } from '~/store/fontSize'; import { LiveAnnouncer } from '~/a11y'; import { router } from './routes'; @@ -24,6 +26,10 @@ const App = () => { }), }); + useEffect(() => { + initializeFontSize(); + }, []); + // Load theme from environment variables if available const envTheme = getThemeFromEnv(); diff --git a/client/src/components/Chat/Messages/MessageParts.tsx b/client/src/components/Chat/Messages/MessageParts.tsx index a79f0985d9..a993009915 100644 --- a/client/src/components/Chat/Messages/MessageParts.tsx +++ b/client/src/components/Chat/Messages/MessageParts.tsx @@ -1,10 +1,12 @@ import React, { useMemo } from 'react'; +import { useAtomValue } from 'jotai'; 
import { useRecoilValue } from 'recoil'; import type { TMessageContentParts } from 'librechat-data-provider'; import type { TMessageProps, TMessageIcon } from '~/common'; import { useMessageHelpers, useLocalize, useAttachments } from '~/hooks'; import MessageIcon from '~/components/Chat/Messages/MessageIcon'; import ContentParts from './Content/ContentParts'; +import { fontSizeAtom } from '~/store/fontSize'; import SiblingSwitch from './SiblingSwitch'; import MultiMessage from './MultiMessage'; import HoverButtons from './HoverButtons'; @@ -36,7 +38,7 @@ export default function Message(props: TMessageProps) { regenerateMessage, } = useMessageHelpers(props); - const fontSize = useRecoilValue(store.fontSize); + const fontSize = useAtomValue(fontSizeAtom); const maximizeChatSpace = useRecoilValue(store.maximizeChatSpace); const { children, messageId = null, isCreatedByUser } = message ?? {}; diff --git a/client/src/components/Chat/Messages/MessagesView.tsx b/client/src/components/Chat/Messages/MessagesView.tsx index 01459203f0..bea6554ff1 100644 --- a/client/src/components/Chat/Messages/MessagesView.tsx +++ b/client/src/components/Chat/Messages/MessagesView.tsx @@ -1,10 +1,12 @@ import { useState } from 'react'; +import { useAtomValue } from 'jotai'; import { useRecoilValue } from 'recoil'; import { CSSTransition } from 'react-transition-group'; import type { TMessage } from 'librechat-data-provider'; import { useScreenshot, useMessageScrolling, useLocalize } from '~/hooks'; import ScrollToBottom from '~/components/Messages/ScrollToBottom'; import { MessagesViewProvider } from '~/Providers'; +import { fontSizeAtom } from '~/store/fontSize'; import MultiMessage from './MultiMessage'; import { cn } from '~/utils'; import store from '~/store'; @@ -15,7 +17,7 @@ function MessagesViewContent({ messagesTree?: TMessage[] | null; }) { const localize = useLocalize(); - const fontSize = useRecoilValue(store.fontSize); + const fontSize = useAtomValue(fontSizeAtom); const { 
screenshotTargetRef } = useScreenshot(); const scrollButtonPreference = useRecoilValue(store.showScrollButton); const [currentEditId, setCurrentEditId] = useState(-1); diff --git a/client/src/components/Chat/Messages/SearchMessage.tsx b/client/src/components/Chat/Messages/SearchMessage.tsx index c7ac2c69c3..982aee06ce 100644 --- a/client/src/components/Chat/Messages/SearchMessage.tsx +++ b/client/src/components/Chat/Messages/SearchMessage.tsx @@ -1,10 +1,12 @@ import { useMemo } from 'react'; +import { useAtomValue } from 'jotai'; import { useRecoilValue } from 'recoil'; import { useAuthContext, useLocalize } from '~/hooks'; import type { TMessageProps, TMessageIcon } from '~/common'; import MinimalHoverButtons from '~/components/Chat/Messages/MinimalHoverButtons'; import Icon from '~/components/Chat/Messages/MessageIcon'; import SearchContent from './Content/SearchContent'; +import { fontSizeAtom } from '~/store/fontSize'; import SearchButtons from './SearchButtons'; import SubRow from './SubRow'; import { cn } from '~/utils'; @@ -34,8 +36,8 @@ const MessageBody = ({ message, messageLabel, fontSize }) => ( ); export default function SearchMessage({ message }: Pick) { + const fontSize = useAtomValue(fontSizeAtom); const UsernameDisplay = useRecoilValue(store.UsernameDisplay); - const fontSize = useRecoilValue(store.fontSize); const { user } = useAuthContext(); const localize = useLocalize(); diff --git a/client/src/components/Chat/Messages/ui/MessageRender.tsx b/client/src/components/Chat/Messages/ui/MessageRender.tsx index f056fccc98..179da5942d 100644 --- a/client/src/components/Chat/Messages/ui/MessageRender.tsx +++ b/client/src/components/Chat/Messages/ui/MessageRender.tsx @@ -1,4 +1,5 @@ import React, { useCallback, useMemo, memo } from 'react'; +import { useAtomValue } from 'jotai'; import { useRecoilValue } from 'recoil'; import { type TMessage } from 'librechat-data-provider'; import type { TMessageProps, TMessageIcon } from '~/common'; @@ -9,6 +10,7 @@ 
import HoverButtons from '~/components/Chat/Messages/HoverButtons'; import MessageIcon from '~/components/Chat/Messages/MessageIcon'; import { Plugin } from '~/components/Messages/Content'; import SubRow from '~/components/Chat/Messages/SubRow'; +import { fontSizeAtom } from '~/store/fontSize'; import { MessageContext } from '~/Providers'; import { useMessageActions } from '~/hooks'; import { cn, logger } from '~/utils'; @@ -58,8 +60,8 @@ const MessageRender = memo( isMultiMessage, setCurrentEditId, }); + const fontSize = useAtomValue(fontSizeAtom); const maximizeChatSpace = useRecoilValue(store.maximizeChatSpace); - const fontSize = useRecoilValue(store.fontSize); const handleRegenerateMessage = useCallback(() => regenerateMessage(), [regenerateMessage]); const hasNoChildren = !(msg?.children?.length ?? 0); diff --git a/client/src/components/Messages/ContentRender.tsx b/client/src/components/Messages/ContentRender.tsx index ce88687d23..565dadde11 100644 --- a/client/src/components/Messages/ContentRender.tsx +++ b/client/src/components/Messages/ContentRender.tsx @@ -1,5 +1,6 @@ -import { useRecoilValue } from 'recoil'; import { useCallback, useMemo, memo } from 'react'; +import { useAtomValue } from 'jotai'; +import { useRecoilValue } from 'recoil'; import type { TMessage, TMessageContentParts } from 'librechat-data-provider'; import type { TMessageProps, TMessageIcon } from '~/common'; import ContentParts from '~/components/Chat/Messages/Content/ContentParts'; @@ -9,6 +10,7 @@ import HoverButtons from '~/components/Chat/Messages/HoverButtons'; import MessageIcon from '~/components/Chat/Messages/MessageIcon'; import { useAttachments, useMessageActions } from '~/hooks'; import SubRow from '~/components/Chat/Messages/SubRow'; +import { fontSizeAtom } from '~/store/fontSize'; import { cn, logger } from '~/utils'; import store from '~/store'; @@ -60,8 +62,8 @@ const ContentRender = memo( isMultiMessage, setCurrentEditId, }); + const fontSize = 
useAtomValue(fontSizeAtom); const maximizeChatSpace = useRecoilValue(store.maximizeChatSpace); - const fontSize = useRecoilValue(store.fontSize); const handleRegenerateMessage = useCallback(() => regenerateMessage(), [regenerateMessage]); const isLast = useMemo( diff --git a/client/src/components/Nav/SettingsTabs/Chat/FontSizeSelector.tsx b/client/src/components/Nav/SettingsTabs/Chat/FontSizeSelector.tsx index 82fa2e746b..66b3f832ab 100644 --- a/client/src/components/Nav/SettingsTabs/Chat/FontSizeSelector.tsx +++ b/client/src/components/Nav/SettingsTabs/Chat/FontSizeSelector.tsx @@ -1,15 +1,14 @@ -import { useRecoilState } from 'recoil'; -import { Dropdown, applyFontSize } from '@librechat/client'; +import { useAtom } from 'jotai'; +import { Dropdown } from '@librechat/client'; +import { fontSizeAtom } from '~/store/fontSize'; import { useLocalize } from '~/hooks'; -import store from '~/store'; export default function FontSizeSelector() { - const [fontSize, setFontSize] = useRecoilState(store.fontSize); const localize = useLocalize(); + const [fontSize, setFontSize] = useAtom(fontSizeAtom); const handleChange = (val: string) => { setFontSize(val); - applyFontSize(val); }; const options = [ diff --git a/client/src/components/Share/Message.tsx b/client/src/components/Share/Message.tsx index eddd5060e5..e556145481 100644 --- a/client/src/components/Share/Message.tsx +++ b/client/src/components/Share/Message.tsx @@ -1,4 +1,4 @@ -import { useRecoilValue } from 'recoil'; +import { useAtomValue } from 'jotai'; import type { TMessageProps } from '~/common'; import MinimalHoverButtons from '~/components/Chat/Messages/MinimalHoverButtons'; import MessageContent from '~/components/Chat/Messages/Content/MessageContent'; @@ -6,16 +6,16 @@ import SearchContent from '~/components/Chat/Messages/Content/SearchContent'; import SiblingSwitch from '~/components/Chat/Messages/SiblingSwitch'; import { Plugin } from '~/components/Messages/Content'; import SubRow from 
'~/components/Chat/Messages/SubRow'; +import { fontSizeAtom } from '~/store/fontSize'; import { MessageContext } from '~/Providers'; import { useAttachments } from '~/hooks'; import MultiMessage from './MultiMessage'; import { cn } from '~/utils'; -import store from '~/store'; import Icon from './MessageIcon'; export default function Message(props: TMessageProps) { - const fontSize = useRecoilValue(store.fontSize); + const fontSize = useAtomValue(fontSizeAtom); const { message, siblingIdx, diff --git a/client/src/hooks/Chat/__tests__/useFocusChatEffect.spec.tsx b/client/src/hooks/Chat/__tests__/useFocusChatEffect.spec.tsx index a3d3b8d67d..e0dbac5a1e 100644 --- a/client/src/hooks/Chat/__tests__/useFocusChatEffect.spec.tsx +++ b/client/src/hooks/Chat/__tests__/useFocusChatEffect.spec.tsx @@ -21,8 +21,8 @@ describe('useFocusChatEffect', () => { (useNavigate as jest.Mock).mockReturnValue(mockNavigate); // Mock window.matchMedia - window.matchMedia = jest.fn().mockImplementation(() => ({ - matches: false, + window.matchMedia = jest.fn().mockImplementation((query) => ({ + matches: query === '(hover: hover)', // Desktop has hover capability media: '', onchange: null, addListener: jest.fn(), @@ -83,8 +83,8 @@ describe('useFocusChatEffect', () => { }); test('should not focus textarea on touchscreen devices', () => { - window.matchMedia = jest.fn().mockImplementation(() => ({ - matches: true, // This indicates a touchscreen + window.matchMedia = jest.fn().mockImplementation((query) => ({ + matches: query === '(pointer: coarse)', // Touchscreen has coarse pointer media: '', onchange: null, addListener: jest.fn(), diff --git a/client/src/store/fontSize.ts b/client/src/store/fontSize.ts new file mode 100644 index 0000000000..4b1a0666f3 --- /dev/null +++ b/client/src/store/fontSize.ts @@ -0,0 +1,54 @@ +import { atom } from 'jotai'; +import { atomWithStorage } from 'jotai/utils'; +import { applyFontSize } from '@librechat/client'; + +const DEFAULT_FONT_SIZE = 'text-base'; + +/** 
+ * Base storage atom for font size + */ +const fontSizeStorageAtom = atomWithStorage('fontSize', DEFAULT_FONT_SIZE, undefined, { + getOnInit: true, +}); + +/** + * Derived atom that applies font size changes to the DOM + * Read: returns the current font size + * Write: updates storage and applies the font size to the DOM + */ +export const fontSizeAtom = atom( + (get) => get(fontSizeStorageAtom), + (get, set, newValue: string) => { + set(fontSizeStorageAtom, newValue); + if (typeof window !== 'undefined' && typeof document !== 'undefined') { + applyFontSize(newValue); + } + }, +); + +/** + * Initialize font size on app load + */ +export const initializeFontSize = () => { + if (typeof window === 'undefined' || typeof document === 'undefined') { + return; + } + + const savedValue = localStorage.getItem('fontSize'); + + if (savedValue !== null) { + try { + const parsedValue = JSON.parse(savedValue); + applyFontSize(parsedValue); + } catch (error) { + console.error( + 'Error parsing localStorage key "fontSize", resetting to default. 
Error:', + error, + ); + localStorage.setItem('fontSize', JSON.stringify(DEFAULT_FONT_SIZE)); + applyFontSize(DEFAULT_FONT_SIZE); + } + } else { + applyFontSize(DEFAULT_FONT_SIZE); + } +}; diff --git a/client/src/store/settings.ts b/client/src/store/settings.ts index 0fe4dccd2c..4e9c2f5cad 100644 --- a/client/src/store/settings.ts +++ b/client/src/store/settings.ts @@ -21,7 +21,6 @@ const localStorageAtoms = { // General settings autoScroll: atomWithLocalStorage('autoScroll', false), hideSidePanel: atomWithLocalStorage('hideSidePanel', false), - fontSize: atomWithLocalStorage('fontSize', 'text-base'), enableUserMsgMarkdown: atomWithLocalStorage( LocalStorageKeys.ENABLE_USER_MSG_MARKDOWN, true, From 589f1193100de0a5d119c67677f3ebb64aed4a9c Mon Sep 17 00:00:00 2001 From: Federico Ruggi Date: Sat, 18 Oct 2025 11:54:05 +0200 Subject: [PATCH 06/37] =?UTF-8?q?=F0=9F=A9=B9=20fix:=20Wrap=20Attempt=20to?= =?UTF-8?q?=20Reconnect=20OAuth=20MCP=20Servers=20(#10172)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/server/controllers/AuthController.js | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/api/server/controllers/AuthController.js b/api/server/controllers/AuthController.js index 249817610e..096727e977 100644 --- a/api/server/controllers/AuthController.js +++ b/api/server/controllers/AuthController.js @@ -116,11 +116,15 @@ const refreshController = async (req, res) => { const token = await setAuthTokens(userId, res, session); // trigger OAuth MCP server reconnection asynchronously (best effort) - void getOAuthReconnectionManager() - .reconnectServers(userId) - .catch((err) => { - logger.error('Error reconnecting OAuth MCP servers:', err); - }); + try { + void getOAuthReconnectionManager() + .reconnectServers(userId) + .catch((err) => { + logger.error('[refreshController] Error reconnecting OAuth MCP servers:', err); + }); + } catch (err) { + logger.warn(`[refreshController] Cannot attempt 
OAuth MCP servers reconnection:`, err); + } res.status(200).send({ token, user }); } else if (req?.query?.retry) { From 36f0365fd4d1f866c9bbac732d4e14e02963be36 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 19 Oct 2025 09:23:27 -0400 Subject: [PATCH 07/37] =?UTF-8?q?=F0=9F=A7=AE=20feat:=20Enhance=20Model=20?= =?UTF-8?q?Pricing=20Coverage=20and=20Pattern=20Matching=20(#10173)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * updated gpt5-pro it is here and on openrouter https://platform.openai.com/docs/models/gpt-5-pro * feat: Add gpt-5-pro pricing - Implemented handling for the new gpt-5-pro model in the getValueKey function. - Updated tests to ensure correct behavior for gpt-5-pro across various scenarios. - Adjusted token limits and multipliers for gpt-5-pro in the tokens utility files. - Enhanced model matching functionality to include gpt-5-pro variations. * refactor: optimize model pricing and validation logic - Added new model pricing entries for llama2, llama3, and qwen variants in tx.js. - Updated tokenValues to include additional models and their pricing structures. - Implemented validation tests in tx.spec.js to ensure all models resolve correctly to pricing. - Refactored getValueKey function to improve model matching and resolution efficiency. - Removed outdated model entries from tokens.ts to streamline pricing management. * fix: add missing pricing * chore: update model pricing for qwen and gemma variants * chore: update model pricing and add validation for context windows - Removed outdated model entries from tx.js and updated tokenValues with new models. - Added a test in tx.spec.js to ensure all models with pricing have corresponding context windows defined in tokens.ts. - Introduced 'command-text' model pricing in tokens.ts to maintain consistency across model definitions. 
* chore: update model names and pricing for AI21 and Amazon models - Refactored model names in tx.js for AI21 and Amazon models to remove versioning and improve consistency. - Updated pricing values in tokens.ts to reflect the new model names. - Added comprehensive tests in tx.spec.js to validate pricing for both short and full model names across AI21 and Amazon models. * feat: add pricing and validation for Claude Haiku 4.5 model * chore: increase default max context tokens to 18000 for agents * feat: add Qwen3 model pricing and validation tests * chore: reorganize and update Qwen model pricing in tx.js and tokens.ts --------- Co-authored-by: khfung <68192841+khfung@users.noreply.github.com> --- api/models/tx.js | 258 ++++---- api/models/tx.spec.js | 594 ++++++++++++++++++ api/server/services/Endpoints/agents/agent.js | 2 +- api/utils/tokens.spec.js | 180 +++++- packages/api/src/utils/tokens.ts | 62 +- 5 files changed, 964 insertions(+), 132 deletions(-) diff --git a/api/models/tx.js b/api/models/tx.js index 462396d860..92f2432d0e 100644 --- a/api/models/tx.js +++ b/api/models/tx.js @@ -1,4 +1,4 @@ -const { matchModelName } = require('@librechat/api'); +const { matchModelName, findMatchingPattern } = require('@librechat/api'); const defaultRate = 6; /** @@ -6,44 +6,58 @@ const defaultRate = 6; * source: https://aws.amazon.com/bedrock/pricing/ * */ const bedrockValues = { - // Basic llama2 patterns + // Basic llama2 patterns (base defaults to smallest variant) + llama2: { prompt: 0.75, completion: 1.0 }, + 'llama-2': { prompt: 0.75, completion: 1.0 }, 'llama2-13b': { prompt: 0.75, completion: 1.0 }, - 'llama2:13b': { prompt: 0.75, completion: 1.0 }, 'llama2:70b': { prompt: 1.95, completion: 2.56 }, 'llama2-70b': { prompt: 1.95, completion: 2.56 }, - // Basic llama3 patterns + // Basic llama3 patterns (base defaults to smallest variant) + llama3: { prompt: 0.3, completion: 0.6 }, + 'llama-3': { prompt: 0.3, completion: 0.6 }, 'llama3-8b': { prompt: 0.3, completion: 
0.6 }, 'llama3:8b': { prompt: 0.3, completion: 0.6 }, 'llama3-70b': { prompt: 2.65, completion: 3.5 }, 'llama3:70b': { prompt: 2.65, completion: 3.5 }, - // llama3-x-Nb pattern + // llama3-x-Nb pattern (base defaults to smallest variant) + 'llama3-1': { prompt: 0.22, completion: 0.22 }, 'llama3-1-8b': { prompt: 0.22, completion: 0.22 }, 'llama3-1-70b': { prompt: 0.72, completion: 0.72 }, 'llama3-1-405b': { prompt: 2.4, completion: 2.4 }, + 'llama3-2': { prompt: 0.1, completion: 0.1 }, 'llama3-2-1b': { prompt: 0.1, completion: 0.1 }, 'llama3-2-3b': { prompt: 0.15, completion: 0.15 }, 'llama3-2-11b': { prompt: 0.16, completion: 0.16 }, 'llama3-2-90b': { prompt: 0.72, completion: 0.72 }, + 'llama3-3': { prompt: 2.65, completion: 3.5 }, + 'llama3-3-70b': { prompt: 2.65, completion: 3.5 }, - // llama3.x:Nb pattern + // llama3.x:Nb pattern (base defaults to smallest variant) + 'llama3.1': { prompt: 0.22, completion: 0.22 }, 'llama3.1:8b': { prompt: 0.22, completion: 0.22 }, 'llama3.1:70b': { prompt: 0.72, completion: 0.72 }, 'llama3.1:405b': { prompt: 2.4, completion: 2.4 }, + 'llama3.2': { prompt: 0.1, completion: 0.1 }, 'llama3.2:1b': { prompt: 0.1, completion: 0.1 }, 'llama3.2:3b': { prompt: 0.15, completion: 0.15 }, 'llama3.2:11b': { prompt: 0.16, completion: 0.16 }, 'llama3.2:90b': { prompt: 0.72, completion: 0.72 }, + 'llama3.3': { prompt: 2.65, completion: 3.5 }, + 'llama3.3:70b': { prompt: 2.65, completion: 3.5 }, - // llama-3.x-Nb pattern + // llama-3.x-Nb pattern (base defaults to smallest variant) + 'llama-3.1': { prompt: 0.22, completion: 0.22 }, 'llama-3.1-8b': { prompt: 0.22, completion: 0.22 }, 'llama-3.1-70b': { prompt: 0.72, completion: 0.72 }, 'llama-3.1-405b': { prompt: 2.4, completion: 2.4 }, + 'llama-3.2': { prompt: 0.1, completion: 0.1 }, 'llama-3.2-1b': { prompt: 0.1, completion: 0.1 }, 'llama-3.2-3b': { prompt: 0.15, completion: 0.15 }, 'llama-3.2-11b': { prompt: 0.16, completion: 0.16 }, 'llama-3.2-90b': { prompt: 0.72, completion: 0.72 }, + 
'llama-3.3': { prompt: 2.65, completion: 3.5 }, 'llama-3.3-70b': { prompt: 2.65, completion: 3.5 }, 'mistral-7b': { prompt: 0.15, completion: 0.2 }, 'mistral-small': { prompt: 0.15, completion: 0.2 }, @@ -52,15 +66,19 @@ const bedrockValues = { 'mistral-large-2407': { prompt: 3.0, completion: 9.0 }, 'command-text': { prompt: 1.5, completion: 2.0 }, 'command-light': { prompt: 0.3, completion: 0.6 }, - 'ai21.j2-mid-v1': { prompt: 12.5, completion: 12.5 }, - 'ai21.j2-ultra-v1': { prompt: 18.8, completion: 18.8 }, - 'ai21.jamba-instruct-v1:0': { prompt: 0.5, completion: 0.7 }, - 'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 }, - 'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 }, - 'amazon.titan-text-premier-v1:0': { prompt: 0.5, completion: 1.5 }, - 'amazon.nova-micro-v1:0': { prompt: 0.035, completion: 0.14 }, - 'amazon.nova-lite-v1:0': { prompt: 0.06, completion: 0.24 }, - 'amazon.nova-pro-v1:0': { prompt: 0.8, completion: 3.2 }, + // AI21 models + 'j2-mid': { prompt: 12.5, completion: 12.5 }, + 'j2-ultra': { prompt: 18.8, completion: 18.8 }, + 'jamba-instruct': { prompt: 0.5, completion: 0.7 }, + // Amazon Titan models + 'titan-text-lite': { prompt: 0.15, completion: 0.2 }, + 'titan-text-express': { prompt: 0.2, completion: 0.6 }, + 'titan-text-premier': { prompt: 0.5, completion: 1.5 }, + // Amazon Nova models + 'nova-micro': { prompt: 0.035, completion: 0.14 }, + 'nova-lite': { prompt: 0.06, completion: 0.24 }, + 'nova-pro': { prompt: 0.8, completion: 3.2 }, + 'nova-premier': { prompt: 2.5, completion: 12.5 }, 'deepseek.r1': { prompt: 1.35, completion: 5.4 }, }; @@ -71,100 +89,136 @@ const bedrockValues = { */ const tokenValues = Object.assign( { + // Legacy token size mappings (generic patterns - check LAST) '8k': { prompt: 30, completion: 60 }, '32k': { prompt: 60, completion: 120 }, '4k': { prompt: 1.5, completion: 2 }, '16k': { prompt: 3, completion: 4 }, + // Generic fallback patterns (check LAST) + 'claude-': { prompt: 0.8, 
completion: 2.4 }, + deepseek: { prompt: 0.28, completion: 0.42 }, + command: { prompt: 0.38, completion: 0.38 }, + gemma: { prompt: 0.02, completion: 0.04 }, // Base pattern (using gemma-3n-e4b pricing) + gemini: { prompt: 0.5, completion: 1.5 }, + 'gpt-oss': { prompt: 0.05, completion: 0.2 }, + // Specific model variants (check FIRST - more specific patterns at end) 'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 }, - 'o4-mini': { prompt: 1.1, completion: 4.4 }, - 'o3-mini': { prompt: 1.1, completion: 4.4 }, - o3: { prompt: 2, completion: 8 }, - 'o1-mini': { prompt: 1.1, completion: 4.4 }, - 'o1-preview': { prompt: 15, completion: 60 }, - o1: { prompt: 15, completion: 60 }, + 'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 }, + 'gpt-4-1106': { prompt: 10, completion: 30 }, + 'gpt-4.1': { prompt: 2, completion: 8 }, 'gpt-4.1-nano': { prompt: 0.1, completion: 0.4 }, 'gpt-4.1-mini': { prompt: 0.4, completion: 1.6 }, - 'gpt-4.1': { prompt: 2, completion: 8 }, 'gpt-4.5': { prompt: 75, completion: 150 }, - 'gpt-4o-mini': { prompt: 0.15, completion: 0.6 }, - 'gpt-5': { prompt: 1.25, completion: 10 }, - 'gpt-5-mini': { prompt: 0.25, completion: 2 }, - 'gpt-5-nano': { prompt: 0.05, completion: 0.4 }, 'gpt-4o': { prompt: 2.5, completion: 10 }, 'gpt-4o-2024-05-13': { prompt: 5, completion: 15 }, - 'gpt-4-1106': { prompt: 10, completion: 30 }, - 'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 }, - 'claude-3-opus': { prompt: 15, completion: 75 }, + 'gpt-4o-mini': { prompt: 0.15, completion: 0.6 }, + 'gpt-5': { prompt: 1.25, completion: 10 }, + 'gpt-5-nano': { prompt: 0.05, completion: 0.4 }, + 'gpt-5-mini': { prompt: 0.25, completion: 2 }, + 'gpt-5-pro': { prompt: 15, completion: 120 }, + o1: { prompt: 15, completion: 60 }, + 'o1-mini': { prompt: 1.1, completion: 4.4 }, + 'o1-preview': { prompt: 15, completion: 60 }, + o3: { prompt: 2, completion: 8 }, + 'o3-mini': { prompt: 1.1, completion: 4.4 }, + 'o4-mini': { prompt: 1.1, completion: 4.4 }, + 'claude-instant': 
{ prompt: 0.8, completion: 2.4 }, + 'claude-2': { prompt: 8, completion: 24 }, + 'claude-2.1': { prompt: 8, completion: 24 }, + 'claude-3-haiku': { prompt: 0.25, completion: 1.25 }, 'claude-3-sonnet': { prompt: 3, completion: 15 }, + 'claude-3-opus': { prompt: 15, completion: 75 }, + 'claude-3-5-haiku': { prompt: 0.8, completion: 4 }, + 'claude-3.5-haiku': { prompt: 0.8, completion: 4 }, 'claude-3-5-sonnet': { prompt: 3, completion: 15 }, 'claude-3.5-sonnet': { prompt: 3, completion: 15 }, 'claude-3-7-sonnet': { prompt: 3, completion: 15 }, 'claude-3.7-sonnet': { prompt: 3, completion: 15 }, - 'claude-3-5-haiku': { prompt: 0.8, completion: 4 }, - 'claude-3.5-haiku': { prompt: 0.8, completion: 4 }, - 'claude-3-haiku': { prompt: 0.25, completion: 1.25 }, - 'claude-sonnet-4': { prompt: 3, completion: 15 }, + 'claude-haiku-4-5': { prompt: 1, completion: 5 }, 'claude-opus-4': { prompt: 15, completion: 75 }, - 'claude-2.1': { prompt: 8, completion: 24 }, - 'claude-2': { prompt: 8, completion: 24 }, - 'claude-instant': { prompt: 0.8, completion: 2.4 }, - 'claude-': { prompt: 0.8, completion: 2.4 }, - 'command-r-plus': { prompt: 3, completion: 15 }, + 'claude-sonnet-4': { prompt: 3, completion: 15 }, 'command-r': { prompt: 0.5, completion: 1.5 }, + 'command-r-plus': { prompt: 3, completion: 15 }, + 'command-text': { prompt: 1.5, completion: 2.0 }, 'deepseek-reasoner': { prompt: 0.28, completion: 0.42 }, - deepseek: { prompt: 0.28, completion: 0.42 }, - /* cohere doesn't have rates for the older command models, - so this was from https://artificialanalysis.ai/models/command-light/providers */ - command: { prompt: 0.38, completion: 0.38 }, - gemma: { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemma-2': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemma-3': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemma-3-27b': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemini-2.0-flash-lite': { prompt: 
0.075, completion: 0.3 }, + 'deepseek-r1': { prompt: 0.4, completion: 2.0 }, + 'deepseek-v3': { prompt: 0.2, completion: 0.8 }, + 'gemma-2': { prompt: 0.01, completion: 0.03 }, // Base pattern (using gemma-2-9b pricing) + 'gemma-3': { prompt: 0.02, completion: 0.04 }, // Base pattern (using gemma-3n-e4b pricing) + 'gemma-3-27b': { prompt: 0.09, completion: 0.16 }, + 'gemini-1.5': { prompt: 2.5, completion: 10 }, + 'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 }, + 'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 }, + 'gemini-2.0': { prompt: 0.1, completion: 0.4 }, // Base pattern (using 2.0-flash pricing) 'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 }, - 'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemini-2.5-pro': { prompt: 1.25, completion: 10 }, + 'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 }, + 'gemini-2.5': { prompt: 0.3, completion: 2.5 }, // Base pattern (using 2.5-flash pricing) 'gemini-2.5-flash': { prompt: 0.3, completion: 2.5 }, 'gemini-2.5-flash-lite': { prompt: 0.1, completion: 0.4 }, - 'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time - 'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 }, - 'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 }, - 'gemini-1.5': { prompt: 2.5, completion: 10 }, + 'gemini-2.5-pro': { prompt: 1.25, completion: 10 }, 'gemini-pro-vision': { prompt: 0.5, completion: 1.5 }, - gemini: { prompt: 0.5, completion: 1.5 }, - 'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 }, - 'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 }, - 'grok-2-vision': { prompt: 2.0, completion: 10.0 }, + grok: { prompt: 2.0, completion: 10.0 }, // Base pattern defaults to grok-2 + 'grok-beta': { prompt: 5.0, completion: 15.0 }, 'grok-vision-beta': { prompt: 5.0, completion: 15.0 }, + 'grok-2': { prompt: 2.0, completion: 10.0 }, 'grok-2-1212': { prompt: 2.0, completion: 10.0 }, 'grok-2-latest': { prompt: 2.0, completion: 10.0 }, - 'grok-2': { 
prompt: 2.0, completion: 10.0 }, - 'grok-3-mini-fast': { prompt: 0.6, completion: 4 }, - 'grok-3-mini': { prompt: 0.3, completion: 0.5 }, - 'grok-3-fast': { prompt: 5.0, completion: 25.0 }, + 'grok-2-vision': { prompt: 2.0, completion: 10.0 }, + 'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 }, + 'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 }, 'grok-3': { prompt: 3.0, completion: 15.0 }, + 'grok-3-fast': { prompt: 5.0, completion: 25.0 }, + 'grok-3-mini': { prompt: 0.3, completion: 0.5 }, + 'grok-3-mini-fast': { prompt: 0.6, completion: 4 }, 'grok-4': { prompt: 3.0, completion: 15.0 }, - 'grok-beta': { prompt: 5.0, completion: 15.0 }, - 'mistral-large': { prompt: 2.0, completion: 6.0 }, - 'pixtral-large': { prompt: 2.0, completion: 6.0 }, - 'mistral-saba': { prompt: 0.2, completion: 0.6 }, codestral: { prompt: 0.3, completion: 0.9 }, - 'ministral-8b': { prompt: 0.1, completion: 0.1 }, 'ministral-3b': { prompt: 0.04, completion: 0.04 }, - // GPT-OSS models - 'gpt-oss': { prompt: 0.05, completion: 0.2 }, + 'ministral-8b': { prompt: 0.1, completion: 0.1 }, + 'mistral-nemo': { prompt: 0.15, completion: 0.15 }, + 'mistral-saba': { prompt: 0.2, completion: 0.6 }, + 'pixtral-large': { prompt: 2.0, completion: 6.0 }, + 'mistral-large': { prompt: 2.0, completion: 6.0 }, + 'mixtral-8x22b': { prompt: 0.65, completion: 0.65 }, + kimi: { prompt: 0.14, completion: 2.49 }, // Base pattern (using kimi-k2 pricing) + // GPT-OSS models (specific sizes) 'gpt-oss:20b': { prompt: 0.05, completion: 0.2 }, 'gpt-oss-20b': { prompt: 0.05, completion: 0.2 }, 'gpt-oss:120b': { prompt: 0.15, completion: 0.6 }, 'gpt-oss-120b': { prompt: 0.15, completion: 0.6 }, - // GLM models (Zhipu AI) + // GLM models (Zhipu AI) - general to specific glm4: { prompt: 0.1, completion: 0.1 }, 'glm-4': { prompt: 0.1, completion: 0.1 }, 'glm-4-32b': { prompt: 0.1, completion: 0.1 }, 'glm-4.5': { prompt: 0.35, completion: 1.55 }, - 'glm-4.5v': { prompt: 0.6, completion: 1.8 }, 'glm-4.5-air': { 
prompt: 0.14, completion: 0.86 }, + 'glm-4.5v': { prompt: 0.6, completion: 1.8 }, 'glm-4.6': { prompt: 0.5, completion: 1.75 }, + // Qwen models + qwen: { prompt: 0.08, completion: 0.33 }, // Qwen base pattern (using qwen2.5-72b pricing) + 'qwen2.5': { prompt: 0.08, completion: 0.33 }, // Qwen 2.5 base pattern + 'qwen-turbo': { prompt: 0.05, completion: 0.2 }, + 'qwen-plus': { prompt: 0.4, completion: 1.2 }, + 'qwen-max': { prompt: 1.6, completion: 6.4 }, + 'qwq-32b': { prompt: 0.15, completion: 0.4 }, + // Qwen3 models + qwen3: { prompt: 0.035, completion: 0.138 }, // Qwen3 base pattern (using qwen3-4b pricing) + 'qwen3-8b': { prompt: 0.035, completion: 0.138 }, + 'qwen3-14b': { prompt: 0.05, completion: 0.22 }, + 'qwen3-30b-a3b': { prompt: 0.06, completion: 0.22 }, + 'qwen3-32b': { prompt: 0.05, completion: 0.2 }, + 'qwen3-235b-a22b': { prompt: 0.08, completion: 0.55 }, + // Qwen3 VL (Vision-Language) models + 'qwen3-vl-8b-thinking': { prompt: 0.18, completion: 2.1 }, + 'qwen3-vl-8b-instruct': { prompt: 0.18, completion: 0.69 }, + 'qwen3-vl-30b-a3b': { prompt: 0.29, completion: 1.0 }, + 'qwen3-vl-235b-a22b': { prompt: 0.3, completion: 1.2 }, + // Qwen3 specialized models + 'qwen3-max': { prompt: 1.2, completion: 6 }, + 'qwen3-coder': { prompt: 0.22, completion: 0.95 }, + 'qwen3-coder-30b-a3b': { prompt: 0.06, completion: 0.25 }, + 'qwen3-coder-plus': { prompt: 1, completion: 5 }, + 'qwen3-coder-flash': { prompt: 0.3, completion: 1.5 }, + 'qwen3-next-80b-a3b': { prompt: 0.1, completion: 0.8 }, }, bedrockValues, ); @@ -195,67 +249,39 @@ const cacheTokenValues = { * @returns {string|undefined} The key corresponding to the model name, or undefined if no match is found. 
*/ const getValueKey = (model, endpoint) => { + if (!model || typeof model !== 'string') { + return undefined; + } + + // Use findMatchingPattern directly against tokenValues for efficient lookup + if (!endpoint || (typeof endpoint === 'string' && !tokenValues[endpoint])) { + const matchedKey = findMatchingPattern(model, tokenValues); + if (matchedKey) { + return matchedKey; + } + } + + // Fallback: use matchModelName for edge cases and legacy handling const modelName = matchModelName(model, endpoint); if (!modelName) { return undefined; } + // Legacy token size mappings and aliases for older models if (modelName.includes('gpt-3.5-turbo-16k')) { return '16k'; - } else if (modelName.includes('gpt-3.5-turbo-0125')) { - return 'gpt-3.5-turbo-0125'; - } else if (modelName.includes('gpt-3.5-turbo-1106')) { - return 'gpt-3.5-turbo-1106'; } else if (modelName.includes('gpt-3.5')) { return '4k'; - } else if (modelName.includes('o4-mini')) { - return 'o4-mini'; - } else if (modelName.includes('o4')) { - return 'o4'; - } else if (modelName.includes('o3-mini')) { - return 'o3-mini'; - } else if (modelName.includes('o3')) { - return 'o3'; - } else if (modelName.includes('o1-preview')) { - return 'o1-preview'; - } else if (modelName.includes('o1-mini')) { - return 'o1-mini'; - } else if (modelName.includes('o1')) { - return 'o1'; - } else if (modelName.includes('gpt-4.5')) { - return 'gpt-4.5'; - } else if (modelName.includes('gpt-4.1-nano')) { - return 'gpt-4.1-nano'; - } else if (modelName.includes('gpt-4.1-mini')) { - return 'gpt-4.1-mini'; - } else if (modelName.includes('gpt-4.1')) { - return 'gpt-4.1'; - } else if (modelName.includes('gpt-4o-2024-05-13')) { - return 'gpt-4o-2024-05-13'; - } else if (modelName.includes('gpt-5-nano')) { - return 'gpt-5-nano'; - } else if (modelName.includes('gpt-5-mini')) { - return 'gpt-5-mini'; - } else if (modelName.includes('gpt-5')) { - return 'gpt-5'; - } else if (modelName.includes('gpt-4o-mini')) { - return 'gpt-4o-mini'; - } else 
if (modelName.includes('gpt-4o')) { - return 'gpt-4o'; } else if (modelName.includes('gpt-4-vision')) { - return 'gpt-4-1106'; - } else if (modelName.includes('gpt-4-1106')) { - return 'gpt-4-1106'; + return 'gpt-4-1106'; // Alias for gpt-4-vision } else if (modelName.includes('gpt-4-0125')) { - return 'gpt-4-1106'; + return 'gpt-4-1106'; // Alias for gpt-4-0125 } else if (modelName.includes('gpt-4-turbo')) { - return 'gpt-4-1106'; + return 'gpt-4-1106'; // Alias for gpt-4-turbo } else if (modelName.includes('gpt-4-32k')) { return '32k'; } else if (modelName.includes('gpt-4')) { return '8k'; - } else if (tokenValues[modelName]) { - return modelName; } return undefined; diff --git a/api/models/tx.spec.js b/api/models/tx.spec.js index 3cbce34295..670ea9d5ec 100644 --- a/api/models/tx.spec.js +++ b/api/models/tx.spec.js @@ -1,3 +1,4 @@ +const { maxTokensMap } = require('@librechat/api'); const { EModelEndpoint } = require('librechat-data-provider'); const { defaultRate, @@ -113,6 +114,14 @@ describe('getValueKey', () => { expect(getValueKey('gpt-5-nano-2025-01-30-0130')).toBe('gpt-5-nano'); }); + it('should return "gpt-5-pro" for model type of "gpt-5-pro"', () => { + expect(getValueKey('gpt-5-pro-2025-01-30')).toBe('gpt-5-pro'); + expect(getValueKey('openai/gpt-5-pro')).toBe('gpt-5-pro'); + expect(getValueKey('gpt-5-pro-0130')).toBe('gpt-5-pro'); + expect(getValueKey('gpt-5-pro-2025-01-30-0130')).toBe('gpt-5-pro'); + expect(getValueKey('gpt-5-pro-preview')).toBe('gpt-5-pro'); + }); + it('should return "gpt-4o" for model type of "gpt-4o"', () => { expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o'); expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o'); @@ -288,6 +297,20 @@ describe('getMultiplier', () => { ); }); + it('should return the correct multiplier for gpt-5-pro', () => { + const valueKey = getValueKey('gpt-5-pro-2025-01-30'); + expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-pro'].prompt); + expect(getMultiplier({ 
valueKey, tokenType: 'completion' })).toBe( + tokenValues['gpt-5-pro'].completion, + ); + expect(getMultiplier({ model: 'gpt-5-pro-preview', tokenType: 'prompt' })).toBe( + tokenValues['gpt-5-pro'].prompt, + ); + expect(getMultiplier({ model: 'openai/gpt-5-pro', tokenType: 'completion' })).toBe( + tokenValues['gpt-5-pro'].completion, + ); + }); + it('should return the correct multiplier for gpt-4o', () => { const valueKey = getValueKey('gpt-4o-2024-08-06'); expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt); @@ -471,6 +494,249 @@ describe('AWS Bedrock Model Tests', () => { }); }); +describe('Amazon Model Tests', () => { + describe('Amazon Nova Models', () => { + it('should return correct pricing for nova-premier', () => { + expect(getMultiplier({ model: 'nova-premier', tokenType: 'prompt' })).toBe( + tokenValues['nova-premier'].prompt, + ); + expect(getMultiplier({ model: 'nova-premier', tokenType: 'completion' })).toBe( + tokenValues['nova-premier'].completion, + ); + expect(getMultiplier({ model: 'amazon.nova-premier-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['nova-premier'].prompt, + ); + expect(getMultiplier({ model: 'amazon.nova-premier-v1:0', tokenType: 'completion' })).toBe( + tokenValues['nova-premier'].completion, + ); + }); + + it('should return correct pricing for nova-pro', () => { + expect(getMultiplier({ model: 'nova-pro', tokenType: 'prompt' })).toBe( + tokenValues['nova-pro'].prompt, + ); + expect(getMultiplier({ model: 'nova-pro', tokenType: 'completion' })).toBe( + tokenValues['nova-pro'].completion, + ); + expect(getMultiplier({ model: 'amazon.nova-pro-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['nova-pro'].prompt, + ); + expect(getMultiplier({ model: 'amazon.nova-pro-v1:0', tokenType: 'completion' })).toBe( + tokenValues['nova-pro'].completion, + ); + }); + + it('should return correct pricing for nova-lite', () => { + expect(getMultiplier({ model: 'nova-lite', tokenType: 'prompt' })).toBe( 
+ tokenValues['nova-lite'].prompt, + ); + expect(getMultiplier({ model: 'nova-lite', tokenType: 'completion' })).toBe( + tokenValues['nova-lite'].completion, + ); + expect(getMultiplier({ model: 'amazon.nova-lite-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['nova-lite'].prompt, + ); + expect(getMultiplier({ model: 'amazon.nova-lite-v1:0', tokenType: 'completion' })).toBe( + tokenValues['nova-lite'].completion, + ); + }); + + it('should return correct pricing for nova-micro', () => { + expect(getMultiplier({ model: 'nova-micro', tokenType: 'prompt' })).toBe( + tokenValues['nova-micro'].prompt, + ); + expect(getMultiplier({ model: 'nova-micro', tokenType: 'completion' })).toBe( + tokenValues['nova-micro'].completion, + ); + expect(getMultiplier({ model: 'amazon.nova-micro-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['nova-micro'].prompt, + ); + expect(getMultiplier({ model: 'amazon.nova-micro-v1:0', tokenType: 'completion' })).toBe( + tokenValues['nova-micro'].completion, + ); + }); + + it('should match both short and full model names to the same pricing', () => { + const models = ['nova-micro', 'nova-lite', 'nova-pro', 'nova-premier']; + const fullModels = [ + 'amazon.nova-micro-v1:0', + 'amazon.nova-lite-v1:0', + 'amazon.nova-pro-v1:0', + 'amazon.nova-premier-v1:0', + ]; + + models.forEach((shortModel, i) => { + const fullModel = fullModels[i]; + const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' }); + const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' }); + const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' }); + const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' }); + + expect(shortPrompt).toBe(fullPrompt); + expect(shortCompletion).toBe(fullCompletion); + expect(shortPrompt).toBe(tokenValues[shortModel].prompt); + expect(shortCompletion).toBe(tokenValues[shortModel].completion); + }); + }); + }); + + describe('Amazon Titan Models', () => { + 
it('should return correct pricing for titan-text-premier', () => { + expect(getMultiplier({ model: 'titan-text-premier', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-premier'].prompt, + ); + expect(getMultiplier({ model: 'titan-text-premier', tokenType: 'completion' })).toBe( + tokenValues['titan-text-premier'].completion, + ); + expect(getMultiplier({ model: 'amazon.titan-text-premier-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-premier'].prompt, + ); + expect( + getMultiplier({ model: 'amazon.titan-text-premier-v1:0', tokenType: 'completion' }), + ).toBe(tokenValues['titan-text-premier'].completion); + }); + + it('should return correct pricing for titan-text-express', () => { + expect(getMultiplier({ model: 'titan-text-express', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-express'].prompt, + ); + expect(getMultiplier({ model: 'titan-text-express', tokenType: 'completion' })).toBe( + tokenValues['titan-text-express'].completion, + ); + expect(getMultiplier({ model: 'amazon.titan-text-express-v1', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-express'].prompt, + ); + expect( + getMultiplier({ model: 'amazon.titan-text-express-v1', tokenType: 'completion' }), + ).toBe(tokenValues['titan-text-express'].completion); + }); + + it('should return correct pricing for titan-text-lite', () => { + expect(getMultiplier({ model: 'titan-text-lite', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-lite'].prompt, + ); + expect(getMultiplier({ model: 'titan-text-lite', tokenType: 'completion' })).toBe( + tokenValues['titan-text-lite'].completion, + ); + expect(getMultiplier({ model: 'amazon.titan-text-lite-v1', tokenType: 'prompt' })).toBe( + tokenValues['titan-text-lite'].prompt, + ); + expect(getMultiplier({ model: 'amazon.titan-text-lite-v1', tokenType: 'completion' })).toBe( + tokenValues['titan-text-lite'].completion, + ); + }); + + it('should match both short and full model names to the same pricing', () => { + const 
models = ['titan-text-lite', 'titan-text-express', 'titan-text-premier']; + const fullModels = [ + 'amazon.titan-text-lite-v1', + 'amazon.titan-text-express-v1', + 'amazon.titan-text-premier-v1:0', + ]; + + models.forEach((shortModel, i) => { + const fullModel = fullModels[i]; + const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' }); + const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' }); + const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' }); + const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' }); + + expect(shortPrompt).toBe(fullPrompt); + expect(shortCompletion).toBe(fullCompletion); + expect(shortPrompt).toBe(tokenValues[shortModel].prompt); + expect(shortCompletion).toBe(tokenValues[shortModel].completion); + }); + }); + }); +}); + +describe('AI21 Model Tests', () => { + describe('AI21 J2 Models', () => { + it('should return correct pricing for j2-mid', () => { + expect(getMultiplier({ model: 'j2-mid', tokenType: 'prompt' })).toBe( + tokenValues['j2-mid'].prompt, + ); + expect(getMultiplier({ model: 'j2-mid', tokenType: 'completion' })).toBe( + tokenValues['j2-mid'].completion, + ); + expect(getMultiplier({ model: 'ai21.j2-mid-v1', tokenType: 'prompt' })).toBe( + tokenValues['j2-mid'].prompt, + ); + expect(getMultiplier({ model: 'ai21.j2-mid-v1', tokenType: 'completion' })).toBe( + tokenValues['j2-mid'].completion, + ); + }); + + it('should return correct pricing for j2-ultra', () => { + expect(getMultiplier({ model: 'j2-ultra', tokenType: 'prompt' })).toBe( + tokenValues['j2-ultra'].prompt, + ); + expect(getMultiplier({ model: 'j2-ultra', tokenType: 'completion' })).toBe( + tokenValues['j2-ultra'].completion, + ); + expect(getMultiplier({ model: 'ai21.j2-ultra-v1', tokenType: 'prompt' })).toBe( + tokenValues['j2-ultra'].prompt, + ); + expect(getMultiplier({ model: 'ai21.j2-ultra-v1', tokenType: 'completion' })).toBe( + 
tokenValues['j2-ultra'].completion, + ); + }); + + it('should match both short and full model names to the same pricing', () => { + const models = ['j2-mid', 'j2-ultra']; + const fullModels = ['ai21.j2-mid-v1', 'ai21.j2-ultra-v1']; + + models.forEach((shortModel, i) => { + const fullModel = fullModels[i]; + const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' }); + const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' }); + const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' }); + const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' }); + + expect(shortPrompt).toBe(fullPrompt); + expect(shortCompletion).toBe(fullCompletion); + expect(shortPrompt).toBe(tokenValues[shortModel].prompt); + expect(shortCompletion).toBe(tokenValues[shortModel].completion); + }); + }); + }); + + describe('AI21 Jamba Models', () => { + it('should return correct pricing for jamba-instruct', () => { + expect(getMultiplier({ model: 'jamba-instruct', tokenType: 'prompt' })).toBe( + tokenValues['jamba-instruct'].prompt, + ); + expect(getMultiplier({ model: 'jamba-instruct', tokenType: 'completion' })).toBe( + tokenValues['jamba-instruct'].completion, + ); + expect(getMultiplier({ model: 'ai21.jamba-instruct-v1:0', tokenType: 'prompt' })).toBe( + tokenValues['jamba-instruct'].prompt, + ); + expect(getMultiplier({ model: 'ai21.jamba-instruct-v1:0', tokenType: 'completion' })).toBe( + tokenValues['jamba-instruct'].completion, + ); + }); + + it('should match both short and full model names to the same pricing', () => { + const shortPrompt = getMultiplier({ model: 'jamba-instruct', tokenType: 'prompt' }); + const fullPrompt = getMultiplier({ + model: 'ai21.jamba-instruct-v1:0', + tokenType: 'prompt', + }); + const shortCompletion = getMultiplier({ model: 'jamba-instruct', tokenType: 'completion' }); + const fullCompletion = getMultiplier({ + model: 'ai21.jamba-instruct-v1:0', + tokenType: 
'completion', + }); + + expect(shortPrompt).toBe(fullPrompt); + expect(shortCompletion).toBe(fullCompletion); + expect(shortPrompt).toBe(tokenValues['jamba-instruct'].prompt); + expect(shortCompletion).toBe(tokenValues['jamba-instruct'].completion); + }); + }); +}); + describe('Deepseek Model Tests', () => { const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', 'deepseek.r1']; @@ -502,6 +768,187 @@ describe('Deepseek Model Tests', () => { }); }); +describe('Qwen3 Model Tests', () => { + describe('Qwen3 Base Models', () => { + it('should return correct pricing for qwen3 base pattern', () => { + expect(getMultiplier({ model: 'qwen3', tokenType: 'prompt' })).toBe( + tokenValues['qwen3'].prompt, + ); + expect(getMultiplier({ model: 'qwen3', tokenType: 'completion' })).toBe( + tokenValues['qwen3'].completion, + ); + }); + + it('should return correct pricing for qwen3-4b (falls back to qwen3)', () => { + expect(getMultiplier({ model: 'qwen3-4b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-4b', tokenType: 'completion' })).toBe( + tokenValues['qwen3'].completion, + ); + }); + + it('should return correct pricing for qwen3-8b', () => { + expect(getMultiplier({ model: 'qwen3-8b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-8b'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-8b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-8b'].completion, + ); + }); + + it('should return correct pricing for qwen3-14b', () => { + expect(getMultiplier({ model: 'qwen3-14b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-14b'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-14b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-14b'].completion, + ); + }); + + it('should return correct pricing for qwen3-235b-a22b', () => { + expect(getMultiplier({ model: 'qwen3-235b-a22b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-235b-a22b'].prompt, + ); + expect(getMultiplier({ 
model: 'qwen3-235b-a22b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-235b-a22b'].completion, + ); + }); + + it('should handle model name variations with provider prefixes', () => { + const models = [ + { input: 'qwen3', expected: 'qwen3' }, + { input: 'qwen3-4b', expected: 'qwen3' }, + { input: 'qwen3-8b', expected: 'qwen3-8b' }, + { input: 'qwen3-32b', expected: 'qwen3-32b' }, + ]; + models.forEach(({ input, expected }) => { + const withPrefix = `alibaba/${input}`; + expect(getMultiplier({ model: withPrefix, tokenType: 'prompt' })).toBe( + tokenValues[expected].prompt, + ); + expect(getMultiplier({ model: withPrefix, tokenType: 'completion' })).toBe( + tokenValues[expected].completion, + ); + }); + }); + }); + + describe('Qwen3 VL (Vision-Language) Models', () => { + it('should return correct pricing for qwen3-vl-8b-thinking', () => { + expect(getMultiplier({ model: 'qwen3-vl-8b-thinking', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-vl-8b-thinking'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-vl-8b-thinking', tokenType: 'completion' })).toBe( + tokenValues['qwen3-vl-8b-thinking'].completion, + ); + }); + + it('should return correct pricing for qwen3-vl-8b-instruct', () => { + expect(getMultiplier({ model: 'qwen3-vl-8b-instruct', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-vl-8b-instruct'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-vl-8b-instruct', tokenType: 'completion' })).toBe( + tokenValues['qwen3-vl-8b-instruct'].completion, + ); + }); + + it('should return correct pricing for qwen3-vl-30b-a3b', () => { + expect(getMultiplier({ model: 'qwen3-vl-30b-a3b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-vl-30b-a3b'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-vl-30b-a3b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-vl-30b-a3b'].completion, + ); + }); + + it('should return correct pricing for qwen3-vl-235b-a22b', () => { + expect(getMultiplier({ model: 'qwen3-vl-235b-a22b', tokenType: 'prompt' 
})).toBe( + tokenValues['qwen3-vl-235b-a22b'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-vl-235b-a22b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-vl-235b-a22b'].completion, + ); + }); + }); + + describe('Qwen3 Specialized Models', () => { + it('should return correct pricing for qwen3-max', () => { + expect(getMultiplier({ model: 'qwen3-max', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-max'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-max', tokenType: 'completion' })).toBe( + tokenValues['qwen3-max'].completion, + ); + }); + + it('should return correct pricing for qwen3-coder', () => { + expect(getMultiplier({ model: 'qwen3-coder', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-coder'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-coder', tokenType: 'completion' })).toBe( + tokenValues['qwen3-coder'].completion, + ); + }); + + it('should return correct pricing for qwen3-coder-plus', () => { + expect(getMultiplier({ model: 'qwen3-coder-plus', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-coder-plus'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-coder-plus', tokenType: 'completion' })).toBe( + tokenValues['qwen3-coder-plus'].completion, + ); + }); + + it('should return correct pricing for qwen3-coder-flash', () => { + expect(getMultiplier({ model: 'qwen3-coder-flash', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-coder-flash'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-coder-flash', tokenType: 'completion' })).toBe( + tokenValues['qwen3-coder-flash'].completion, + ); + }); + + it('should return correct pricing for qwen3-next-80b-a3b', () => { + expect(getMultiplier({ model: 'qwen3-next-80b-a3b', tokenType: 'prompt' })).toBe( + tokenValues['qwen3-next-80b-a3b'].prompt, + ); + expect(getMultiplier({ model: 'qwen3-next-80b-a3b', tokenType: 'completion' })).toBe( + tokenValues['qwen3-next-80b-a3b'].completion, + ); + }); + }); + + describe('Qwen3 Model Variations', () => { + it('should handle all 
qwen3 models with provider prefixes', () => { + const models = ['qwen3', 'qwen3-8b', 'qwen3-max', 'qwen3-coder', 'qwen3-vl-8b-instruct']; + const prefixes = ['alibaba', 'qwen', 'openrouter']; + + models.forEach((model) => { + prefixes.forEach((prefix) => { + const fullModel = `${prefix}/${model}`; + expect(getMultiplier({ model: fullModel, tokenType: 'prompt' })).toBe( + tokenValues[model].prompt, + ); + expect(getMultiplier({ model: fullModel, tokenType: 'completion' })).toBe( + tokenValues[model].completion, + ); + }); + }); + }); + + it('should handle qwen3-4b falling back to qwen3 base pattern', () => { + const testCases = ['qwen3-4b', 'alibaba/qwen3-4b', 'qwen/qwen3-4b-preview']; + testCases.forEach((model) => { + expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues['qwen3'].prompt); + expect(getMultiplier({ model, tokenType: 'completion' })).toBe( + tokenValues['qwen3'].completion, + ); + }); + }); + }); +}); + describe('getCacheMultiplier', () => { it('should return the correct cache multiplier for a given valueKey and cacheType', () => { expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe( @@ -914,6 +1361,37 @@ describe('Claude Model Tests', () => { ); }); + it('should return correct prompt and completion rates for Claude Haiku 4.5', () => { + expect(getMultiplier({ model: 'claude-haiku-4-5', tokenType: 'prompt' })).toBe( + tokenValues['claude-haiku-4-5'].prompt, + ); + expect(getMultiplier({ model: 'claude-haiku-4-5', tokenType: 'completion' })).toBe( + tokenValues['claude-haiku-4-5'].completion, + ); + }); + + it('should handle Claude Haiku 4.5 model name variations', () => { + const modelVariations = [ + 'claude-haiku-4-5', + 'claude-haiku-4-5-20250420', + 'claude-haiku-4-5-latest', + 'anthropic/claude-haiku-4-5', + 'claude-haiku-4-5/anthropic', + 'claude-haiku-4-5-preview', + ]; + + modelVariations.forEach((model) => { + const valueKey = getValueKey(model); + expect(valueKey).toBe('claude-haiku-4-5'); 
+ expect(getMultiplier({ model, tokenType: 'prompt' })).toBe( + tokenValues['claude-haiku-4-5'].prompt, + ); + expect(getMultiplier({ model, tokenType: 'completion' })).toBe( + tokenValues['claude-haiku-4-5'].completion, + ); + }); + }); + it('should handle Claude 4 model name variations with different prefixes and suffixes', () => { const modelVariations = [ 'claude-sonnet-4', @@ -991,3 +1469,119 @@ describe('Claude Model Tests', () => { }); }); }); + +describe('tokens.ts and tx.js sync validation', () => { + it('should resolve all models in maxTokensMap to pricing via getValueKey', () => { + const tokensKeys = Object.keys(maxTokensMap[EModelEndpoint.openAI]); + const txKeys = Object.keys(tokenValues); + + const unresolved = []; + + tokensKeys.forEach((key) => { + // Skip legacy token size mappings (e.g., '4k', '8k', '16k', '32k') + if (/^\d+k$/.test(key)) return; + + // Skip generic pattern keys (end with '-' or ':') + if (key.endsWith('-') || key.endsWith(':')) return; + + // Try to resolve via getValueKey + const resolvedKey = getValueKey(key); + + // If it resolves and the resolved key has pricing, success + if (resolvedKey && txKeys.includes(resolvedKey)) return; + + // If it resolves to a legacy key (4k, 8k, etc), also OK + if (resolvedKey && /^\d+k$/.test(resolvedKey)) return; + + // If we get here, this model can't get pricing - flag it + unresolved.push({ + key, + resolvedKey: resolvedKey || 'undefined', + context: maxTokensMap[EModelEndpoint.openAI][key], + }); + }); + + if (unresolved.length > 0) { + console.log('\nModels that cannot resolve to pricing via getValueKey:'); + unresolved.forEach(({ key, resolvedKey, context }) => { + console.log(` - '${key}' → '${resolvedKey}' (context: ${context})`); + }); + } + + expect(unresolved).toEqual([]); + }); + + it('should not have redundant dated variants with same pricing and context as base model', () => { + const txKeys = Object.keys(tokenValues); + const redundant = []; + + txKeys.forEach((key) => { + // 
Check if this is a dated variant (ends with -YYYY-MM-DD) + if (key.match(/.*-\d{4}-\d{2}-\d{2}$/)) { + const baseKey = key.replace(/-\d{4}-\d{2}-\d{2}$/, ''); + + if (txKeys.includes(baseKey)) { + const variantPricing = tokenValues[key]; + const basePricing = tokenValues[baseKey]; + const variantContext = maxTokensMap[EModelEndpoint.openAI][key]; + const baseContext = maxTokensMap[EModelEndpoint.openAI][baseKey]; + + const samePricing = + variantPricing.prompt === basePricing.prompt && + variantPricing.completion === basePricing.completion; + const sameContext = variantContext === baseContext; + + if (samePricing && sameContext) { + redundant.push({ + key, + baseKey, + pricing: `${variantPricing.prompt}/${variantPricing.completion}`, + context: variantContext, + }); + } + } + } + }); + + if (redundant.length > 0) { + console.log('\nRedundant dated variants found (same pricing and context as base):'); + redundant.forEach(({ key, baseKey, pricing, context }) => { + console.log(` - '${key}' → '${baseKey}' (pricing: ${pricing}, context: ${context})`); + console.log(` Can be removed - pattern matching will handle it`); + }); + } + + expect(redundant).toEqual([]); + }); + + it('should have context windows in tokens.ts for all models with pricing in tx.js (openAI catch-all)', () => { + const txKeys = Object.keys(tokenValues); + const missingContext = []; + + txKeys.forEach((key) => { + // Skip legacy token size mappings (4k, 8k, 16k, 32k) + if (/^\d+k$/.test(key)) return; + + // Check if this model has a context window defined + const context = maxTokensMap[EModelEndpoint.openAI][key]; + + if (!context) { + const pricing = tokenValues[key]; + missingContext.push({ + key, + pricing: `${pricing.prompt}/${pricing.completion}`, + }); + } + }); + + if (missingContext.length > 0) { + console.log('\nModels with pricing but missing context in tokens.ts:'); + missingContext.forEach(({ key, pricing }) => { + console.log(` - '${key}' (pricing: ${pricing})`); + console.log(` Add to 
tokens.ts openAIModels/bedrockModels/etc.`); + }); + } + + expect(missingContext).toEqual([]); + }); +}); diff --git a/api/server/services/Endpoints/agents/agent.js b/api/server/services/Endpoints/agents/agent.js index 1966834ed4..ec9d56d026 100644 --- a/api/server/services/Endpoints/agents/agent.js +++ b/api/server/services/Endpoints/agents/agent.js @@ -143,7 +143,7 @@ const initializeAgent = async ({ const agentMaxContextTokens = optionalChainWithEmptyCheck( maxContextTokens, getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig), - 4096, + 18000, ); if ( diff --git a/api/utils/tokens.spec.js b/api/utils/tokens.spec.js index 162827767f..12daf64e47 100644 --- a/api/utils/tokens.spec.js +++ b/api/utils/tokens.spec.js @@ -186,6 +186,19 @@ describe('getModelMaxTokens', () => { ); }); + test('should return correct tokens for gpt-5-pro matches', () => { + expect(getModelMaxTokens('gpt-5-pro')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro']); + expect(getModelMaxTokens('gpt-5-pro-preview')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'], + ); + expect(getModelMaxTokens('openai/gpt-5-pro')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'], + ); + expect(getModelMaxTokens('gpt-5-pro-2025-01-30')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'], + ); + }); + test('should return correct tokens for Anthropic models', () => { const models = [ 'claude-2.1', @@ -469,7 +482,7 @@ describe('getModelMaxTokens', () => { test('should return correct max output tokens for GPT-5 models', () => { const { getModelMaxOutputTokens } = require('@librechat/api'); - ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => { + ['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5-pro'].forEach((model) => { expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]); expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe( maxOutputTokensMap[EModelEndpoint.openAI][model], @@ -582,6 +595,13 
@@ describe('matchModelName', () => { expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano'); }); + it('should return the closest matching key for gpt-5-pro matches', () => { + expect(matchModelName('openai/gpt-5-pro')).toBe('gpt-5-pro'); + expect(matchModelName('gpt-5-pro-preview')).toBe('gpt-5-pro'); + expect(matchModelName('gpt-5-pro-2025-01-30')).toBe('gpt-5-pro'); + expect(matchModelName('gpt-5-pro-2025-01-30-0130')).toBe('gpt-5-pro'); + }); + // Tests for Google models it('should return the exact model name if it exists in maxTokensMap - Google models', () => { expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k'); @@ -832,6 +852,49 @@ describe('Claude Model Tests', () => { ); }); + it('should return correct context length for Claude Haiku 4.5', () => { + expect(getModelMaxTokens('claude-haiku-4-5', EModelEndpoint.anthropic)).toBe( + maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'], + ); + expect(getModelMaxTokens('claude-haiku-4-5')).toBe( + maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'], + ); + }); + + it('should handle Claude Haiku 4.5 model name variations', () => { + const modelVariations = [ + 'claude-haiku-4-5', + 'claude-haiku-4-5-20250420', + 'claude-haiku-4-5-latest', + 'anthropic/claude-haiku-4-5', + 'claude-haiku-4-5/anthropic', + 'claude-haiku-4-5-preview', + ]; + + modelVariations.forEach((model) => { + const modelKey = findMatchingPattern(model, maxTokensMap[EModelEndpoint.anthropic]); + expect(modelKey).toBe('claude-haiku-4-5'); + expect(getModelMaxTokens(model, EModelEndpoint.anthropic)).toBe( + maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'], + ); + }); + }); + + it('should match model names correctly for Claude Haiku 4.5', () => { + const modelVariations = [ + 'claude-haiku-4-5', + 'claude-haiku-4-5-20250420', + 'claude-haiku-4-5-latest', + 'anthropic/claude-haiku-4-5', + 'claude-haiku-4-5/anthropic', + 'claude-haiku-4-5-preview', + ]; + + 
modelVariations.forEach((model) => { + expect(matchModelName(model, EModelEndpoint.anthropic)).toBe('claude-haiku-4-5'); + }); + }); + it('should handle Claude 4 model name variations with different prefixes and suffixes', () => { const modelVariations = [ 'claude-sonnet-4', @@ -924,6 +987,121 @@ describe('Kimi Model Tests', () => { }); }); +describe('Qwen3 Model Tests', () => { + describe('getModelMaxTokens', () => { + test('should return correct tokens for Qwen3 base pattern', () => { + expect(getModelMaxTokens('qwen3')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']); + }); + + test('should return correct tokens for qwen3-4b (falls back to qwen3)', () => { + expect(getModelMaxTokens('qwen3-4b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']); + }); + + test('should return correct tokens for Qwen3 base models', () => { + expect(getModelMaxTokens('qwen3-8b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-8b']); + expect(getModelMaxTokens('qwen3-14b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-14b']); + expect(getModelMaxTokens('qwen3-32b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-32b']); + expect(getModelMaxTokens('qwen3-235b-a22b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-235b-a22b'], + ); + }); + + test('should return correct tokens for Qwen3 VL (Vision-Language) models', () => { + expect(getModelMaxTokens('qwen3-vl-8b-thinking')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-thinking'], + ); + expect(getModelMaxTokens('qwen3-vl-8b-instruct')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-instruct'], + ); + expect(getModelMaxTokens('qwen3-vl-30b-a3b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-30b-a3b'], + ); + expect(getModelMaxTokens('qwen3-vl-235b-a22b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-235b-a22b'], + ); + }); + + test('should return correct tokens for Qwen3 specialized models', () => { + 
expect(getModelMaxTokens('qwen3-max')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-max']); + expect(getModelMaxTokens('qwen3-coder')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-coder'], + ); + expect(getModelMaxTokens('qwen3-coder-30b-a3b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-30b-a3b'], + ); + expect(getModelMaxTokens('qwen3-coder-plus')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-plus'], + ); + expect(getModelMaxTokens('qwen3-coder-flash')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-flash'], + ); + expect(getModelMaxTokens('qwen3-next-80b-a3b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-next-80b-a3b'], + ); + }); + + test('should handle Qwen3 models with provider prefixes', () => { + expect(getModelMaxTokens('alibaba/qwen3')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']); + expect(getModelMaxTokens('alibaba/qwen3-4b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3'], + ); + expect(getModelMaxTokens('qwen/qwen3-8b')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-8b'], + ); + expect(getModelMaxTokens('openrouter/qwen3-max')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-max'], + ); + expect(getModelMaxTokens('alibaba/qwen3-vl-8b-instruct')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-instruct'], + ); + expect(getModelMaxTokens('qwen/qwen3-coder')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-coder'], + ); + }); + + test('should handle Qwen3 models with suffixes', () => { + expect(getModelMaxTokens('qwen3-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']); + expect(getModelMaxTokens('qwen3-4b-preview')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3'], + ); + expect(getModelMaxTokens('qwen3-8b-latest')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-8b'], + ); + expect(getModelMaxTokens('qwen3-max-2024')).toBe( + maxTokensMap[EModelEndpoint.openAI]['qwen3-max'], + ); + }); + }); + + describe('matchModelName', () => { + test('should match 
exact Qwen3 model names', () => { + expect(matchModelName('qwen3')).toBe('qwen3'); + expect(matchModelName('qwen3-4b')).toBe('qwen3'); + expect(matchModelName('qwen3-8b')).toBe('qwen3-8b'); + expect(matchModelName('qwen3-vl-8b-thinking')).toBe('qwen3-vl-8b-thinking'); + expect(matchModelName('qwen3-max')).toBe('qwen3-max'); + expect(matchModelName('qwen3-coder')).toBe('qwen3-coder'); + }); + + test('should match Qwen3 model variations with provider prefixes', () => { + expect(matchModelName('alibaba/qwen3')).toBe('qwen3'); + expect(matchModelName('alibaba/qwen3-4b')).toBe('qwen3'); + expect(matchModelName('qwen/qwen3-8b')).toBe('qwen3-8b'); + expect(matchModelName('openrouter/qwen3-max')).toBe('qwen3-max'); + expect(matchModelName('alibaba/qwen3-vl-8b-instruct')).toBe('qwen3-vl-8b-instruct'); + expect(matchModelName('qwen/qwen3-coder')).toBe('qwen3-coder'); + }); + + test('should match Qwen3 model variations with suffixes', () => { + expect(matchModelName('qwen3-preview')).toBe('qwen3'); + expect(matchModelName('qwen3-4b-preview')).toBe('qwen3'); + expect(matchModelName('qwen3-8b-latest')).toBe('qwen3-8b'); + expect(matchModelName('qwen3-max-2024')).toBe('qwen3-max'); + expect(matchModelName('qwen3-coder-v1')).toBe('qwen3-coder'); + }); + }); +}); + describe('GLM Model Tests (Zhipu AI)', () => { describe('getModelMaxTokens', () => { test('should return correct tokens for GLM models', () => { diff --git a/packages/api/src/utils/tokens.ts b/packages/api/src/utils/tokens.ts index d527836642..32921ca851 100644 --- a/packages/api/src/utils/tokens.ts +++ b/packages/api/src/utils/tokens.ts @@ -40,10 +40,10 @@ const openAIModels = { 'gpt-5': 400000, 'gpt-5-mini': 400000, 'gpt-5-nano': 400000, + 'gpt-5-pro': 400000, 'gpt-4o': 127500, // -500 from max 'gpt-4o-mini': 127500, // -500 from max 'gpt-4o-2024-05-13': 127500, // -500 from max - 'gpt-4o-2024-08-06': 127500, // -500 from max 'gpt-4-turbo': 127500, // -500 from max 'gpt-4-vision': 127500, // -500 from max 
'gpt-3.5-turbo': 16375, // -10 from max @@ -60,9 +60,11 @@ const mistralModels = { 'mistral-7b': 31990, // -10 from max 'mistral-small': 31990, // -10 from max 'mixtral-8x7b': 31990, // -10 from max + 'mixtral-8x22b': 65536, 'mistral-large': 131000, 'mistral-large-2402': 127500, 'mistral-large-2407': 127500, + 'mistral-nemo': 131000, 'pixtral-large': 131000, 'mistral-saba': 32000, codestral: 256000, @@ -75,6 +77,7 @@ const cohereModels = { 'command-light-nightly': 8182, // -10 from max command: 4086, // -10 from max 'command-nightly': 8182, // -10 from max + 'command-text': 4086, // -10 from max 'command-r': 127500, // -500 from max 'command-r-plus': 127500, // -500 from max }; @@ -127,14 +130,17 @@ const anthropicModels = { 'claude-3.7-sonnet': 200000, 'claude-3-5-sonnet-latest': 200000, 'claude-3.5-sonnet-latest': 200000, + 'claude-haiku-4-5': 200000, 'claude-sonnet-4': 1000000, 'claude-opus-4': 200000, 'claude-4': 200000, }; const deepseekModels = { - 'deepseek-reasoner': 128000, deepseek: 128000, + 'deepseek-reasoner': 128000, + 'deepseek-r1': 128000, + 'deepseek-v3': 128000, 'deepseek.r1': 128000, }; @@ -200,32 +206,57 @@ const metaModels = { 'llama2:70b': 4000, }; -const ollamaModels = { +const qwenModels = { + qwen: 32000, 'qwen2.5': 32000, + 'qwen-turbo': 1000000, + 'qwen-plus': 131000, + 'qwen-max': 32000, + 'qwq-32b': 32000, + // Qwen3 models + qwen3: 40960, // Qwen3 base pattern (using qwen3-4b context) + 'qwen3-8b': 128000, + 'qwen3-14b': 40960, + 'qwen3-30b-a3b': 40960, + 'qwen3-32b': 40960, + 'qwen3-235b-a22b': 40960, + // Qwen3 VL (Vision-Language) models + 'qwen3-vl-8b-thinking': 256000, + 'qwen3-vl-8b-instruct': 262144, + 'qwen3-vl-30b-a3b': 262144, + 'qwen3-vl-235b-a22b': 131072, + // Qwen3 specialized models + 'qwen3-max': 256000, + 'qwen3-coder': 262144, + 'qwen3-coder-30b-a3b': 262144, + 'qwen3-coder-plus': 128000, + 'qwen3-coder-flash': 128000, + 'qwen3-next-80b-a3b': 262144, }; const ai21Models = { - 'ai21.j2-mid-v1': 8182, // -10 from max - 
'ai21.j2-ultra-v1': 8182, // -10 from max - 'ai21.jamba-instruct-v1:0': 255500, // -500 from max + 'j2-mid': 8182, // -10 from max + 'j2-ultra': 8182, // -10 from max + 'jamba-instruct': 255500, // -500 from max }; const amazonModels = { - 'amazon.titan-text-lite-v1': 4000, - 'amazon.titan-text-express-v1': 8000, - 'amazon.titan-text-premier-v1:0': 31500, // -500 from max + // Amazon Titan models + 'titan-text-lite': 4000, + 'titan-text-express': 8000, + 'titan-text-premier': 31500, // -500 from max + // Amazon Nova models // https://aws.amazon.com/ai/generative-ai/nova/ - 'amazon.nova-micro-v1:0': 127000, // -1000 from max, - 'amazon.nova-lite-v1:0': 295000, // -5000 from max, - 'amazon.nova-pro-v1:0': 295000, // -5000 from max, - 'amazon.nova-premier-v1:0': 995000, // -5000 from max, + 'nova-micro': 127000, // -1000 from max + 'nova-lite': 295000, // -5000 from max + 'nova-pro': 295000, // -5000 from max + 'nova-premier': 995000, // -5000 from max }; const bedrockModels = { ...anthropicModels, ...mistralModels, ...cohereModels, - ...ollamaModels, ...deepseekModels, ...metaModels, ...ai21Models, @@ -254,6 +285,7 @@ const aggregateModels = { ...googleModels, ...bedrockModels, ...xAIModels, + ...qwenModels, // misc. 
kimi: 131000, // GPT-OSS @@ -289,6 +321,7 @@ export const modelMaxOutputs = { 'gpt-5': 128000, 'gpt-5-mini': 128000, 'gpt-5-nano': 128000, + 'gpt-5-pro': 128000, 'gpt-oss-20b': 131000, 'gpt-oss-120b': 131000, system_default: 32000, @@ -299,6 +332,7 @@ const anthropicMaxOutputs = { 'claude-3-haiku': 4096, 'claude-3-sonnet': 4096, 'claude-3-opus': 4096, + 'claude-haiku-4-5': 64000, 'claude-opus-4': 32000, 'claude-sonnet-4': 64000, 'claude-3.5-sonnet': 8192, From cbf52eabe3dff7f0234bf955c23872ce7885a78b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 22 Oct 2025 09:53:21 +0200 Subject: [PATCH 08/37] =?UTF-8?q?=F0=9F=8C=8D=20i18n:=20Update=20translati?= =?UTF-8?q?on.json=20with=20latest=20translations=20(#10175)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- client/src/locales/zh-Hans/translation.json | 24 +++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/client/src/locales/zh-Hans/translation.json b/client/src/locales/zh-Hans/translation.json index 736caa7dba..8ffa0c5f0c 100644 --- a/client/src/locales/zh-Hans/translation.json +++ b/client/src/locales/zh-Hans/translation.json @@ -365,6 +365,7 @@ "com_error_files_process": "处理文件时发生错误", "com_error_files_upload": "上传文件时发生错误", "com_error_files_upload_canceled": "文件上传请求已取消。注意:文件上传可能仍在进行中,需要手动删除。", + "com_error_files_upload_too_large": "文件过大,请上传小于 {{0}} MB 的文件", "com_error_files_validation": "验证文件时出错。", "com_error_google_tool_conflict": "内置的 Google 工具与外部工具不兼容。请禁用内置工具或外部工具。", "com_error_heic_conversion": "将 HEIC 图片转换为 JPEG 失败。请尝试手动转换图像或使用其他格式。", @@ -560,6 +561,7 @@ "com_nav_setting_balance": "余额", "com_nav_setting_chat": "对话", "com_nav_setting_data": "数据管理", + "com_nav_setting_delay": "延迟(秒)", "com_nav_setting_general": "通用", "com_nav_setting_mcp": "MCP 设置", "com_nav_setting_personalization": 
"个性化", @@ -759,6 +761,7 @@ "com_ui_client_secret": "Client Secret", "com_ui_close": "关闭", "com_ui_close_menu": "关闭菜单", + "com_ui_close_settings": "关闭设置", "com_ui_close_window": "关闭窗口", "com_ui_code": "代码", "com_ui_collapse_chat": "收起对话", @@ -857,6 +860,7 @@ "com_ui_edit_editing_image": "编辑图片", "com_ui_edit_mcp_server": "编辑 MCP 服务器", "com_ui_edit_memory": "编辑记忆", + "com_ui_editor_instructions": "拖动图片调整位置 • 使用缩放滑块或按钮调整大小", "com_ui_empty_category": "-", "com_ui_endpoint": "端点", "com_ui_endpoint_menu": "LLM 端点菜单", @@ -891,6 +895,7 @@ "com_ui_feedback_tag_unjustified_refusal": "无故拒绝回答", "com_ui_field_max_length": "{{field}} 最多 {{length}} 个字符", "com_ui_field_required": "此字段为必填项", + "com_ui_file_input_avatar_label": "上传文件用作头像", "com_ui_file_size": "文件大小", "com_ui_file_token_limit": "文件词元数限制", "com_ui_file_token_limit_desc": "为文件处理设定最大词元数限制,以控制成本和资源使用", @@ -953,11 +958,13 @@ "com_ui_import_conversation_file_type_error": "不支持的导入类型", "com_ui_import_conversation_info": "从 JSON 文件导入对话", "com_ui_import_conversation_success": "对话导入成功", + "com_ui_import_conversation_upload_error": "上传文件时出错,请重试。", "com_ui_include_shadcnui": "包含 shadcn/ui 组件指令", "com_ui_initializing": "初始化中...", "com_ui_input": "输入", "com_ui_instructions": "指令", "com_ui_key": "键", + "com_ui_key_required": "API Key 为必填项", "com_ui_late_night": "夜深了", "com_ui_latest_footer": "Every AI for Everyone.", "com_ui_latest_production_version": "最新在用版本", @@ -972,6 +979,7 @@ "com_ui_manage": "管理", "com_ui_marketplace": "市场", "com_ui_marketplace_allow_use": "允许使用市场", + "com_ui_max_file_size": "PNG、JPG 或 JPEG(最大 {{0}})", "com_ui_max_tags": "最多允许 {{0}} 个,用最新值。", "com_ui_mcp_authenticated_success": "MCP 服务器 “{{0}}” 认证成功", "com_ui_mcp_configure_server": "配置 {{0}}", @@ -1066,6 +1074,7 @@ "com_ui_privacy_policy": "隐私政策", "com_ui_privacy_policy_url": "隐私政策链接", "com_ui_prompt": "提示词", + "com_ui_prompt_groups": "提示词组列表", "com_ui_prompt_name": "提示词名称", "com_ui_prompt_name_required": "提示词名称为必填项", "com_ui_prompt_preview_not_shared": 
"作者未允许对此提示词进行协作。", @@ -1095,6 +1104,8 @@ "com_ui_rename_failed": "重命名对话失败", "com_ui_rename_prompt": "重命名 Prompt", "com_ui_requires_auth": "需要认证", + "com_ui_reset": "重置", + "com_ui_reset_adjustments": "重置调整", "com_ui_reset_var": "重置 {{0}}", "com_ui_reset_zoom": "重置缩放", "com_ui_resource": "资源", @@ -1103,6 +1114,8 @@ "com_ui_revoke_info": "撤销所有用户提供的凭据", "com_ui_revoke_key_confirm": "您确定要撤销此密钥吗?", "com_ui_revoke_key_endpoint": "撤销 {{0}} 的密钥", + "com_ui_revoke_key_error": "撤销 API Key 失败,请重试。", + "com_ui_revoke_key_success": "API Key 撤销成功", "com_ui_revoke_keys": "撤销密钥", "com_ui_revoke_keys_confirm": "您确定要撤销所有密钥吗?", "com_ui_role": "角色", @@ -1116,11 +1129,15 @@ "com_ui_role_viewer": "查看者", "com_ui_role_viewer_desc": "可以查看和使用智能体,但无法修改智能体", "com_ui_roleplay": "角色扮演", + "com_ui_rotate": "旋转", + "com_ui_rotate_90": "旋转 90 度", "com_ui_run_code": "运行代码", "com_ui_run_code_error": "代码运行出错", "com_ui_save": "保存", "com_ui_save_badge_changes": "保存徽章更改?", "com_ui_save_changes": "保存修改", + "com_ui_save_key_error": "保存 API Key 失败,请重试。", + "com_ui_save_key_success": "API Key 保存成功", "com_ui_save_submit": "保存并提交", "com_ui_saved": "保存成功!", "com_ui_saving": "保存中...", @@ -1217,6 +1234,7 @@ "com_ui_update_mcp_success": "已成功创建或更新 MCP", "com_ui_upload": "上传", "com_ui_upload_agent_avatar": "成功更新智能体头像", + "com_ui_upload_avatar_label": "上传头像图片", "com_ui_upload_code_files": "上传代码解释器文件", "com_ui_upload_delay": "上传 “{{0}}” 时比预期花了更长时间。文件正在进行检索索引,请稍候。", "com_ui_upload_error": "上传文件错误", @@ -1228,6 +1246,7 @@ "com_ui_upload_invalid": "上传的文件无效。必须是图片,且不得超过大小限制", "com_ui_upload_invalid_var": "上传的文件无效。必须是图片,且不得超过 {{0}} MB。", "com_ui_upload_ocr_text": "作为文本上传", + "com_ui_upload_provider": "上传至提供商", "com_ui_upload_success": "上传文件成功", "com_ui_upload_type": "选择上传类型", "com_ui_usage": "用量", @@ -1267,6 +1286,8 @@ "com_ui_web_search_scraper": "抓取器", "com_ui_web_search_scraper_firecrawl": "Firecrawl API", "com_ui_web_search_scraper_firecrawl_key": "获取您的 Firecrawl API Key", + "com_ui_web_search_scraper_serper": "Serper 
Scrape API", + "com_ui_web_search_scraper_serper_key": "获取您的 Serper API Key", "com_ui_web_search_searxng_api_key": "输入 SearXNG API Key(可选)", "com_ui_web_search_searxng_instance_url": "SearXNG 实例 URL", "com_ui_web_searching": "正在搜索网络", @@ -1276,5 +1297,8 @@ "com_ui_x_selected": "{{0}} 已选择", "com_ui_yes": "是的", "com_ui_zoom": "缩放", + "com_ui_zoom_in": "放大", + "com_ui_zoom_level": "缩放级别", + "com_ui_zoom_out": "缩小", "com_user_message": "您" } From e3d33fed8da2808737cf182dde098fb3bb86b1e5 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 22 Oct 2025 16:51:58 +0200 Subject: [PATCH 09/37] =?UTF-8?q?=F0=9F=93=A6=20chore:=20update=20`@librec?= =?UTF-8?q?hat/agents`=20to=20v2.4.86=20(#10216)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/package.json | 2 +- package-lock.json | 10 +++++----- packages/api/package.json | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/package.json b/api/package.json index f0b654b1af..44cc252216 100644 --- a/api/package.json +++ b/api/package.json @@ -48,7 +48,7 @@ "@langchain/google-genai": "^0.2.13", "@langchain/google-vertexai": "^0.2.13", "@langchain/textsplitters": "^0.1.0", - "@librechat/agents": "^2.4.85", + "@librechat/agents": "^2.4.86", "@librechat/api": "*", "@librechat/data-schemas": "*", "@microsoft/microsoft-graph-client": "^3.0.7", diff --git a/package-lock.json b/package-lock.json index 1a789de54d..2575fdfb6a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -64,7 +64,7 @@ "@langchain/google-genai": "^0.2.13", "@langchain/google-vertexai": "^0.2.13", "@langchain/textsplitters": "^0.1.0", - "@librechat/agents": "^2.4.85", + "@librechat/agents": "^2.4.86", "@librechat/api": "*", "@librechat/data-schemas": "*", "@microsoft/microsoft-graph-client": "^3.0.7", @@ -21531,9 +21531,9 @@ } }, "node_modules/@librechat/agents": { - "version": "2.4.85", - "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.85.tgz", - "integrity": 
"sha512-t6h5f6ApnoEC+x8kqBlke1RR6BPzT+9BvlkA8VxvQVJtYIt5Ey4BOTRDGjdilDoXUcLui11PbjCd17EbjPkTcA==", + "version": "2.4.86", + "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.86.tgz", + "integrity": "sha512-Z3v+vMfFEyrDWrlPvgY9dUlhzYvtLXYYULEzkxUM1QpITuI3DsXr3xb1kXHAYOx3NmBGxiN9R/gjZN0tGBEo1g==", "license": "MIT", "dependencies": { "@langchain/anthropic": "^0.3.26", @@ -51337,7 +51337,7 @@ "@azure/storage-blob": "^12.27.0", "@keyv/redis": "^4.3.3", "@langchain/core": "^0.3.62", - "@librechat/agents": "^2.4.85", + "@librechat/agents": "^2.4.86", "@librechat/data-schemas": "*", "@modelcontextprotocol/sdk": "^1.17.1", "axios": "^1.12.1", diff --git a/packages/api/package.json b/packages/api/package.json index 05054498e2..a642cf6e23 100644 --- a/packages/api/package.json +++ b/packages/api/package.json @@ -80,7 +80,7 @@ "@azure/storage-blob": "^12.27.0", "@keyv/redis": "^4.3.3", "@langchain/core": "^0.3.62", - "@librechat/agents": "^2.4.85", + "@librechat/agents": "^2.4.86", "@librechat/data-schemas": "*", "@modelcontextprotocol/sdk": "^1.17.1", "axios": "^1.12.1", From d8d5d59d922f50ce1eb7277625886d7eee880050 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 22 Oct 2025 22:02:29 +0200 Subject: [PATCH 10/37] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20refactor:=20Message?= =?UTF-8?q?=20Cache=20Clearing=20Logic=20into=20Reusable=20Helper=20(#1022?= =?UTF-8?q?6)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/src/components/Agents/AgentDetail.tsx | 7 ++--- client/src/components/Agents/Marketplace.tsx | 9 +++---- .../components/Chat/Menus/HeaderNewChat.tsx | 9 +++---- client/src/components/Nav/MobileNav.tsx | 6 ++--- client/src/components/Nav/NewChat.tsx | 6 ++--- .../Conversations/useNavigateToConvo.tsx | 10 +++++-- client/src/utils/messages.ts | 26 ++++++++++++++++++- 7 files changed, 45 insertions(+), 28 deletions(-) diff --git a/client/src/components/Agents/AgentDetail.tsx 
b/client/src/components/Agents/AgentDetail.tsx index 3cbfe330ca..ef77734e30 100644 --- a/client/src/components/Agents/AgentDetail.tsx +++ b/client/src/components/Agents/AgentDetail.tsx @@ -11,9 +11,9 @@ import { AgentListResponse, } from 'librechat-data-provider'; import type t from 'librechat-data-provider'; +import { renderAgentAvatar, clearMessagesCache } from '~/utils'; import { useLocalize, useDefaultConvo } from '~/hooks'; import { useChatContext } from '~/Providers'; -import { renderAgentAvatar } from '~/utils'; interface SupportContact { name?: string; @@ -56,10 +56,7 @@ const AgentDetail: React.FC = ({ agent, isOpen, onClose }) => localStorage.setItem(`${LocalStorageKeys.AGENT_ID_PREFIX}0`, agent.id); - queryClient.setQueryData( - [QueryKeys.messages, conversation?.conversationId ?? Constants.NEW_CONVO], - [], - ); + clearMessagesCache(queryClient, conversation?.conversationId); queryClient.invalidateQueries([QueryKeys.messages]); /** Template with agent configuration */ diff --git a/client/src/components/Agents/Marketplace.tsx b/client/src/components/Agents/Marketplace.tsx index 97cf1b20cc..ef882142e2 100644 --- a/client/src/components/Agents/Marketplace.tsx +++ b/client/src/components/Agents/Marketplace.tsx @@ -4,7 +4,7 @@ import { useOutletContext } from 'react-router-dom'; import { useQueryClient } from '@tanstack/react-query'; import { useSearchParams, useParams, useNavigate } from 'react-router-dom'; import { TooltipAnchor, Button, NewChatIcon, useMediaQuery } from '@librechat/client'; -import { PermissionTypes, Permissions, QueryKeys, Constants } from 'librechat-data-provider'; +import { PermissionTypes, Permissions, QueryKeys } from 'librechat-data-provider'; import type t from 'librechat-data-provider'; import type { ContextType } from '~/common'; import { useDocumentTitle, useHasAccess, useLocalize, TranslationKeys } from '~/hooks'; @@ -13,11 +13,11 @@ import MarketplaceAdminSettings from './MarketplaceAdminSettings'; import { SidePanelProvider, 
useChatContext } from '~/Providers'; import { SidePanelGroup } from '~/components/SidePanel'; import { OpenSidebar } from '~/components/Chat/Menus'; +import { cn, clearMessagesCache } from '~/utils'; import CategoryTabs from './CategoryTabs'; import AgentDetail from './AgentDetail'; import SearchBar from './SearchBar'; import AgentGrid from './AgentGrid'; -import { cn } from '~/utils'; import store from '~/store'; interface AgentMarketplaceProps { @@ -224,10 +224,7 @@ const AgentMarketplace: React.FC = ({ className = '' }) = window.open('/c/new', '_blank'); return; } - queryClient.setQueryData( - [QueryKeys.messages, conversation?.conversationId ?? Constants.NEW_CONVO], - [], - ); + clearMessagesCache(queryClient, conversation?.conversationId); queryClient.invalidateQueries([QueryKeys.messages]); newConversation(); }; diff --git a/client/src/components/Chat/Menus/HeaderNewChat.tsx b/client/src/components/Chat/Menus/HeaderNewChat.tsx index b2dc6416ab..5245ccbf13 100644 --- a/client/src/components/Chat/Menus/HeaderNewChat.tsx +++ b/client/src/components/Chat/Menus/HeaderNewChat.tsx @@ -1,8 +1,8 @@ +import { QueryKeys } from 'librechat-data-provider'; import { useQueryClient } from '@tanstack/react-query'; -import { QueryKeys, Constants } from 'librechat-data-provider'; import { TooltipAnchor, Button, NewChatIcon } from '@librechat/client'; -import type { TMessage } from 'librechat-data-provider'; import { useChatContext } from '~/Providers'; +import { clearMessagesCache } from '~/utils'; import { useLocalize } from '~/hooks'; export default function HeaderNewChat() { @@ -15,10 +15,7 @@ export default function HeaderNewChat() { window.open('/c/new', '_blank'); return; } - queryClient.setQueryData( - [QueryKeys.messages, conversation?.conversationId ?? 
Constants.NEW_CONVO], - [], - ); + clearMessagesCache(queryClient, conversation?.conversationId); queryClient.invalidateQueries([QueryKeys.messages]); newConversation(); }; diff --git a/client/src/components/Nav/MobileNav.tsx b/client/src/components/Nav/MobileNav.tsx index e945ec5be1..6f11b327ce 100644 --- a/client/src/components/Nav/MobileNav.tsx +++ b/client/src/components/Nav/MobileNav.tsx @@ -5,6 +5,7 @@ import { QueryKeys, Constants } from 'librechat-data-provider'; import type { TMessage } from 'librechat-data-provider'; import type { Dispatch, SetStateAction } from 'react'; import { useLocalize, useNewConvo } from '~/hooks'; +import { clearMessagesCache } from '~/utils'; import store from '~/store'; export default function MobileNav({ @@ -57,10 +58,7 @@ export default function MobileNav({ aria-label={localize('com_ui_new_chat')} className="m-1 inline-flex size-10 items-center justify-center rounded-full hover:bg-surface-hover" onClick={() => { - queryClient.setQueryData( - [QueryKeys.messages, conversation?.conversationId ?? 
Constants.NEW_CONVO], - [], - ); + clearMessagesCache(queryClient, conversation?.conversationId); queryClient.invalidateQueries([QueryKeys.messages]); newConversation(); }} diff --git a/client/src/components/Nav/NewChat.tsx b/client/src/components/Nav/NewChat.tsx index b3cdd2cac5..026f115103 100644 --- a/client/src/components/Nav/NewChat.tsx +++ b/client/src/components/Nav/NewChat.tsx @@ -5,6 +5,7 @@ import { QueryKeys, Constants } from 'librechat-data-provider'; import { TooltipAnchor, NewChatIcon, MobileSidebar, Sidebar, Button } from '@librechat/client'; import type { TMessage } from 'librechat-data-provider'; import { useLocalize, useNewConvo } from '~/hooks'; +import { clearMessagesCache } from '~/utils'; import store from '~/store'; export default function NewChat({ @@ -33,10 +34,7 @@ export default function NewChat({ window.open('/c/new', '_blank'); return; } - queryClient.setQueryData( - [QueryKeys.messages, conversation?.conversationId ?? Constants.NEW_CONVO], - [], - ); + clearMessagesCache(queryClient, conversation?.conversationId); queryClient.invalidateQueries([QueryKeys.messages]); newConvo(); navigate('/c/new', { state: { focusChat: true } }); diff --git a/client/src/hooks/Conversations/useNavigateToConvo.tsx b/client/src/hooks/Conversations/useNavigateToConvo.tsx index 55f43fa820..2bbb4620b3 100644 --- a/client/src/hooks/Conversations/useNavigateToConvo.tsx +++ b/client/src/hooks/Conversations/useNavigateToConvo.tsx @@ -3,7 +3,13 @@ import { useNavigate } from 'react-router-dom'; import { useQueryClient } from '@tanstack/react-query'; import { QueryKeys, Constants, dataService } from 'librechat-data-provider'; import type { TConversation, TEndpointsConfig, TModelsConfig } from 'librechat-data-provider'; -import { buildDefaultConvo, getDefaultEndpoint, getEndpointField, logger } from '~/utils'; +import { + getDefaultEndpoint, + clearMessagesCache, + buildDefaultConvo, + getEndpointField, + logger, +} from '~/utils'; import store from '~/store'; const 
useNavigateToConvo = (index = 0) => { @@ -80,7 +86,7 @@ const useNavigateToConvo = (index = 0) => { }); } clearAllConversations(true); - queryClient.setQueryData([QueryKeys.messages, currentConvoId], []); + clearMessagesCache(queryClient, currentConvoId); if (convo.conversationId !== Constants.NEW_CONVO && convo.conversationId) { queryClient.invalidateQueries([QueryKeys.conversation, convo.conversationId]); fetchFreshData(convo); diff --git a/client/src/utils/messages.ts b/client/src/utils/messages.ts index fe8ec36499..d436c45077 100644 --- a/client/src/utils/messages.ts +++ b/client/src/utils/messages.ts @@ -1,5 +1,6 @@ -import { ContentTypes } from 'librechat-data-provider'; +import { ContentTypes, QueryKeys, Constants } from 'librechat-data-provider'; import type { TMessage, TMessageContentParts } from 'librechat-data-provider'; +import type { QueryClient } from '@tanstack/react-query'; export const TEXT_KEY_DIVIDER = '|||'; @@ -146,3 +147,26 @@ export const scrollToEnd = (callback?: () => void) => { } } }; + +/** + * Clears messages for both the specified conversation ID and the NEW_CONVO query key. + * This ensures that messages are properly cleared in all contexts, preventing stale data + * from persisting in the NEW_CONVO cache. + * + * @param queryClient - The React Query client instance + * @param conversationId - The conversation ID to clear messages for + */ +export const clearMessagesCache = ( + queryClient: QueryClient, + conversationId: string | undefined | null, +): void => { + const convoId = conversationId ?? 
Constants.NEW_CONVO; + + // Clear messages for the current conversation + queryClient.setQueryData([QueryKeys.messages, convoId], []); + + // Also clear NEW_CONVO messages if we're not already on NEW_CONVO + if (convoId !== Constants.NEW_CONVO) { + queryClient.setQueryData([QueryKeys.messages, Constants.NEW_CONVO], []); + } +}; From 87d7ee4b0ef0078c80ae7a9b54ad066344072ffe Mon Sep 17 00:00:00 2001 From: Sebastien Bruel <93573440+sbruel@users.noreply.github.com> Date: Thu, 23 Oct 2025 05:04:49 +0900 Subject: [PATCH 11/37] =?UTF-8?q?=F0=9F=8C=90=20feat:=20Configurable=20Dom?= =?UTF-8?q?ain=20and=20Port=20for=20Vite=20Dev=20Server=20(#10180)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/vite.config.ts | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/client/vite.config.ts b/client/vite.config.ts index a356e246a1..f49e6bc9cb 100644 --- a/client/vite.config.ts +++ b/client/vite.config.ts @@ -1,4 +1,5 @@ import react from '@vitejs/plugin-react'; +// @ts-ignore import path from 'path'; import type { Plugin } from 'vite'; import { defineConfig } from 'vite'; @@ -7,19 +8,23 @@ import { nodePolyfills } from 'vite-plugin-node-polyfills'; import { VitePWA } from 'vite-plugin-pwa'; // https://vitejs.dev/config/ +const backendPort = process.env.BACKEND_PORT && Number(process.env.BACKEND_PORT) || 3080; +const backendURL = process.env.HOST ? 
`http://${process.env.HOST}:${backendPort}` : `http://localhost:${backendPort}`; + export default defineConfig(({ command }) => ({ base: '', server: { - host: 'localhost', - port: 3090, + allowedHosts: process.env.VITE_ALLOWED_HOSTS && process.env.VITE_ALLOWED_HOSTS.split(',') || [], + host: process.env.HOST || 'localhost', + port: process.env.PORT && Number(process.env.PORT) || 3090, strictPort: false, proxy: { '/api': { - target: 'http://localhost:3080', + target: backendURL, changeOrigin: true, }, '/oauth': { - target: 'http://localhost:3080', + target: backendURL, changeOrigin: true, }, }, @@ -259,6 +264,7 @@ export default defineConfig(({ command }) => ({ interface SourcemapExclude { excludeNodeModules?: boolean; } + export function sourcemapExclude(opts?: SourcemapExclude): Plugin { return { name: 'sourcemap-exclude', From 9495520f6f23391211ad3f00f1a992c378027bae Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 22 Oct 2025 22:22:57 +0200 Subject: [PATCH 12/37] =?UTF-8?q?=F0=9F=93=A6=20chore:=20update=20`vite`?= =?UTF-8?q?=20to=20v6.4.1=20and=20`@playwright/test`=20to=20v1.56.1=20(#10?= =?UTF-8?q?227)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📦 chore: update vite to v6.4.1 * 📦 chore: update @playwright/test to v1.56.1 --- client/package.json | 2 +- package-lock.json | 135 +++++++++++++++++++++++++++++++++++++++----- package.json | 2 +- 3 files changed, 124 insertions(+), 15 deletions(-) diff --git a/client/package.json b/client/package.json index b46f77cbd8..dcd5f637a1 100644 --- a/client/package.json +++ b/client/package.json @@ -149,7 +149,7 @@ "tailwindcss": "^3.4.1", "ts-jest": "^29.2.5", "typescript": "^5.3.3", - "vite": "^6.3.6", + "vite": "^6.4.1", "vite-plugin-compression2": "^2.2.1", "vite-plugin-node-polyfills": "^0.23.0", "vite-plugin-pwa": "^0.21.2" diff --git a/package-lock.json b/package-lock.json index 2575fdfb6a..cf3bb750eb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -19,7 
+19,7 @@ "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.20.0", "@microsoft/eslint-formatter-sarif": "^3.1.0", - "@playwright/test": "^1.50.1", + "@playwright/test": "^1.56.1", "@types/react-virtualized": "^9.22.0", "caniuse-lite": "^1.0.30001741", "cross-env": "^7.0.3", @@ -2768,7 +2768,7 @@ "tailwindcss": "^3.4.1", "ts-jest": "^29.2.5", "typescript": "^5.3.3", - "vite": "^6.3.6", + "vite": "^6.4.1", "vite-plugin-compression2": "^2.2.1", "vite-plugin-node-polyfills": "^0.23.0", "vite-plugin-pwa": "^0.21.2" @@ -4305,6 +4305,24 @@ "node": ">=6" } }, + "client/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, "client/node_modules/framer-motion": { "version": "11.18.2", "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.18.2.tgz", @@ -4358,6 +4376,19 @@ "dev": true, "license": "MIT" }, + "client/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "client/node_modules/react-is": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", @@ -4453,6 +4484,81 @@ "browserslist": ">= 4.21.0" } }, + "client/node_modules/vite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", + "integrity": 
"sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, "client/node_modules/vite-plugin-pwa": { "version": "0.21.2", "resolved": "https://registry.npmjs.org/vite-plugin-pwa/-/vite-plugin-pwa-0.21.2.tgz", @@ -22902,12 +23008,12 @@ } }, "node_modules/@playwright/test": { - "version": "1.50.1", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.50.1.tgz", - "integrity": "sha512-Jii3aBg+CEDpgnuDxEp/h7BimHcUTDlpEtce89xEumlJ5ef2hqepZ+PWp1DDpYC/VO9fmWVI1IlEaoI5fK9FXQ==", + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.56.1.tgz", + "integrity": "sha512-vSMYtL/zOcFpvJCW71Q/OEGQb7KYBPAdKh35WNSkaZA75JlAO8ED8UN6GUNTm3drWomcbcqRPFqQbLae8yBTdg==", "license": "Apache-2.0", "dependencies": { - "playwright": "1.50.1" + 
"playwright": "1.56.1" }, "bin": { "playwright": "cli.js" @@ -43042,12 +43148,12 @@ } }, "node_modules/playwright": { - "version": "1.50.1", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.50.1.tgz", - "integrity": "sha512-G8rwsOQJ63XG6BbKj2w5rHeavFjy5zynBA9zsJMMtBoe/Uf757oG12NXz6e6OirF7RCrTVAKFXbLmn1RbL7Qaw==", + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.56.1.tgz", + "integrity": "sha512-aFi5B0WovBHTEvpM3DzXTUaeN6eN0qWnTkKx4NQaH4Wvcmc153PdaY2UBdSYKaGYw+UyWXSVyxDUg5DoPEttjw==", "license": "Apache-2.0", "dependencies": { - "playwright-core": "1.50.1" + "playwright-core": "1.56.1" }, "bin": { "playwright": "cli.js" @@ -43060,9 +43166,9 @@ } }, "node_modules/playwright-core": { - "version": "1.50.1", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.50.1.tgz", - "integrity": "sha512-ra9fsNWayuYumt+NiM069M6OkcRb1FZSK8bgi66AtpFoWkg2+y0bJSNmkFrWhMbEBbVKC/EruAHH3g0zmtwGmQ==", + "version": "1.56.1", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.56.1.tgz", + "integrity": "sha512-hutraynyn31F+Bifme+Ps9Vq59hKuUCz7H1kDOcBs+2oGguKkWTU50bBWrtz34OUWmIwpBTWDxaRPXrIXkgvmQ==", "license": "Apache-2.0", "bin": { "playwright-core": "cli.js" @@ -49973,6 +50079,7 @@ "integrity": "sha512-0msEVHJEScQbhkbVTb/4iHZdJ6SXp/AvxL2sjwYQFfBqleHtnCqv1J3sa9zbWz/6kW1m9Tfzn92vW+kZ1WV6QA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", @@ -50076,6 +50183,7 @@ "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12.0.0" }, @@ -50094,6 +50202,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, diff --git a/package.json b/package.json index 
4a69267be2..cfd5d53691 100644 --- a/package.json +++ b/package.json @@ -100,7 +100,7 @@ "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.20.0", "@microsoft/eslint-formatter-sarif": "^3.1.0", - "@playwright/test": "^1.50.1", + "@playwright/test": "^1.56.1", "@types/react-virtualized": "^9.22.0", "caniuse-lite": "^1.0.30001741", "cross-env": "^7.0.3", From 05c91951974828a6f64b9b2cccd57ba2bcd97394 Mon Sep 17 00:00:00 2001 From: Sebastien Bruel <93573440+sbruel@users.noreply.github.com> Date: Mon, 27 Oct 2025 10:30:05 +0900 Subject: [PATCH 13/37] =?UTF-8?q?=F0=9F=9B=A0=EF=B8=8F=20fix:=20Agent=20To?= =?UTF-8?q?ols=20Modal=20on=20First-Time=20Agent=20Creation=20(#10234)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/src/Providers/AgentPanelContext.tsx | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/client/src/Providers/AgentPanelContext.tsx b/client/src/Providers/AgentPanelContext.tsx index 3925492534..4effd7d679 100644 --- a/client/src/Providers/AgentPanelContext.tsx +++ b/client/src/Providers/AgentPanelContext.tsx @@ -35,9 +35,7 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode }) enabled: !isEphemeralAgent(agent_id), }); - const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents, { - enabled: !isEphemeralAgent(agent_id), - }); + const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents); const { data: mcpData } = useMCPToolsQuery({ enabled: !isEphemeralAgent(agent_id) && startupConfig?.mcpServers != null, From cbbbde36814b0f99a691543c6a5cc487104e2d84 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 26 Oct 2025 21:32:38 -0400 Subject: [PATCH 14/37] =?UTF-8?q?=F0=9F=8C=8D=20i18n:=20Update=20translati?= =?UTF-8?q?on.json=20with=20latest=20translations=20(#10229)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- client/src/locales/lv/translation.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/src/locales/lv/translation.json b/client/src/locales/lv/translation.json index ac3fe25876..e4fd26b544 100644 --- a/client/src/locales/lv/translation.json +++ b/client/src/locales/lv/translation.json @@ -628,7 +628,7 @@ "com_ui_2fa_invalid": "Nederīgs divfaktoru autentifikācijas kods", "com_ui_2fa_setup": "Iestatīt 2FA", "com_ui_2fa_verified": "Divfaktoru autentifikācija veiksmīgi verificēta", - "com_ui_accept": "Es piekrītu", + "com_ui_accept": "Piekrītu", "com_ui_action_button": "Darbības poga", "com_ui_active": "Aktīvais", "com_ui_add": "Pievienot", @@ -814,7 +814,7 @@ "com_ui_date_september": "Septembris", "com_ui_date_today": "Šodien", "com_ui_date_yesterday": "Vakar", - "com_ui_decline": "Es nepiekrītu", + "com_ui_decline": "Nepiekrītu", "com_ui_default_post_request": "Noklusējums (POST pieprasījums)", "com_ui_delete": "Dzēst", "com_ui_delete_action": "Dzēst darbību", @@ -1173,7 +1173,7 @@ "com_ui_share_delete_error": "Dzēšot koplietoto saiti, radās kļūda.", "com_ui_share_error": "Kopīgojot sarunas saiti, radās kļūda.", "com_ui_share_everyone": "Koplietot ar visiem", - "com_ui_share_everyone_description_var": "Šis {{resource}} būs pieejams ikvienam. Lūdzu, pārliecinieties, ka {{resource}} patiesībā ir paredzēts koplietošanai ar visiem. Esiet uzmanīgi ar saviem datiem.", + "com_ui_share_everyone_description_var": "Šis {{resource}} būs pieejams ikvienam. Lūdzu, pārliecinieties, ka {{resource}} patiesībā ir paredzēts koplietošanai visiem. 
Esiet uzmanīgi ar saviem datiem.", "com_ui_share_link_to_chat": "Kopīgot saiti sarunai", "com_ui_share_update_message": "Jūsu vārds, pielāgotie norādījumi un visas ziņas, ko pievienojat pēc kopīgošanas, paliek privātas.", "com_ui_share_var": "Kopīgot {{0}}", From 90e610cedad4ec7be5e1fcb7e88abdf915745803 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 26 Oct 2025 21:37:55 -0400 Subject: [PATCH 15/37] =?UTF-8?q?=F0=9F=8E=AA=20refactor:=20Allow=20Last?= =?UTF-8?q?=20Model=20Spec=20Selection=20without=20Prioritizing=20(#10258)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: Default Model Spec Retrieval Logic, allowing last selected spec on new chat if last selection was a spec * chore: Replace hardcoded 'new' conversation ID with Constants.NEW_CONVO for consistency * chore: remove redundant condition for model spec preset selection in useNewConvo hook --- client/src/hooks/useNewConvo.ts | 8 +++++--- client/src/routes/ChatRoute.tsx | 6 ++++-- client/src/utils/endpoints.ts | 20 +++++++++++++------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/client/src/hooks/useNewConvo.ts b/client/src/hooks/useNewConvo.ts index 22ea5f327c..63b442b83a 100644 --- a/client/src/hooks/useNewConvo.ts +++ b/client/src/hooks/useNewConvo.ts @@ -252,18 +252,20 @@ const useNewConvo = (index = 0) => { }; let preset = _preset; - const defaultModelSpec = getDefaultModelSpec(startupConfig); + const result = getDefaultModelSpec(startupConfig); + const defaultModelSpec = result?.default ?? result?.last; if ( !preset && startupConfig && (startupConfig.modelSpecs?.prioritize === true || - (startupConfig.interface?.modelSelect ?? true) !== true) && + (startupConfig.interface?.modelSelect ?? 
true) !== true || + (result?.last != null && Object.keys(_template).length === 0)) && defaultModelSpec ) { preset = getModelSpecPreset(defaultModelSpec); } - if (conversation.conversationId === 'new' && !modelsData) { + if (conversation.conversationId === Constants.NEW_CONVO && !modelsData) { const filesToDelete = Array.from(files.values()) .filter( (file) => diff --git a/client/src/routes/ChatRoute.tsx b/client/src/routes/ChatRoute.tsx index d81cbc075c..240b5583b9 100644 --- a/client/src/routes/ChatRoute.tsx +++ b/client/src/routes/ChatRoute.tsx @@ -66,7 +66,8 @@ export default function ChatRoute() { } if (conversationId === Constants.NEW_CONVO && endpointsQuery.data && modelsQuery.data) { - const spec = getDefaultModelSpec(startupConfig); + const result = getDefaultModelSpec(startupConfig); + const spec = result?.default ?? result?.last; logger.log('conversation', 'ChatRoute, new convo effect', conversation); newConversation({ modelsData: modelsQuery.data, @@ -90,7 +91,8 @@ export default function ChatRoute() { assistantListMap[EModelEndpoint.assistants] && assistantListMap[EModelEndpoint.azureAssistants] ) { - const spec = getDefaultModelSpec(startupConfig); + const result = getDefaultModelSpec(startupConfig); + const spec = result?.default ?? result?.last; logger.log('conversation', 'ChatRoute new convo, assistants effect', conversation); newConversation({ modelsData: modelsQuery.data, diff --git a/client/src/utils/endpoints.ts b/client/src/utils/endpoints.ts index 60585132d4..c98680843a 100644 --- a/client/src/utils/endpoints.ts +++ b/client/src/utils/endpoints.ts @@ -176,11 +176,17 @@ export function getConvoSwitchLogic(params: ConversationInitParams): InitiatedTe }; } -/** Gets the default spec by order. - * - * First, the admin defined default, then last selected spec, followed by first spec +/** + * Gets default model spec from config and user preferences. 
+ * Priority: admin default → last selected → first spec (when prioritize=true or modelSelect disabled). + * Otherwise: admin default or last conversation spec. */ -export function getDefaultModelSpec(startupConfig?: t.TStartupConfig) { +export function getDefaultModelSpec(startupConfig?: t.TStartupConfig): + | { + default?: t.TModelSpec; + last?: t.TModelSpec; + } + | undefined { const { modelSpecs, interface: interfaceConfig } = startupConfig ?? {}; const { list, prioritize } = modelSpecs ?? {}; if (!list) { @@ -190,9 +196,9 @@ export function getDefaultModelSpec(startupConfig?: t.TStartupConfig) { if (prioritize === true || !interfaceConfig?.modelSelect) { const lastSelectedSpecName = localStorage.getItem(LocalStorageKeys.LAST_SPEC); const lastSelectedSpec = list?.find((spec) => spec.name === lastSelectedSpecName); - return defaultSpec || lastSelectedSpec || list?.[0]; + return { default: defaultSpec || lastSelectedSpec || list?.[0] }; } else if (defaultSpec) { - return defaultSpec; + return { default: defaultSpec }; } const lastConversationSetup = JSON.parse( localStorage.getItem(LocalStorageKeys.LAST_CONVO_SETUP + '_0') ?? 
'{}', @@ -200,7 +206,7 @@ export function getDefaultModelSpec(startupConfig?: t.TStartupConfig) { if (!lastConversationSetup.spec) { return; } - return list?.find((spec) => spec.name === lastConversationSetup.spec); + return { last: list?.find((spec) => spec.name === lastConversationSetup.spec) }; } export function getModelSpecPreset(modelSpec?: t.TModelSpec) { From 13b784a3e68ab26f6c3cd16a7e9801fba576f34f Mon Sep 17 00:00:00 2001 From: Federico Ruggi Date: Mon, 27 Oct 2025 02:48:23 +0100 Subject: [PATCH 16/37] =?UTF-8?q?=F0=9F=A7=BC=20fix:=20Sanitize=20MCP=20Se?= =?UTF-8?q?rver=20Selection=20Against=20Config=20(#10243)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * filter out unavailable servers * bump render time * Fix import path for useGetStartupConfig * refactor: Change configuredServers to use Set for improved filtering of available MCPs --------- Co-authored-by: Danny Avila --- .../VirtualScrollingPerformance.test.tsx | 2 +- .../hooks/MCP/__tests__/useMCPSelect.test.tsx | 104 +++++++++++++++--- client/src/hooks/MCP/useMCPSelect.ts | 16 ++- 3 files changed, 104 insertions(+), 18 deletions(-) diff --git a/client/src/components/Agents/tests/VirtualScrollingPerformance.test.tsx b/client/src/components/Agents/tests/VirtualScrollingPerformance.test.tsx index 1efb239308..1e1b7d1e4b 100644 --- a/client/src/components/Agents/tests/VirtualScrollingPerformance.test.tsx +++ b/client/src/components/Agents/tests/VirtualScrollingPerformance.test.tsx @@ -194,7 +194,7 @@ describe('Virtual Scrolling Performance', () => { // Performance check: rendering should be fast const renderTime = endTime - startTime; - expect(renderTime).toBeLessThan(720); + expect(renderTime).toBeLessThan(740); console.log(`Rendered 1000 agents in ${renderTime.toFixed(2)}ms`); console.log(`Only ${renderedCards.length} DOM nodes created for 1000 agents`); diff --git a/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx 
b/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx index b562b77d86..7145e95e74 100644 --- a/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx +++ b/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx @@ -6,6 +6,7 @@ import { Constants, LocalStorageKeys } from 'librechat-data-provider'; import { ephemeralAgentByConvoId } from '~/store'; import { setTimestamp } from '~/utils/timestamps'; import { useMCPSelect } from '../useMCPSelect'; +import * as dataProvider from '~/data-provider'; // Mock dependencies jest.mock('~/utils/timestamps', () => ({ @@ -14,10 +15,21 @@ jest.mock('~/utils/timestamps', () => ({ jest.mock('lodash/isEqual', () => jest.fn((a, b) => JSON.stringify(a) === JSON.stringify(b))); -const createWrapper = () => { +jest.mock('~/data-provider', () => ({ + ...jest.requireActual('~/data-provider'), + useGetStartupConfig: jest.fn(), +})); + +const createWrapper = (mcpServers: string[] = []) => { // Create a new Jotai store for each test to ensure clean state const store = createStore(); + // Mock the startup config + (dataProvider.useGetStartupConfig as jest.Mock).mockReturnValue({ + data: { mcpServers: Object.fromEntries(mcpServers.map((v) => [v, {}])) }, + isLoading: false, + }); + const Wrapper: React.FC<{ children: React.ReactNode }> = ({ children }) => ( {children} @@ -65,7 +77,7 @@ describe('useMCPSelect', () => { describe('State Updates', () => { it('should update mcpValues when setMCPValues is called', async () => { const { result } = renderHook(() => useMCPSelect({}), { - wrapper: createWrapper(), + wrapper: createWrapper(['value1', 'value2']), }); const newValues = ['value1', 'value2']; @@ -229,7 +241,7 @@ describe('useMCPSelect', () => { const { result, rerender } = renderHook( ({ conversationId }) => useMCPSelect({ conversationId }), { - wrapper: createWrapper(), + wrapper: createWrapper(['convo1-value', 'convo2-value']), initialProps: { conversationId: 'convo1' }, }, ); @@ -271,7 +283,7 @@ describe('useMCPSelect', () => { 
describe('Ephemeral Agent Synchronization', () => { it('should sync mcpValues when ephemeralAgent is updated externally', async () => { // Create a shared wrapper for both hooks to share the same Recoil/Jotai context - const wrapper = createWrapper(); + const wrapper = createWrapper(['external-value1', 'external-value2']); // Create a component that uses both hooks to ensure they share state const TestComponent = () => { @@ -298,9 +310,75 @@ describe('useMCPSelect', () => { }); }); + it('should filter out MCPs not in configured servers', async () => { + const wrapper = createWrapper(['server1', 'server2']); + + const TestComponent = () => { + const mcpHook = useMCPSelect({}); + const setEphemeralAgent = useSetRecoilState(ephemeralAgentByConvoId(Constants.NEW_CONVO)); + return { mcpHook, setEphemeralAgent }; + }; + + const { result } = renderHook(() => TestComponent(), { wrapper }); + + act(() => { + result.current.setEphemeralAgent({ + mcp: ['server1', 'removed-server', 'server2'], + }); + }); + + await waitFor(() => { + expect(result.current.mcpHook.mcpValues).toEqual(['server1', 'server2']); + }); + }); + + it('should clear all MCPs when none are in configured servers', async () => { + const wrapper = createWrapper(['server1', 'server2']); + + const TestComponent = () => { + const mcpHook = useMCPSelect({}); + const setEphemeralAgent = useSetRecoilState(ephemeralAgentByConvoId(Constants.NEW_CONVO)); + return { mcpHook, setEphemeralAgent }; + }; + + const { result } = renderHook(() => TestComponent(), { wrapper }); + + act(() => { + result.current.setEphemeralAgent({ + mcp: ['removed1', 'removed2', 'removed3'], + }); + }); + + await waitFor(() => { + expect(result.current.mcpHook.mcpValues).toEqual([]); + }); + }); + + it('should keep all MCPs when all are in configured servers', async () => { + const wrapper = createWrapper(['server1', 'server2', 'server3']); + + const TestComponent = () => { + const mcpHook = useMCPSelect({}); + const setEphemeralAgent = 
useSetRecoilState(ephemeralAgentByConvoId(Constants.NEW_CONVO)); + return { mcpHook, setEphemeralAgent }; + }; + + const { result } = renderHook(() => TestComponent(), { wrapper }); + + act(() => { + result.current.setEphemeralAgent({ + mcp: ['server1', 'server2'], + }); + }); + + await waitFor(() => { + expect(result.current.mcpHook.mcpValues).toEqual(['server1', 'server2']); + }); + }); + it('should update ephemeralAgent when mcpValues changes through hook', async () => { // Create a shared wrapper for both hooks - const wrapper = createWrapper(); + const wrapper = createWrapper(['hook-value1', 'hook-value2']); // Create a component that uses both the hook and accesses Recoil state const TestComponent = () => { @@ -326,7 +404,7 @@ describe('useMCPSelect', () => { it('should handle empty ephemeralAgent.mcp array correctly', async () => { // Create a shared wrapper - const wrapper = createWrapper(); + const wrapper = createWrapper(['initial-value']); // Create a component that uses both hooks const TestComponent = () => { @@ -360,7 +438,7 @@ describe('useMCPSelect', () => { it('should properly sync non-empty arrays from ephemeralAgent', async () => { // Additional test to ensure non-empty arrays DO sync - const wrapper = createWrapper(); + const wrapper = createWrapper(['value1', 'value2', 'value3', 'value4', 'value5']); const TestComponent = () => { const mcpHook = useMCPSelect({}); @@ -401,7 +479,7 @@ describe('useMCPSelect', () => { describe('Edge Cases', () => { it('should handle undefined conversationId', () => { const { result } = renderHook(() => useMCPSelect({ conversationId: undefined }), { - wrapper: createWrapper(), + wrapper: createWrapper(['test']), }); expect(result.current.mcpValues).toEqual([]); @@ -422,11 +500,10 @@ describe('useMCPSelect', () => { }); it('should handle very large arrays without performance issues', async () => { - const { result } = renderHook(() => useMCPSelect({}), { - wrapper: createWrapper(), - }); - const largeArray = 
Array.from({ length: 1000 }, (_, i) => `value-${i}`); + const { result } = renderHook(() => useMCPSelect({}), { + wrapper: createWrapper(largeArray), + }); const startTime = performance.now(); @@ -457,8 +534,9 @@ describe('useMCPSelect', () => { describe('Memory Leak Prevention', () => { it('should not leak memory on repeated updates', async () => { + const values = Array.from({ length: 100 }, (_, i) => `value-${i}`); const { result } = renderHook(() => useMCPSelect({}), { - wrapper: createWrapper(), + wrapper: createWrapper(values), }); // Perform many updates to test for memory leaks diff --git a/client/src/hooks/MCP/useMCPSelect.ts b/client/src/hooks/MCP/useMCPSelect.ts index dc3ac869c9..3f37bb4d70 100644 --- a/client/src/hooks/MCP/useMCPSelect.ts +++ b/client/src/hooks/MCP/useMCPSelect.ts @@ -1,13 +1,18 @@ -import { useCallback, useEffect } from 'react'; +import { useCallback, useEffect, useMemo } from 'react'; import { useAtom } from 'jotai'; import isEqual from 'lodash/isEqual'; import { useRecoilState } from 'recoil'; import { Constants, LocalStorageKeys } from 'librechat-data-provider'; import { ephemeralAgentByConvoId, mcpValuesAtomFamily, mcpPinnedAtom } from '~/store'; +import { useGetStartupConfig } from '~/data-provider'; import { setTimestamp } from '~/utils/timestamps'; export function useMCPSelect({ conversationId }: { conversationId?: string | null }) { const key = conversationId ?? Constants.NEW_CONVO; + const { data: startupConfig } = useGetStartupConfig(); + const configuredServers = useMemo(() => { + return new Set(Object.keys(startupConfig?.mcpServers ?? 
{})); + }, [startupConfig?.mcpServers]); const [isPinned, setIsPinned] = useAtom(mcpPinnedAtom); const [mcpValues, setMCPValuesRaw] = useAtom(mcpValuesAtomFamily(key)); @@ -15,10 +20,13 @@ export function useMCPSelect({ conversationId }: { conversationId?: string | nul // Sync Jotai state with ephemeral agent state useEffect(() => { - if (ephemeralAgent?.mcp && ephemeralAgent.mcp.length > 0) { - setMCPValuesRaw(ephemeralAgent.mcp); + const mcps = ephemeralAgent?.mcp ?? []; + if (mcps.length > 0) { + // Strip out servers that are not available in the startup config + const activeMcps = mcps.filter((mcp) => configuredServers.has(mcp)); + setMCPValuesRaw(activeMcps); } - }, [ephemeralAgent?.mcp, setMCPValuesRaw]); + }, [ephemeralAgent?.mcp, setMCPValuesRaw, configuredServers]); useEffect(() => { setEphemeralAgent((prev) => { From d46dde4e01f90f8efe17b0e803ce09cb9e37162d Mon Sep 17 00:00:00 2001 From: Max Sanna Date: Mon, 27 Oct 2025 02:58:29 +0100 Subject: [PATCH 17/37] =?UTF-8?q?=F0=9F=91=AB=20fix:=20Update=20Entra=20ID?= =?UTF-8?q?=20group=20retrieval=20to=20use=20getMemberGroups=20and=20add?= =?UTF-8?q?=20pagination=20support=20(#10199)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/server/services/GraphApiService.js | 47 +++++--- api/server/services/GraphApiService.spec.js | 118 +++++++++++++++++--- 2 files changed, 134 insertions(+), 31 deletions(-) diff --git a/api/server/services/GraphApiService.js b/api/server/services/GraphApiService.js index 82fa245d58..08ca253964 100644 --- a/api/server/services/GraphApiService.js +++ b/api/server/services/GraphApiService.js @@ -159,7 +159,7 @@ const searchEntraIdPrincipals = async (accessToken, sub, query, type = 'all', li /** * Get current user's Entra ID group memberships from Microsoft Graph - * Uses /me/memberOf endpoint to get groups the user is a member of + * Uses /me/getMemberGroups endpoint to get transitive groups the user is a member of * @param {string} 
accessToken - OpenID Connect access token * @param {string} sub - Subject identifier * @returns {Promise>} Array of group ID strings (GUIDs) @@ -167,10 +167,12 @@ const searchEntraIdPrincipals = async (accessToken, sub, query, type = 'all', li const getUserEntraGroups = async (accessToken, sub) => { try { const graphClient = await createGraphClient(accessToken, sub); + const response = await graphClient + .api('/me/getMemberGroups') + .post({ securityEnabledOnly: false }); - const groupsResponse = await graphClient.api('/me/memberOf').select('id').get(); - - return (groupsResponse.value || []).map((group) => group.id); + const groupIds = Array.isArray(response?.value) ? response.value : []; + return [...new Set(groupIds.map((groupId) => String(groupId)))]; } catch (error) { logger.error('[getUserEntraGroups] Error fetching user groups:', error); return []; @@ -187,13 +189,22 @@ const getUserEntraGroups = async (accessToken, sub) => { const getUserOwnedEntraGroups = async (accessToken, sub) => { try { const graphClient = await createGraphClient(accessToken, sub); + const allGroupIds = []; + let nextLink = '/me/ownedObjects/microsoft.graph.group'; - const groupsResponse = await graphClient - .api('/me/ownedObjects/microsoft.graph.group') - .select('id') - .get(); + while (nextLink) { + const response = await graphClient.api(nextLink).select('id').top(999).get(); + const groups = response?.value || []; + allGroupIds.push(...groups.map((group) => group.id)); - return (groupsResponse.value || []).map((group) => group.id); + nextLink = response['@odata.nextLink'] + ? 
response['@odata.nextLink'] + .replace(/^https:\/\/graph\.microsoft\.com\/v1\.0/, '') + .trim() || null + : null; + } + + return allGroupIds; } catch (error) { logger.error('[getUserOwnedEntraGroups] Error fetching user owned groups:', error); return []; @@ -211,21 +222,27 @@ const getUserOwnedEntraGroups = async (accessToken, sub) => { const getGroupMembers = async (accessToken, sub, groupId) => { try { const graphClient = await createGraphClient(accessToken, sub); - const allMembers = []; - let nextLink = `/groups/${groupId}/members`; + const allMembers = new Set(); + let nextLink = `/groups/${groupId}/transitiveMembers`; while (nextLink) { const membersResponse = await graphClient.api(nextLink).select('id').top(999).get(); - const members = membersResponse.value || []; - allMembers.push(...members.map((member) => member.id)); + const members = membersResponse?.value || []; + members.forEach((member) => { + if (typeof member?.id === 'string' && member['@odata.type'] === '#microsoft.graph.user') { + allMembers.add(member.id); + } + }); nextLink = membersResponse['@odata.nextLink'] - ? membersResponse['@odata.nextLink'].split('/v1.0')[1] + ? 
membersResponse['@odata.nextLink'] + .replace(/^https:\/\/graph\.microsoft\.com\/v1\.0/, '') + .trim() || null : null; } - return allMembers; + return Array.from(allMembers); } catch (error) { logger.error('[getGroupMembers] Error fetching group members:', error); return []; diff --git a/api/server/services/GraphApiService.spec.js b/api/server/services/GraphApiService.spec.js index 5d8dd62cf5..fa11190cc3 100644 --- a/api/server/services/GraphApiService.spec.js +++ b/api/server/services/GraphApiService.spec.js @@ -73,6 +73,7 @@ describe('GraphApiService', () => { header: jest.fn().mockReturnThis(), top: jest.fn().mockReturnThis(), get: jest.fn(), + post: jest.fn(), }; Client.init.mockReturnValue(mockGraphClient); @@ -514,31 +515,33 @@ describe('GraphApiService', () => { }); describe('getUserEntraGroups', () => { - it('should fetch user groups from memberOf endpoint', async () => { + it('should fetch user groups using getMemberGroups endpoint', async () => { const mockGroupsResponse = { - value: [ - { - id: 'group-1', - }, - { - id: 'group-2', - }, - ], + value: ['group-1', 'group-2'], }; - mockGraphClient.get.mockResolvedValue(mockGroupsResponse); + mockGraphClient.post.mockResolvedValue(mockGroupsResponse); const result = await GraphApiService.getUserEntraGroups('token', 'user'); - expect(mockGraphClient.api).toHaveBeenCalledWith('/me/memberOf'); - expect(mockGraphClient.select).toHaveBeenCalledWith('id'); + expect(mockGraphClient.api).toHaveBeenCalledWith('/me/getMemberGroups'); + expect(mockGraphClient.post).toHaveBeenCalledWith({ securityEnabledOnly: false }); + + expect(result).toEqual(['group-1', 'group-2']); + }); + + it('should deduplicate returned group ids', async () => { + mockGraphClient.post.mockResolvedValue({ + value: ['group-1', 'group-2', 'group-1'], + }); + + const result = await GraphApiService.getUserEntraGroups('token', 'user'); - expect(result).toHaveLength(2); expect(result).toEqual(['group-1', 'group-2']); }); it('should return empty array on 
error', async () => { - mockGraphClient.get.mockRejectedValue(new Error('API error')); + mockGraphClient.post.mockRejectedValue(new Error('API error')); const result = await GraphApiService.getUserEntraGroups('token', 'user'); @@ -550,7 +553,7 @@ describe('GraphApiService', () => { value: [], }; - mockGraphClient.get.mockResolvedValue(mockGroupsResponse); + mockGraphClient.post.mockResolvedValue(mockGroupsResponse); const result = await GraphApiService.getUserEntraGroups('token', 'user'); @@ -558,7 +561,7 @@ describe('GraphApiService', () => { }); it('should handle missing value property', async () => { - mockGraphClient.get.mockResolvedValue({}); + mockGraphClient.post.mockResolvedValue({}); const result = await GraphApiService.getUserEntraGroups('token', 'user'); @@ -566,6 +569,89 @@ describe('GraphApiService', () => { }); }); + describe('getUserOwnedEntraGroups', () => { + it('should fetch owned groups with pagination support', async () => { + const firstPage = { + value: [ + { + id: 'owned-group-1', + }, + ], + '@odata.nextLink': + 'https://graph.microsoft.com/v1.0/me/ownedObjects/microsoft.graph.group?$skiptoken=xyz', + }; + + const secondPage = { + value: [ + { + id: 'owned-group-2', + }, + ], + }; + + mockGraphClient.get.mockResolvedValueOnce(firstPage).mockResolvedValueOnce(secondPage); + + const result = await GraphApiService.getUserOwnedEntraGroups('token', 'user'); + + expect(mockGraphClient.api).toHaveBeenNthCalledWith( + 1, + '/me/ownedObjects/microsoft.graph.group', + ); + expect(mockGraphClient.api).toHaveBeenNthCalledWith( + 2, + '/me/ownedObjects/microsoft.graph.group?$skiptoken=xyz', + ); + expect(mockGraphClient.top).toHaveBeenCalledWith(999); + expect(mockGraphClient.get).toHaveBeenCalledTimes(2); + + expect(result).toEqual(['owned-group-1', 'owned-group-2']); + }); + + it('should return empty array on error', async () => { + mockGraphClient.get.mockRejectedValue(new Error('API error')); + + const result = await 
GraphApiService.getUserOwnedEntraGroups('token', 'user'); + + expect(result).toEqual([]); + }); + }); + + describe('getGroupMembers', () => { + it('should fetch transitive members and include only users', async () => { + const firstPage = { + value: [ + { id: 'user-1', '@odata.type': '#microsoft.graph.user' }, + { id: 'child-group', '@odata.type': '#microsoft.graph.group' }, + ], + '@odata.nextLink': + 'https://graph.microsoft.com/v1.0/groups/group-id/transitiveMembers?$skiptoken=abc', + }; + const secondPage = { + value: [{ id: 'user-2', '@odata.type': '#microsoft.graph.user' }], + }; + + mockGraphClient.get.mockResolvedValueOnce(firstPage).mockResolvedValueOnce(secondPage); + + const result = await GraphApiService.getGroupMembers('token', 'user', 'group-id'); + + expect(mockGraphClient.api).toHaveBeenNthCalledWith(1, '/groups/group-id/transitiveMembers'); + expect(mockGraphClient.api).toHaveBeenNthCalledWith( + 2, + '/groups/group-id/transitiveMembers?$skiptoken=abc', + ); + expect(mockGraphClient.top).toHaveBeenCalledWith(999); + expect(result).toEqual(['user-1', 'user-2']); + }); + + it('should return empty array on error', async () => { + mockGraphClient.get.mockRejectedValue(new Error('API error')); + + const result = await GraphApiService.getGroupMembers('token', 'user', 'group-id'); + + expect(result).toEqual([]); + }); + }); + describe('testGraphApiAccess', () => { beforeEach(() => { jest.clearAllMocks(); From 64df54528dcd138f7f790f720de31c2fbba41509 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 19:45:37 -0400 Subject: [PATCH 18/37] =?UTF-8?q?=F0=9F=8C=8D=20i18n:=20Update=20translati?= =?UTF-8?q?on.json=20with=20latest=20translations=20(#10259)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- client/src/locales/zh-Hant/translation.json 
| 52 +++++++++++++++++---- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/client/src/locales/zh-Hant/translation.json b/client/src/locales/zh-Hant/translation.json index 2de11c381e..0023757bbc 100644 --- a/client/src/locales/zh-Hant/translation.json +++ b/client/src/locales/zh-Hant/translation.json @@ -1,6 +1,6 @@ { - "chat_direction_left_to_right": "這裡需要放些東西。目前是空的。", - "chat_direction_right_to_left": "這裡需要放些東西。目前是空的。", + "chat_direction_left_to_right": "由左至右", + "chat_direction_right_to_left": "由右至左", "com_a11y_ai_composing": "AI 仍在撰寫中", "com_a11y_end": "AI 已完成回覆", "com_a11y_start": "AI 已開始回覆。", @@ -21,8 +21,12 @@ "com_agents_category_it": "IT", "com_agents_category_it_description": "IT 支援、技術排障與系統管理 agent", "com_agents_category_rd": "研發", + "com_agents_category_rd_description": "擅長開發流程,發明和技術研究的agents", + "com_agents_category_sales": "銷售", + "com_agents_category_sales_description": "擅長銷售流程和客戶關係的agents", "com_agents_category_tab_label": "{{category}} 類別,{{position}} / {{total}}", "com_agents_category_tabs_label": "Agent 類別", + "com_agents_chat_with": "與 {{name}} 對話", "com_agents_clear_search": "清除搜尋", "com_agents_code_interpreter": "啟用後,您的代理可以安全地使用 LibreChat 程式碼解譯器 API 來執行產生的程式碼,包括檔案處理功能。需要有效的 API 金鑰。", "com_agents_code_interpreter_title": "程式碼解譯器 API", @@ -35,22 +39,48 @@ "com_agents_enable_file_search": "啟用檔案搜尋", "com_agents_error_bad_request_message": "無法處理該請求", "com_agents_error_bad_request_suggestion": "請檢查您的輸入並再試一次。", + "com_agents_error_category_title": "類別錯誤", + "com_agents_error_generic": "我們在載入內容的時候遇到問題", + "com_agents_error_invalid_request": "錯誤的請求", + "com_agents_error_loading": "載入agents錯誤", + "com_agents_error_network_message": "無法連線至伺服器", + "com_agents_error_network_suggestion": "檢查您的網路連線後再試一次", + "com_agents_error_network_title": "連線問題", + "com_agents_error_not_found_message": "無法找到請求的內容", + "com_agents_error_not_found_suggestion": "瀏覽其他的選項或是回到市場", + "com_agents_error_not_found_title": "沒有找到", + "com_agents_error_retry": "再試一次", + 
"com_agents_error_search_title": "搜尋錯誤", + "com_agents_error_searching": "搜尋agents錯誤", + "com_agents_error_server_message": "此伺服器暫時不可用", + "com_agents_error_server_suggestion": "請稍後再次嘗試", + "com_agents_error_server_title": "伺服器錯誤", + "com_agents_error_suggestion_generic": "請嘗試重新整理網頁或稍後再試", + "com_agents_error_timeout_message": "此請求逾時未完成", + "com_agents_error_timeout_suggestion": "請檢查您的網路連線後再試一次", + "com_agents_error_timeout_title": "連線逾時", + "com_agents_error_title": "發生錯誤了", "com_agents_file_context_disabled": "在為檔案上下文上傳檔案之前,必須先建立 Agent。", "com_agents_file_search_disabled": "必須先建立代理才能上傳檔案進行檔案搜尋。", "com_agents_file_search_info": "啟用後,代理將會被告知以下列出的確切檔案名稱,使其能夠從這些檔案中擷取相關內容。", "com_agents_instructions_placeholder": "代理程式使用的系統指令", + "com_agents_loading": "載入中...", + "com_agents_marketplace": "Agents市場", "com_agents_mcp_description_placeholder": "簡要解釋它的作用", "com_agents_mcp_icon_size": "最小尺寸 128 x 128 px", "com_agents_mcp_info": "將 MCP 伺服器新增至您的 Agent,讓其能執行任務並與外部服務互動", "com_agents_mcp_name_placeholder": "自定義工具", "com_agents_mcp_trust_subtext": "自訂連接器未經 LibreChat 驗證", "com_agents_mcps_disabled": "在新增 MCP 之前,您需要先建立 Agent。", + "com_agents_missing_name": "在建立agent之前,請先輸入名稱", "com_agents_missing_provider_model": "請在建立代理前選擇供應商和模型。", "com_agents_name_placeholder": "選填:代理人的名稱", "com_agents_no_access": "您沒有權限編輯此助理", "com_agents_no_agent_id_error": "找不到 Agent ID。請先建立 Agent。", "com_agents_not_available": "代理不可用", + "com_agents_recommended": "我們推薦的agents", "com_agents_search_info": "啟用後,允許您的 Agent 搜尋網路以取得最新資訊。需要有效的 API 金鑰。", + "com_agents_search_instructions": "輸入名稱或描述來搜尋agents", "com_agents_search_name": "依名稱搜尋代理", "com_agents_update_error": "更新您的代理時發生錯誤。", "com_assistants_action_attempt": "助理想要與 {{0}} 交談", @@ -251,7 +281,7 @@ "com_endpoint_openai_max": "要生成的最大 token 數。輸入 token 和生成 token 的總長度受到模型前後文長度的限制。", "com_endpoint_openai_max_tokens": "可選的 `max_tokens` 欄位,代表在對話完成中可以生成的最大 token 數。\n\n輸入 token 和生成 token 的總長度受限於模型的上下文長度。如果此數字超過最大上下文 token 數,您可能會遇到錯誤。", "com_endpoint_openai_pres": 
"數值範圍介於 -2.0 和 2.0 之間。正值會根據該 token 是否在目前的文字中出現來進行懲罰,增加模型談及新主題的可能性。", - "com_endpoint_openai_prompt_prefix_placeholder": "在系統訊息中設定自訂提示。", + "com_endpoint_openai_prompt_prefix_placeholder": "在系統訊息中設定自訂提示。預設:無", "com_endpoint_openai_reasoning_effort": "僅適用於推理模型:限制推理的投入。降低推理投入可以使回應更快,且在回應中使用較少的推理 token。最小值會產生非常少的推理 token,以達到最快的首次 token 產生時間,特別適合程式碼與指令遵循。", "com_endpoint_openai_reasoning_summary": "僅限 Responses API:模型執行推理的摘要。這有助於除錯並理解模型的推理過程。可設定為 無、自動、簡潔或詳細。", "com_endpoint_openai_resend": "重新傳送之前所有附加的圖片。注意:這可能會大幅增加 token 成本,如果附加了太多圖片,您可能會遇到錯誤。", @@ -265,7 +295,7 @@ "com_endpoint_output": "輸出", "com_endpoint_plug_image_detail": "影像詳細資訊", "com_endpoint_plug_resend_files": "重新傳送檔案", - "com_endpoint_plug_set_custom_instructions_for_gpt_placeholder": "在系統訊息中新增自訂提示。", + "com_endpoint_plug_set_custom_instructions_for_gpt_placeholder": "在系統訊息中新增自訂提示。\n預設:無", "com_endpoint_plug_skip_completion": "跳過完成步驟", "com_endpoint_plug_use_functions": "使用外掛作為 OpenAI 函式", "com_endpoint_presence_penalty": "出現懲罰", @@ -338,16 +368,22 @@ "com_nav_auto_transcribe_audio": "自動轉錄語音", "com_nav_automatic_playback": "自動播放最新訊息", "com_nav_balance": "餘額", + "com_nav_balance_auto_refill_disabled": "自動儲值已停用", + "com_nav_balance_auto_refill_error": "載入自動儲值設定錯誤", + "com_nav_balance_auto_refill_settings": "自動儲值設定", "com_nav_balance_day": "日", "com_nav_balance_days": "日", "com_nav_balance_every": "每", "com_nav_balance_hour": "小時", "com_nav_balance_hours": "小時", "com_nav_balance_interval": "間隔", + "com_nav_balance_last_refill": "上次儲值", "com_nav_balance_minute": "分鐘", "com_nav_balance_minutes": "分鐘", "com_nav_balance_month": "月", "com_nav_balance_months": "月", + "com_nav_balance_next_refill": "下次儲值", + "com_nav_balance_refill_amount": "儲值金額", "com_nav_balance_second": "秒", "com_nav_balance_seconds": "秒", "com_nav_balance_week": "週", @@ -469,9 +505,9 @@ "com_nav_search_placeholder": "搜尋訊息", "com_nav_send_message": "傳送訊息", "com_nav_setting_account": "帳號", - "com_nav_setting_chat": "聊天", + "com_nav_setting_chat": 
"對話", "com_nav_setting_data": "資料控制", - "com_nav_setting_general": "一般", + "com_nav_setting_general": "通用", "com_nav_setting_mcp": "MCP 設定", "com_nav_setting_personalization": "個性化", "com_nav_setting_speech": "語音", @@ -532,7 +568,7 @@ "com_ui_admin_access_warning": "停用管理員對此功能的存取權限可能會導致意外的介面問題,需要重新整理頁面。若儲存此設定,唯一的還原方式是透過 librechat.yaml 設定檔中的介面設定,這會影響所有角色。", "com_ui_admin_settings": "管理員設定", "com_ui_advanced": "進階", - "com_ui_agent": "助理", + "com_ui_agent": "Agent", "com_ui_agent_delete_error": "刪除助理時發生錯誤", "com_ui_agent_deleted": "已成功刪除助理", "com_ui_agent_duplicate_error": "複製助理時發生錯誤", @@ -600,7 +636,7 @@ "com_ui_command_usage_placeholder": "透過指令或名稱選擇提示", "com_ui_concise": "簡潔", "com_ui_confirm_action": "確認操作", - "com_ui_context": "情境", + "com_ui_context": "前後文", "com_ui_continue": "繼續", "com_ui_controls": "控制項", "com_ui_copied": "已複製!", From 33d6b337bc4d99e5a968a2785ce7ca9be7d489e1 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 27 Oct 2025 19:46:30 -0400 Subject: [PATCH 19/37] =?UTF-8?q?=F0=9F=93=9B=20feat:=20Chat=20Badges=20vi?= =?UTF-8?q?a=20Model=20Specs=20(#10272)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: remove `useChatContext` from `useSelectMention`, explicitly pass `conversation` object * feat: ephemeral agents via model specs * refactor: Sync Jotai state with ephemeral agent state, also when Ephemeral Agent has no MCP servers selected * refactor: move `useUpdateEphemeralAgent` to store and clean up imports * refactor: reorder imports and invalidate queries for mcpConnectionStatus in event handler * refactor: replace useApplyModelSpecEffects with useApplyModelSpecAgents and update event handlers to use new agent template logic * ci: update useMCPSelect test to verify mcpValues sync with empty ephemeralAgent.mcp --- api/models/Agent.js | 25 +++-- api/server/services/Endpoints/agents/build.js | 4 +- client/src/components/Chat/Input/ChatForm.tsx | 2 + 
client/src/components/Chat/Input/Mention.tsx | 4 + .../Endpoints/ModelSelectorChatContext.tsx | 13 +-- .../Menus/Endpoints/ModelSelectorContext.tsx | 3 +- client/src/hooks/Agents/index.ts | 1 + .../hooks/Agents/useApplyModelSpecAgents.ts | 95 +++++++++++++++++++ .../Conversations/useNavigateToConvo.tsx | 29 +++++- client/src/hooks/Input/useSelectMention.ts | 6 +- .../hooks/MCP/__tests__/useMCPSelect.test.tsx | 7 +- client/src/hooks/MCP/useMCPSelect.ts | 2 + client/src/hooks/SSE/useEventHandlers.ts | 41 +++++--- client/src/hooks/useNewConvo.ts | 9 ++ client/src/store/agents.ts | 12 +++ client/src/utils/endpoints.ts | 34 +++++++ packages/data-provider/src/models.ts | 8 ++ 17 files changed, 254 insertions(+), 41 deletions(-) create mode 100644 client/src/hooks/Agents/useApplyModelSpecAgents.ts diff --git a/api/models/Agent.js b/api/models/Agent.js index 5468293523..f5f740ba7b 100644 --- a/api/models/Agent.js +++ b/api/models/Agent.js @@ -62,25 +62,37 @@ const getAgents = async (searchParameter) => await Agent.find(searchParameter).l * * @param {Object} params * @param {ServerRequest} params.req + * @param {string} params.spec * @param {string} params.agent_id * @param {string} params.endpoint * @param {import('@librechat/agents').ClientOptions} [params.model_parameters] * @returns {Promise} The agent document as a plain object, or null if not found. 
*/ -const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _m }) => { +const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_parameters: _m }) => { const { model, ...model_parameters } = _m; + const modelSpecs = req.config?.modelSpecs?.list; + /** @type {TModelSpec | null} */ + let modelSpec = null; + if (spec != null && spec !== '') { + modelSpec = modelSpecs?.find((s) => s.name === spec) || null; + } /** @type {TEphemeralAgent | null} */ const ephemeralAgent = req.body.ephemeralAgent; const mcpServers = new Set(ephemeralAgent?.mcp); + if (modelSpec?.mcpServers) { + for (const mcpServer of modelSpec.mcpServers) { + mcpServers.add(mcpServer); + } + } /** @type {string[]} */ const tools = []; - if (ephemeralAgent?.execute_code === true) { + if (ephemeralAgent?.execute_code === true || modelSpec?.executeCode === true) { tools.push(Tools.execute_code); } - if (ephemeralAgent?.file_search === true) { + if (ephemeralAgent?.file_search === true || modelSpec?.fileSearch === true) { tools.push(Tools.file_search); } - if (ephemeralAgent?.web_search === true) { + if (ephemeralAgent?.web_search === true || modelSpec?.webSearch === true) { tools.push(Tools.web_search); } @@ -122,17 +134,18 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _ * * @param {Object} params * @param {ServerRequest} params.req + * @param {string} params.spec * @param {string} params.agent_id * @param {string} params.endpoint * @param {import('@librechat/agents').ClientOptions} [params.model_parameters] * @returns {Promise} The agent document as a plain object, or null if not found. 
*/ -const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => { +const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) => { if (!agent_id) { return null; } if (agent_id === EPHEMERAL_AGENT_ID) { - return await loadEphemeralAgent({ req, agent_id, endpoint, model_parameters }); + return await loadEphemeralAgent({ req, spec, agent_id, endpoint, model_parameters }); } const agent = await getAgent({ id: agent_id, diff --git a/api/server/services/Endpoints/agents/build.js b/api/server/services/Endpoints/agents/build.js index 3bf90e8d82..34fcaf4be4 100644 --- a/api/server/services/Endpoints/agents/build.js +++ b/api/server/services/Endpoints/agents/build.js @@ -3,9 +3,10 @@ const { isAgentsEndpoint, removeNullishValues, Constants } = require('librechat- const { loadAgent } = require('~/models/Agent'); const buildOptions = (req, endpoint, parsedBody, endpointType) => { - const { spec, iconURL, agent_id, instructions, ...model_parameters } = parsedBody; + const { spec, iconURL, agent_id, ...model_parameters } = parsedBody; const agentPromise = loadAgent({ req, + spec, agent_id: isAgentsEndpoint(endpoint) ? agent_id : Constants.EPHEMERAL_AGENT_ID, endpoint, model_parameters, @@ -20,7 +21,6 @@ const buildOptions = (req, endpoint, parsedBody, endpointType) => { endpoint, agent_id, endpointType, - instructions, model_parameters, agent: agentPromise, }); diff --git a/client/src/components/Chat/Input/ChatForm.tsx b/client/src/components/Chat/Input/ChatForm.tsx index 0736c7dc61..b807369082 100644 --- a/client/src/components/Chat/Input/ChatForm.tsx +++ b/client/src/components/Chat/Input/ChatForm.tsx @@ -220,6 +220,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
{showPlusPopover && !isAssistantsEndpoint(endpoint) && ( { )} {showMentionPopover && ( ; newConversation: ConvoGenerator; textAreaRef: React.MutableRefObject; @@ -42,6 +45,7 @@ export default function Mention({ const { onSelectMention } = useSelectMention({ presets, modelSpecs, + conversation, assistantsMap, endpointsConfig, newConversation, diff --git a/client/src/components/Chat/Menus/Endpoints/ModelSelectorChatContext.tsx b/client/src/components/Chat/Menus/Endpoints/ModelSelectorChatContext.tsx index bd639523d8..eac3bb200c 100644 --- a/client/src/components/Chat/Menus/Endpoints/ModelSelectorChatContext.tsx +++ b/client/src/components/Chat/Menus/Endpoints/ModelSelectorChatContext.tsx @@ -1,5 +1,5 @@ import React, { createContext, useContext, useMemo } from 'react'; -import type { EModelEndpoint } from 'librechat-data-provider'; +import type { EModelEndpoint, TConversation } from 'librechat-data-provider'; import { useChatContext } from '~/Providers/ChatContext'; interface ModelSelectorChatContextValue { @@ -8,6 +8,7 @@ interface ModelSelectorChatContextValue { spec?: string | null; agent_id?: string | null; assistant_id?: string | null; + conversation: TConversation | null; newConversation: ReturnType['newConversation']; } @@ -26,16 +27,10 @@ export function ModelSelectorChatProvider({ children }: { children: React.ReactN spec: conversation?.spec, agent_id: conversation?.agent_id, assistant_id: conversation?.assistant_id, + conversation, newConversation, }), - [ - conversation?.endpoint, - conversation?.model, - conversation?.spec, - conversation?.agent_id, - conversation?.assistant_id, - newConversation, - ], + [conversation, newConversation], ); return ( diff --git a/client/src/components/Chat/Menus/Endpoints/ModelSelectorContext.tsx b/client/src/components/Chat/Menus/Endpoints/ModelSelectorContext.tsx index a4527d56e7..e79d9a2d21 100644 --- a/client/src/components/Chat/Menus/Endpoints/ModelSelectorContext.tsx +++ 
b/client/src/components/Chat/Menus/Endpoints/ModelSelectorContext.tsx @@ -57,7 +57,7 @@ export function ModelSelectorProvider({ children, startupConfig }: ModelSelector const agentsMap = useAgentsMapContext(); const assistantsMap = useAssistantsMapContext(); const { data: endpointsConfig } = useGetEndpointsQuery(); - const { endpoint, model, spec, agent_id, assistant_id, newConversation } = + const { endpoint, model, spec, agent_id, assistant_id, conversation, newConversation } = useModelSelectorChatContext(); const modelSpecs = useMemo(() => { const specs = startupConfig?.modelSpecs?.list ?? []; @@ -96,6 +96,7 @@ export function ModelSelectorProvider({ children, startupConfig }: ModelSelector const { onSelectEndpoint, onSelectSpec } = useSelectMention({ // presets, modelSpecs, + conversation, assistantsMap, endpointsConfig, newConversation, diff --git a/client/src/hooks/Agents/index.ts b/client/src/hooks/Agents/index.ts index b0df8398e4..3597b0e646 100644 --- a/client/src/hooks/Agents/index.ts +++ b/client/src/hooks/Agents/index.ts @@ -6,3 +6,4 @@ export { default as useAgentCapabilities } from './useAgentCapabilities'; export { default as useGetAgentsConfig } from './useGetAgentsConfig'; export { default as useAgentDefaultPermissionLevel } from './useAgentDefaultPermissionLevel'; export { default as useAgentToolPermissions } from './useAgentToolPermissions'; +export * from './useApplyModelSpecAgents'; diff --git a/client/src/hooks/Agents/useApplyModelSpecAgents.ts b/client/src/hooks/Agents/useApplyModelSpecAgents.ts new file mode 100644 index 0000000000..e7f15741cb --- /dev/null +++ b/client/src/hooks/Agents/useApplyModelSpecAgents.ts @@ -0,0 +1,95 @@ +import { useCallback } from 'react'; +import type { TStartupConfig, TSubmission } from 'librechat-data-provider'; +import { useUpdateEphemeralAgent, useApplyNewAgentTemplate } from '~/store/agents'; +import { getModelSpec, applyModelSpecEphemeralAgent } from '~/utils'; + +/** + * Hook that applies a model spec from 
a preset to an ephemeral agent. + * This is used when initializing a new conversation with a preset that has a spec. + */ +export function useApplyModelSpecEffects() { + const updateEphemeralAgent = useUpdateEphemeralAgent(); + const applyPresetModelSpec = useCallback( + ({ + convoId, + specName, + startupConfig, + }: { + convoId: string | null; + specName?: string | null; + startupConfig?: TStartupConfig; + }) => { + if (specName == null || !specName) { + return; + } + + const modelSpec = getModelSpec({ + specName, + startupConfig, + }); + + applyModelSpecEphemeralAgent({ + convoId, + modelSpec, + updateEphemeralAgent, + }); + }, + [updateEphemeralAgent], + ); + + return applyPresetModelSpec; +} + +export function useApplyAgentTemplate() { + const applyAgentTemplate = useApplyNewAgentTemplate(); + /** + * Helper function to apply agent template with model spec merged into ephemeral agent + */ + const applyAgentTemplateWithSpec = useCallback( + ({ + targetId, + sourceId, + ephemeralAgent, + specName, + startupConfig, + }: { + targetId: string; + sourceId?: TSubmission['conversation']['conversationId'] | null; + ephemeralAgent: TSubmission['ephemeralAgent']; + specName?: string | null; + startupConfig?: TStartupConfig; + }) => { + if (!specName) { + applyAgentTemplate(targetId, sourceId, ephemeralAgent); + return; + } + + const modelSpec = getModelSpec({ + specName, + startupConfig, + }); + + if (!modelSpec) { + applyAgentTemplate(targetId, sourceId, ephemeralAgent); + return; + } + + // Merge model spec fields into ephemeral agent + const mergedAgent = { + ...ephemeralAgent, + mcp: [...(ephemeralAgent?.mcp ?? []), ...(modelSpec.mcpServers ?? [])], + web_search: ephemeralAgent?.web_search ?? modelSpec.webSearch ?? false, + file_search: ephemeralAgent?.file_search ?? modelSpec.fileSearch ?? false, + execute_code: ephemeralAgent?.execute_code ?? modelSpec.executeCode ?? 
false, + }; + + // Deduplicate MCP servers + mergedAgent.mcp = [...new Set(mergedAgent.mcp)]; + + applyAgentTemplate(targetId, sourceId, mergedAgent); + }, + [applyAgentTemplate], + ); + + return applyAgentTemplateWithSpec; +} diff --git a/client/src/hooks/Conversations/useNavigateToConvo.tsx b/client/src/hooks/Conversations/useNavigateToConvo.tsx index 2bbb4620b3..bfe4a0b96e 100644 --- a/client/src/hooks/Conversations/useNavigateToConvo.tsx +++ b/client/src/hooks/Conversations/useNavigateToConvo.tsx @@ -1,8 +1,14 @@ +import { useCallback } from 'react'; import { useSetRecoilState } from 'recoil'; import { useNavigate } from 'react-router-dom'; import { useQueryClient } from '@tanstack/react-query'; import { QueryKeys, Constants, dataService } from 'librechat-data-provider'; -import type { TConversation, TEndpointsConfig, TModelsConfig } from 'librechat-data-provider'; +import type { + TEndpointsConfig, + TStartupConfig, + TModelsConfig, + TConversation, +} from 'librechat-data-provider'; import { getDefaultEndpoint, clearMessagesCache, @@ -10,15 +16,34 @@ import { getEndpointField, logger, } from '~/utils'; +import { useApplyModelSpecEffects } from '~/hooks/Agents'; import store from '~/store'; const useNavigateToConvo = (index = 0) => { const navigate = useNavigate(); const queryClient = useQueryClient(); const clearAllConversations = store.useClearConvoState(); + const applyModelSpecEffects = useApplyModelSpecEffects(); const setSubmission = useSetRecoilState(store.submissionByIndex(index)); const clearAllLatestMessages = store.useClearLatestMessages(`useNavigateToConvo ${index}`); - const { hasSetConversation, setConversation } = store.useCreateConversationAtom(index); + const { hasSetConversation, setConversation: setConvo } = store.useCreateConversationAtom(index); + + const setConversation = useCallback( + (conversation: TConversation) => { + setConvo(conversation); + if (!conversation.spec) { + return; + } + + const startupConfig = 
queryClient.getQueryData([QueryKeys.startupConfig]); + applyModelSpecEffects({ + startupConfig, + specName: conversation?.spec, + convoId: conversation.conversationId, + }); + }, + [setConvo, queryClient, applyModelSpecEffects], + ); const fetchFreshData = async (conversation?: Partial) => { const conversationId = conversation?.conversationId; diff --git a/client/src/hooks/Input/useSelectMention.ts b/client/src/hooks/Input/useSelectMention.ts index a5be633da0..51a2f75b11 100644 --- a/client/src/hooks/Input/useSelectMention.ts +++ b/client/src/hooks/Input/useSelectMention.ts @@ -10,18 +10,19 @@ import type { } from 'librechat-data-provider'; import type { MentionOption, ConvoGenerator } from '~/common'; import { getConvoSwitchLogic, getModelSpecIconURL, removeUnavailableTools, logger } from '~/utils'; -import { useChatContext } from '~/Providers'; import { useDefaultConvo } from '~/hooks'; import store from '~/store'; export default function useSelectMention({ presets, modelSpecs, + conversation, assistantsMap, + returnHandlers, endpointsConfig, newConversation, - returnHandlers, }: { + conversation: TConversation | null; presets?: TPreset[]; modelSpecs: TModelSpec[]; assistantsMap?: TAssistantsMap; @@ -29,7 +30,6 @@ export default function useSelectMention({ endpointsConfig: TEndpointsConfig; returnHandlers?: boolean; }) { - const { conversation } = useChatContext(); const getDefaultConversation = useDefaultConvo(); const modularChat = useRecoilValue(store.modularChat); const availableTools = useRecoilValue(store.availableTools); diff --git a/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx b/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx index 7145e95e74..ab10ec6d76 100644 --- a/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx +++ b/client/src/hooks/MCP/__tests__/useMCPSelect.test.tsx @@ -431,9 +431,10 @@ describe('useMCPSelect', () => { }); }); - // Values should remain unchanged since empty mcp array doesn't trigger update - // (due to the 
condition: ephemeralAgent?.mcp && ephemeralAgent.mcp.length > 0) - expect(result.current.mcpHook.mcpValues).toEqual(['initial-value']); + // Values should sync to empty array when ephemeralAgent.mcp is set to [] + await waitFor(() => { + expect(result.current.mcpHook.mcpValues).toEqual([]); + }); }); it('should properly sync non-empty arrays from ephemeralAgent', async () => { diff --git a/client/src/hooks/MCP/useMCPSelect.ts b/client/src/hooks/MCP/useMCPSelect.ts index 3f37bb4d70..3ce7999346 100644 --- a/client/src/hooks/MCP/useMCPSelect.ts +++ b/client/src/hooks/MCP/useMCPSelect.ts @@ -25,6 +25,8 @@ export function useMCPSelect({ conversationId }: { conversationId?: string | nul // Strip out servers that are not available in the startup config const activeMcps = mcps.filter((mcp) => configuredServers.has(mcp)); setMCPValuesRaw(activeMcps); + } else { + setMCPValuesRaw([]); } }, [ephemeralAgent?.mcp, setMCPValuesRaw, configuredServers]); diff --git a/client/src/hooks/SSE/useEventHandlers.ts b/client/src/hooks/SSE/useEventHandlers.ts index 83c1ff1ad9..6348581b68 100644 --- a/client/src/hooks/SSE/useEventHandlers.ts +++ b/client/src/hooks/SSE/useEventHandlers.ts @@ -1,8 +1,8 @@ -import { v4 } from 'uuid'; import { useCallback, useRef } from 'react'; +import { v4 } from 'uuid'; import { useSetRecoilState } from 'recoil'; -import { useParams, useNavigate, useLocation } from 'react-router-dom'; import { useQueryClient } from '@tanstack/react-query'; +import { useParams, useNavigate, useLocation } from 'react-router-dom'; import { QueryKeys, Constants, @@ -13,7 +13,12 @@ import { tConvoUpdateSchema, isAssistantsEndpoint, } from 'librechat-data-provider'; -import type { TMessage, TConversation, EventSubmission } from 'librechat-data-provider'; +import type { + TMessage, + TConversation, + EventSubmission, + TStartupConfig, +} from 'librechat-data-provider'; import type { TResData, TFinalResData, ConvoGenerator } from '~/common'; import type { InfiniteData } from 
'@tanstack/react-query'; import type { TGenTitleMutation } from '~/data-provider'; @@ -31,11 +36,12 @@ import { } from '~/utils'; import useAttachmentHandler from '~/hooks/SSE/useAttachmentHandler'; import useContentHandler from '~/hooks/SSE/useContentHandler'; -import store, { useApplyNewAgentTemplate } from '~/store'; import useStepHandler from '~/hooks/SSE/useStepHandler'; +import { useApplyAgentTemplate } from '~/hooks/Agents'; import { useAuthContext } from '~/hooks/AuthContext'; import { MESSAGE_UPDATE_INTERVAL } from '~/common'; import { useLiveAnnouncer } from '~/Providers'; +import store from '~/store'; type TSyncData = { sync: boolean; @@ -172,7 +178,7 @@ export default function useEventHandlers({ }: EventHandlerParams) { const queryClient = useQueryClient(); const { announcePolite } = useLiveAnnouncer(); - const applyAgentTemplate = useApplyNewAgentTemplate(); + const applyAgentTemplate = useApplyAgentTemplate(); const setAbortScroll = useSetRecoilState(store.abortScroll); const navigate = useNavigate(); const location = useLocation(); @@ -356,6 +362,7 @@ export default function useEventHandlers({ const createdHandler = useCallback( (data: TResData, submission: EventSubmission) => { + queryClient.invalidateQueries([QueryKeys.mcpConnectionStatus]); const { messages, userMessage, isRegenerate = false, isTemporary = false } = submission; const initialResponse = { ...submission.initialResponse, @@ -411,11 +418,13 @@ export default function useEventHandlers({ } if (conversationId) { - applyAgentTemplate( - conversationId, - submission.conversation.conversationId, - submission.ephemeralAgent, - ); + applyAgentTemplate({ + targetId: conversationId, + sourceId: submission.conversation?.conversationId, + ephemeralAgent: submission.ephemeralAgent, + specName: submission.conversation?.spec, + startupConfig: queryClient.getQueryData([QueryKeys.startupConfig]), + }); } if (resetLatestMessage) { @@ -566,11 +575,13 @@ export default function useEventHandlers({ }); if 
(conversation.conversationId && submission.ephemeralAgent) { - applyAgentTemplate( - conversation.conversationId, - submissionConvo.conversationId, - submission.ephemeralAgent, - ); + applyAgentTemplate({ + targetId: conversation.conversationId, + sourceId: submissionConvo.conversationId, + ephemeralAgent: submission.ephemeralAgent, + specName: submission.conversation?.spec, + startupConfig: queryClient.getQueryData([QueryKeys.startupConfig]), + }); } if (location.pathname === `/c/${Constants.NEW_CONVO}`) { diff --git a/client/src/hooks/useNewConvo.ts b/client/src/hooks/useNewConvo.ts index 63b442b83a..9f0e17b297 100644 --- a/client/src/hooks/useNewConvo.ts +++ b/client/src/hooks/useNewConvo.ts @@ -29,6 +29,7 @@ import { import { useDeleteFilesMutation, useGetEndpointsQuery, useGetStartupConfig } from '~/data-provider'; import useAssistantListMap from './Assistants/useAssistantListMap'; import { useResetChatBadges } from './useChatBadges'; +import { useApplyModelSpecEffects } from './Agents'; import { usePauseGlobalAudio } from './Audio'; import { logger } from '~/utils'; import store from '~/store'; @@ -37,6 +38,7 @@ const useNewConvo = (index = 0) => { const navigate = useNavigate(); const [searchParams] = useSearchParams(); const { data: startupConfig } = useGetStartupConfig(); + const applyModelSpecEffects = useApplyModelSpecEffects(); const clearAllConversations = store.useClearConvoState(); const defaultPreset = useRecoilValue(store.defaultPreset); const { setConversation } = store.useCreateConversationAtom(index); @@ -265,6 +267,12 @@ const useNewConvo = (index = 0) => { preset = getModelSpecPreset(defaultModelSpec); } + applyModelSpecEffects({ + startupConfig, + specName: preset?.spec, + convoId: conversation.conversationId, + }); + if (conversation.conversationId === Constants.NEW_CONVO && !modelsData) { const filesToDelete = Array.from(files.values()) .filter( @@ -311,6 +319,7 @@ const useNewConvo = (index = 0) => { saveBadgesState, pauseGlobalAudio, 
switchToConversation, + applyModelSpecEffects, ], ); diff --git a/client/src/store/agents.ts b/client/src/store/agents.ts index a62fae6046..13136ef34e 100644 --- a/client/src/store/agents.ts +++ b/client/src/store/agents.ts @@ -16,6 +16,18 @@ export const ephemeralAgentByConvoId = atomFamily + (convoId: string, agent: TEphemeralAgent | null) => { + set(ephemeralAgentByConvoId(convoId), agent); + }, + [], + ); + + return updateEphemeralAgent; +} + /** * Creates a callback function to apply the ephemeral agent state * from the "new" conversation template to a specified conversation ID. diff --git a/client/src/utils/endpoints.ts b/client/src/utils/endpoints.ts index c98680843a..1de9e2845c 100644 --- a/client/src/utils/endpoints.ts +++ b/client/src/utils/endpoints.ts @@ -1,4 +1,5 @@ import { + Constants, EModelEndpoint, defaultEndpoints, modularEndpoints, @@ -176,6 +177,39 @@ export function getConvoSwitchLogic(params: ConversationInitParams): InitiatedTe }; } +export function getModelSpec({ + specName, + startupConfig, +}: { + specName?: string | null; + startupConfig?: t.TStartupConfig; +}): t.TModelSpec | undefined { + if (!startupConfig || !specName) { + return; + } + return startupConfig.modelSpecs?.list?.find((spec) => spec.name === specName); +} + +export function applyModelSpecEphemeralAgent({ + convoId, + modelSpec, + updateEphemeralAgent, +}: { + convoId?: string | null; + modelSpec?: t.TModelSpec; + updateEphemeralAgent: ((convoId: string, agent: t.TEphemeralAgent | null) => void) | undefined; +}) { + if (!modelSpec || !updateEphemeralAgent) { + return; + } + updateEphemeralAgent((convoId ?? Constants.NEW_CONVO) || Constants.NEW_CONVO, { + mcp: modelSpec.mcpServers ?? [], + web_search: modelSpec.webSearch ?? false, + file_search: modelSpec.fileSearch ?? false, + execute_code: modelSpec.executeCode ?? false, + }); +} + /** * Gets default model spec from config and user preferences. 
* Priority: admin default → last selected → first spec (when prioritize=true or modelSelect disabled). diff --git a/packages/data-provider/src/models.ts b/packages/data-provider/src/models.ts index 78ba1237fc..1edca6ea37 100644 --- a/packages/data-provider/src/models.ts +++ b/packages/data-provider/src/models.ts @@ -26,6 +26,10 @@ export type TModelSpec = { showIconInHeader?: boolean; iconURL?: string | EModelEndpoint; // Allow using project-included icons authType?: AuthType; + webSearch?: boolean; + fileSearch?: boolean; + executeCode?: boolean; + mcpServers?: string[]; }; export const tModelSpecSchema = z.object({ @@ -40,6 +44,10 @@ export const tModelSpecSchema = z.object({ showIconInHeader: z.boolean().optional(), iconURL: z.union([z.string(), eModelEndpointSchema]).optional(), authType: authTypeSchema.optional(), + webSearch: z.boolean().optional(), + fileSearch: z.boolean().optional(), + executeCode: z.boolean().optional(), + mcpServers: z.array(z.string()).optional(), }); export const specsConfigSchema = z.object({ From 0446d0e1900f4d75c6b0e7e8b94deffc4478d3b2 Mon Sep 17 00:00:00 2001 From: Dustin Healy <54083382+dustinhealy@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:46:43 +0100 Subject: [PATCH 20/37] =?UTF-8?q?=E2=99=BF=20fix:=20Address=20Accessibilit?= =?UTF-8?q?y=20Issues=20(#10260)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: add i18n localization comment for AlwaysMakeProd component * feat: enhance accessibility by adding aria-label and aria-labelledby to Switch component * feat: add aria-labels for accessibility in Agent and Assistant avatar buttons * fix: add switch aria-labels for accessibility in various components * feat: add aria-labels and localization keys for accessibility in DataTable, DataTableColumnHeader, and OGDialogTemplate components * chore: refactor out nested ternary * feat: add aria-label to DataTable filter button for My Files modal * feat: add aria-labels for Buttons 
and localization strings * feat: add aria-labels to Checkboxes in Agent Builder * feat: enhance accessibility by adding aria-label and aria-labelledby to Checkbox component * feat: add aria-label to FileSearchCheckbox in Agent Builder * feat: add aria-label to Prompts text input area * feat: enhance accessibility by adding aria-label and aria-labelledby to TextAreaAutosize component * feat: remove improper role: "list" prop from List in Conversations.tsx to enhance accessibility and stop aria rules conflicting within react-virtualized component * feat: enhance accessibility by allowing tab navigation and adding ring highlights for conversation title editing accept/reject buttons * feat: add aria-label to Copy Link button in the conversation share modal * feat: add title to QR code svg in conversation share modal to describe the image content * feat: enhance accessibility by making Agent Avatar upload keyboard navigable and round out highlight border on focus * feat: enhance accessibility by adding aria attributes around alerting users with screen readers to invalid email address inputs in the Agent Builder * feat: add aria-labels to buttons in Advanced panel of Agent Builder * feat: enhance accessibility by making FileUpload and Clear All buttons in PresetItems keyboard navigable * feat: enhance accessibility by indexing view and delete button aria-labels in shared links management modal to their specific chat titles * feat: add border highlighting on focus for AnimatedSearchInput * feat: add category description to aria-labels for prompts in ListCard * feat: add proper scoping to rows and columns in table headers * feat: add localized aria-labelling to EditTextPart's TextAreaAutosize component and base dynamic parameters panel components and their supporting translation keys * feat: add localized aria-labels and aria-labelledBy to Checkbox components without them * feat: add localized aria-labelledBy for endpoint settings Sliders * feat: add localized aria-labels
for TextareaAutosize components * chore: remove unused i18n string * feat: add localized aria-label for BookmarkForm Checkbox * fix: add stopPropagation onKeyDown for Preview and Edit menu items in prompts that was causing the prompts to inadvertently be sent when triggered with keyboard navigation when Auto-send Prompts was toggled on * fix: switch TableCell to TableHead for title cells according to harvard issue #789 * fix: add more descriptive localization key for file filter button in DataTable * chore: remove self-explanatory code comment from RenameForm * fix: remove stray bg-yellow highlight that was left in during debugging * fix: add aria-label to model configurator panel back button * fix: undo incorrect hoist of tool name split for aria-label and span in MCPInput --------- Co-authored-by: Danny Avila --- .../Agents/MarketplaceAdminSettings.tsx | 1 + .../src/components/Bookmarks/BookmarkForm.tsx | 8 ++- client/src/components/Chat/Input/ChatForm.tsx | 3 ++ .../Chat/Input/Files/FileUpload.tsx | 34 ++++++++---- .../Chat/Input/Files/Table/DataTable.tsx | 6 ++- .../Chat/Menus/Presets/PresetItems.tsx | 10 ++-- .../Chat/Messages/Content/EditMessage.tsx | 1 + .../Messages/Content/Parts/EditTextPart.tsx | 1 + .../Conversations/Conversations.tsx | 1 - .../ConvoOptions/ShareButton.tsx | 9 +++- .../components/Conversations/RenameForm.tsx | 9 ++-- .../Endpoints/Settings/Advanced.tsx | 18 +++++-- .../Endpoints/Settings/AgentSettings.tsx | 7 ++- .../components/Endpoints/Settings/Google.tsx | 4 ++ .../components/Endpoints/Settings/Plugins.tsx | 4 ++ .../Nav/ExportConversation/ExportModal.tsx | 16 ++++-- .../Nav/SettingsTabs/Chat/SaveBadgesState.tsx | 1 + .../Nav/SettingsTabs/Chat/ShowThinking.tsx | 1 + .../Nav/SettingsTabs/Data/SharedLinks.tsx | 53 ++++++++----------- .../src/components/Prompts/AdminSettings.tsx | 8 ++- .../Prompts/Groups/AlwaysMakeProd.tsx | 2 +- .../Prompts/Groups/AutoSendPrompt.tsx | 2 +- .../Prompts/Groups/ChatGroupItem.tsx | 6 +++ 
.../Prompts/Groups/CreatePromptForm.tsx | 1 + client/src/components/Prompts/Groups/List.tsx | 1 + .../components/Prompts/Groups/ListCard.tsx | 4 +- .../Prompts/Groups/NoPromptGroup.tsx | 1 + .../Prompts/Groups/VariableForm.tsx | 3 +- .../src/components/Prompts/PromptEditor.tsx | 1 + .../Sharing/GenericGrantAccessDialog.tsx | 7 ++- .../Sharing/PeoplePickerAdminSettings.tsx | 2 + .../SidePanel/Agents/AdminSettings.tsx | 2 + .../Agents/Advanced/AdvancedButton.tsx | 1 + .../Agents/Advanced/AdvancedPanel.tsx | 1 + .../SidePanel/Agents/Advanced/AgentChain.tsx | 3 ++ .../SidePanel/Agents/AgentAvatar.tsx | 6 ++- .../SidePanel/Agents/AgentConfig.tsx | 18 ++++++- .../SidePanel/Agents/AgentPanel.tsx | 7 +++ .../components/SidePanel/Agents/Artifacts.tsx | 1 + .../SidePanel/Agents/Code/Action.tsx | 7 ++- .../SidePanel/Agents/Code/ApiKeyDialog.tsx | 1 + .../SidePanel/Agents/FileSearchCheckbox.tsx | 1 + .../components/SidePanel/Agents/Images.tsx | 7 +-- .../components/SidePanel/Agents/MCPInput.tsx | 13 ++++- .../components/SidePanel/Agents/MCPTool.tsx | 7 +++ .../SidePanel/Agents/ModelPanel.tsx | 1 + .../SidePanel/Agents/Search/Action.tsx | 1 + .../SidePanel/Agents/Search/ApiKeyDialog.tsx | 6 ++- .../Agents/Version/VersionButton.tsx | 1 + .../SidePanel/Bookmarks/BookmarkTable.tsx | 1 + .../SidePanel/Builder/AssistantAvatar.tsx | 6 ++- .../src/components/SidePanel/Builder/Code.tsx | 2 + .../SidePanel/Builder/ImageVision.tsx | 2 + .../SidePanel/Builder/Retrieval.tsx | 2 + .../SidePanel/Files/PanelColumns.tsx | 2 + .../src/components/SidePanel/MCP/MCPPanel.tsx | 9 +++- .../SidePanel/Memories/AdminSettings.tsx | 2 + .../SidePanel/Memories/MemoryCreateDialog.tsx | 1 + .../SidePanel/Memories/MemoryEditDialog.tsx | 1 + .../SidePanel/Memories/MemoryViewer.tsx | 6 ++- .../SidePanel/Parameters/DynamicCheckbox.tsx | 1 + .../SidePanel/Parameters/DynamicSlider.tsx | 3 ++ .../SidePanel/Parameters/DynamicSwitch.tsx | 3 ++ .../SidePanel/Parameters/DynamicTextarea.tsx | 1 + 
client/src/locales/en/translation.json | 14 ++++- client/src/routes/RouteErrorBoundary.tsx | 15 ++++-- .../src/components/AnimatedSearchInput.tsx | 2 +- packages/client/src/components/Checkbox.tsx | 50 +++++++++++------ packages/client/src/components/DataTable.tsx | 26 ++++++++- .../src/components/DataTableColumnHeader.tsx | 29 +++++++--- .../src/components/OGDialogTemplate.tsx | 4 +- packages/client/src/components/Switch.tsx | 48 +++++++++++------ .../src/components/TextareaAutosize.tsx | 14 ++++- .../client/src/locales/en/translation.json | 5 +- 74 files changed, 427 insertions(+), 131 deletions(-) diff --git a/client/src/components/Agents/MarketplaceAdminSettings.tsx b/client/src/components/Agents/MarketplaceAdminSettings.tsx index fa5fa34fbc..e09f168afe 100644 --- a/client/src/components/Agents/MarketplaceAdminSettings.tsx +++ b/client/src/components/Agents/MarketplaceAdminSettings.tsx @@ -58,6 +58,7 @@ const LabelController: React.FC = ({ checked={field.value} onCheckedChange={field.onChange} value={field.value.toString()} + aria-label={label} /> )} /> diff --git a/client/src/components/Bookmarks/BookmarkForm.tsx b/client/src/components/Bookmarks/BookmarkForm.tsx index 3b2633485b..23e94dbfb1 100644 --- a/client/src/components/Bookmarks/BookmarkForm.tsx +++ b/client/src/components/Bookmarks/BookmarkForm.tsx @@ -129,7 +129,11 @@ const BookmarkForm = ({
-
{conversationId != null && conversationId && ( @@ -161,6 +166,7 @@ const BookmarkForm = ({ onCheckedChange={field.onChange} className="relative float-left mr-2 inline-flex h-4 w-4 cursor-pointer" value={field.value?.toString()} + aria-label={localize('com_ui_bookmarks_add_to_conversation')} /> )} /> diff --git a/client/src/components/Chat/Input/ChatForm.tsx b/client/src/components/Chat/Input/ChatForm.tsx index b807369082..f1dc1ef076 100644 --- a/client/src/components/Chat/Input/ChatForm.tsx +++ b/client/src/components/Chat/Input/ChatForm.tsx @@ -12,6 +12,7 @@ import { import { useTextarea, useAutoSave, + useLocalize, useRequiresKey, useHandleKeyUp, useQueryParams, @@ -38,6 +39,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => { const submitButtonRef = useRef(null); const textAreaRef = useRef(null); useFocusChatEffect(textAreaRef); + const localize = useLocalize(); const [isCollapsed, setIsCollapsed] = useState(false); const [, setIsScrollable] = useState(false); @@ -279,6 +281,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => { setIsTextAreaFocused(true); }} onBlur={setIsTextAreaFocused.bind(null, false)} + aria-label={localize('com_ui_message_input')} onClick={handleFocusOrClick} style={{ height: 44, overflowY: 'auto' }} className={cn( diff --git a/client/src/components/Chat/Input/Files/FileUpload.tsx b/client/src/components/Chat/Input/Files/FileUpload.tsx index 723fa32e86..718c8c1f5d 100644 --- a/client/src/components/Chat/Input/Files/FileUpload.tsx +++ b/client/src/components/Chat/Input/Files/FileUpload.tsx @@ -62,17 +62,28 @@ const FileUpload: React.FC = ({ statusText = invalidText ?? 
localize('com_ui_upload_invalid'); } + const handleClick = () => { + const fileInput = document.getElementById(`file-upload-${id}`) as HTMLInputElement; + if (fileInput) { + fileInput.click(); + } + }; + return ( - + ); }; diff --git a/client/src/components/Chat/Input/Files/Table/DataTable.tsx b/client/src/components/Chat/Input/Files/Table/DataTable.tsx index ffb3e2825b..70459b2d66 100644 --- a/client/src/components/Chat/Input/Files/Table/DataTable.tsx +++ b/client/src/components/Chat/Input/Files/Table/DataTable.tsx @@ -122,7 +122,11 @@ export default function DataTable({ columns, data }: DataTablePro /> - diff --git a/client/src/components/Chat/Menus/Presets/PresetItems.tsx b/client/src/components/Chat/Menus/Presets/PresetItems.tsx index 4e7710e0a7..a0c65bc04c 100644 --- a/client/src/components/Chat/Menus/Presets/PresetItems.tsx +++ b/client/src/components/Chat/Menus/Presets/PresetItems.tsx @@ -59,9 +59,10 @@ const PresetItems: FC<{ - + diff --git a/client/src/components/Chat/Messages/Content/Parts/EditTextPart.tsx b/client/src/components/Chat/Messages/Content/Parts/EditTextPart.tsx index 242b13765e..5422d9733d 100644 --- a/client/src/components/Chat/Messages/Content/Parts/EditTextPart.tsx +++ b/client/src/components/Chat/Messages/Content/Parts/EditTextPart.tsx @@ -170,6 +170,7 @@ const EditTextPart = ({ 'max-h-[65vh] pr-3 md:max-h-[75vh] md:pr-4', removeFocusRings, )} + aria-label={localize('com_ui_editable_message')} dir={isRTL ? 
'rtl' : 'ltr'} /> diff --git a/client/src/components/Conversations/Conversations.tsx b/client/src/components/Conversations/Conversations.tsx index b16c6458c7..b6a7032e9f 100644 --- a/client/src/components/Conversations/Conversations.tsx +++ b/client/src/components/Conversations/Conversations.tsx @@ -201,7 +201,6 @@ const Conversations: FC = ({ overscanRowCount={10} className="outline-none" style={{ outline: 'none' }} - role="list" aria-label="Conversations" onRowsRendered={handleRowsRendered} tabIndex={-1} diff --git a/client/src/components/Conversations/ConvoOptions/ShareButton.tsx b/client/src/components/Conversations/ConvoOptions/ShareButton.tsx index 46310268f0..cbbb612251 100644 --- a/client/src/components/Conversations/ConvoOptions/ShareButton.tsx +++ b/client/src/components/Conversations/ConvoOptions/ShareButton.tsx @@ -77,7 +77,13 @@ export default function ShareButton({
{showQR && (
- +
)} @@ -87,6 +93,7 @@ export default function ShareButton({ diff --git a/client/src/components/Endpoints/Settings/Advanced.tsx b/client/src/components/Endpoints/Settings/Advanced.tsx index d0beaa9020..504e6cd94d 100644 --- a/client/src/components/Endpoints/Settings/Advanced.tsx +++ b/client/src/components/Endpoints/Settings/Advanced.tsx @@ -151,6 +151,7 @@ export default function Settings({ min={0} step={0.01} className="flex h-4 w-full" + aria-labelledby="temp-int" /> @@ -160,7 +161,9 @@ export default function Settings({
@@ -199,7 +203,9 @@ export default function Settings({
@@ -238,7 +245,9 @@ export default function Settings({
@@ -306,6 +316,7 @@ export default function Settings({ onCheckedChange={(checked: boolean) => setResendFiles(checked)} disabled={readonly} className="flex" + aria-label={localize('com_endpoint_plug_resend_files')} /> @@ -323,6 +334,7 @@ export default function Settings({ max={2} min={0} step={1} + aria-label={localize('com_endpoint_plug_image_detail')} /> diff --git a/client/src/components/Endpoints/Settings/AgentSettings.tsx b/client/src/components/Endpoints/Settings/AgentSettings.tsx index f41a8bc19e..f4425a4db4 100644 --- a/client/src/components/Endpoints/Settings/AgentSettings.tsx +++ b/client/src/components/Endpoints/Settings/AgentSettings.tsx @@ -53,7 +53,9 @@ export default function Settings({ conversation, setOption, models, readonly }:
@@ -101,6 +104,7 @@ export default function Settings({ conversation, setOption, models, readonly }: onCheckedChange={onCheckedChangeAgent} disabled={readonly} className="ml-4 mt-2" + aria-label={localize('com_endpoint_plug_use_functions')} /> @@ -119,6 +123,7 @@ export default function Settings({ conversation, setOption, models, readonly }: onCheckedChange={onCheckedChangeSkip} disabled={readonly} className="ml-4 mt-2" + aria-label={localize('com_endpoint_plug_skip_completion')} /> diff --git a/client/src/components/Endpoints/Settings/Google.tsx b/client/src/components/Endpoints/Settings/Google.tsx index 6e513c1791..18bf95a1d0 100644 --- a/client/src/components/Endpoints/Settings/Google.tsx +++ b/client/src/components/Endpoints/Settings/Google.tsx @@ -171,6 +171,7 @@ export default function Settings({ conversation, setOption, models, readonly }: min={google.temperature.min} step={google.temperature.step} className="flex h-4 w-full" + aria-labelledby="temp-int" /> @@ -211,6 +212,7 @@ export default function Settings({ conversation, setOption, models, readonly }: min={google.topP.min} step={google.topP.step} className="flex h-4 w-full" + aria-labelledby="top-p-int" /> @@ -252,6 +254,7 @@ export default function Settings({ conversation, setOption, models, readonly }: min={google.topK.min} step={google.topK.step} className="flex h-4 w-full" + aria-labelledby="top-k-int" /> @@ -296,6 +299,7 @@ export default function Settings({ conversation, setOption, models, readonly }: min={google.maxOutputTokens.min} step={google.maxOutputTokens.step} className="flex h-4 w-full" + aria-labelledby="max-tokens-int" /> @@ -296,6 +297,7 @@ export default function Settings({ min={0} step={0.01} className="flex h-4 w-full" + aria-labelledby="top-p-int" /> @@ -337,6 +339,7 @@ export default function Settings({ min={-2} step={0.01} className="flex h-4 w-full" + aria-labelledby="freq-penalty-int" /> @@ -378,6 +381,7 @@ export default function Settings({ min={-2} step={0.01} className="flex 
h-4 w-full" + aria-labelledby="pres-penalty-int" /> diff --git a/client/src/components/Nav/ExportConversation/ExportModal.tsx b/client/src/components/Nav/ExportConversation/ExportModal.tsx index 642b5bbc81..2083ddec1a 100644 --- a/client/src/components/Nav/ExportConversation/ExportModal.tsx +++ b/client/src/components/Nav/ExportConversation/ExportModal.tsx @@ -124,13 +124,15 @@ export default function ExportModal({ disabled={!exportOptionsSupport} checked={includeOptions} onCheckedChange={setIncludeOptions} + aria-labelledby="includeOptions-label" />
@@ -146,13 +148,15 @@ export default function ExportModal({ disabled={!exportBranchesSupport} checked={exportBranches} onCheckedChange={setExportBranches} + aria-labelledby="exportBranches-label" />
@@ -163,8 +167,14 @@ export default function ExportModal({ {localize('com_nav_export_recursive_or_sequential')}
- +
); diff --git a/client/src/components/Nav/SettingsTabs/Chat/ShowThinking.tsx b/client/src/components/Nav/SettingsTabs/Chat/ShowThinking.tsx index 02a5ee256e..949453cb5c 100644 --- a/client/src/components/Nav/SettingsTabs/Chat/ShowThinking.tsx +++ b/client/src/components/Nav/SettingsTabs/Chat/ShowThinking.tsx @@ -30,6 +30,7 @@ export default function SaveDraft({ onCheckedChange={handleCheckedChange} className="ml-4" data-testid="showThinking" + aria-label={localize('com_nav_show_thinking')} />
); diff --git a/client/src/components/Nav/SettingsTabs/Data/SharedLinks.tsx b/client/src/components/Nav/SettingsTabs/Data/SharedLinks.tsx index ae25223a9b..bcc6a4af9c 100644 --- a/client/src/components/Nav/SettingsTabs/Data/SharedLinks.tsx +++ b/client/src/components/Nav/SettingsTabs/Data/SharedLinks.tsx @@ -13,7 +13,6 @@ import { useMediaQuery, OGDialogHeader, OGDialogTitle, - TooltipAnchor, DataTable, Spinner, Button, @@ -246,37 +245,27 @@ export default function SharedLinks() { }, cell: ({ row }) => (
- { - window.open(`/c/${row.original.conversationId}`, '_blank'); - }} - title={localize('com_ui_view_source')} - > - - - } - /> - { - setDeleteRow(row.original); - setIsDeleteOpen(true); - }} - title={localize('com_ui_delete')} - > - - - } - /> + +
), }, diff --git a/client/src/components/Prompts/AdminSettings.tsx b/client/src/components/Prompts/AdminSettings.tsx index 6f1580800e..7b25db721c 100644 --- a/client/src/components/Prompts/AdminSettings.tsx +++ b/client/src/components/Prompts/AdminSettings.tsx @@ -53,6 +53,7 @@ const LabelController: React.FC = ({ } }} value={field.value.toString()} + aria-label={label} /> )} /> @@ -216,7 +217,12 @@ const AdminSettings = () => { ))}
-
diff --git a/client/src/components/Prompts/Groups/AlwaysMakeProd.tsx b/client/src/components/Prompts/Groups/AlwaysMakeProd.tsx index 17c82c648d..64d6bd60ec 100644 --- a/client/src/components/Prompts/Groups/AlwaysMakeProd.tsx +++ b/client/src/components/Prompts/Groups/AlwaysMakeProd.tsx @@ -28,7 +28,7 @@ export default function AlwaysMakeProd({ checked={alwaysMakeProd} onCheckedChange={handleCheckedChange} data-testid="alwaysMakeProd" - aria-label="Always make prompt production" + aria-label={localize('com_nav_always_make_prod')} />
{localize('com_nav_always_make_prod')}
diff --git a/client/src/components/Prompts/Groups/AutoSendPrompt.tsx b/client/src/components/Prompts/Groups/AutoSendPrompt.tsx index 430506a748..182580a49c 100644 --- a/client/src/components/Prompts/Groups/AutoSendPrompt.tsx +++ b/client/src/components/Prompts/Groups/AutoSendPrompt.tsx @@ -30,7 +30,7 @@ export default function AutoSendPrompt({ >
{localize('com_nav_auto_send_prompts')}
{ + e.stopPropagation(); + }} className="w-full cursor-pointer rounded-lg text-text-primary hover:bg-surface-hover focus:bg-surface-hover disabled:cursor-not-allowed" >