diff --git a/.env.example b/.env.example index 3ba5d211e4..31a99c8612 100644 --- a/.env.example +++ b/.env.example @@ -142,12 +142,12 @@ GOOGLE_KEY=user_provided # GOOGLE_AUTH_HEADER=true # Gemini API (AI Studio) -# GOOGLE_MODELS=gemini-2.5-pro-exp-03-25,gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision +# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002 # Vertex AI -# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro +# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002 -# GOOGLE_TITLE_MODEL=gemini-pro +# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001 # GOOGLE_LOC=us-central1 @@ -231,6 +231,14 @@ AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE= AZURE_AI_SEARCH_SEARCH_OPTION_TOP= AZURE_AI_SEARCH_SEARCH_OPTION_SELECT= +# OpenAI Image Tools Customization +#---------------- +# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present +# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present +# IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool +# IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool +# IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool + # DALL·E #---------------- # DALLE_API_KEY= diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 5951ed694e..09444a1b44 100644 --- a/.github/CONTRIBUTING.md +++ 
b/.github/CONTRIBUTING.md @@ -24,22 +24,40 @@ Project maintainers have the right and responsibility to remove, edit, or reject ## To contribute to this project, please adhere to the following guidelines: -## 1. Development notes +## 1. Development Setup -1. Before starting work, make sure your main branch has the latest commits with `npm run update` -2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning. +1. Use Node.js 20.x. +2. Install typescript globally: `npm i -g typescript`. +3. Run `npm ci` to install dependencies. +4. Build the data provider: `npm run build:data-provider`. +5. Build MCP: `npm run build:mcp`. +6. Build data schemas: `npm run build:data-schemas`. +7. Setup and run unit tests: + - Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`. + - Run backend unit tests: `npm run test:api`. + - Run frontend unit tests: `npm run test:client`. +8. Setup and run integration tests: + - Build client: `cd client && npm run build`. + - Create `.env`: `cp .env.example .env`. + - Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/), ensure that `mongosh` connects to your local instance. + - Run: `npm install playwright`, then `npx playwright install`. + - Copy `config.local`: `cp e2e/config.local.example.ts e2e/config.local.ts`. + - Copy `librechat.yaml`: `cp librechat.example.yaml librechat.yaml`. + - Run: `npm run e2e`. + +## 2. Development Notes + +1. Before starting work, make sure your main branch has the latest commits with `npm run update`. +2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning. 3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works. - Restart the ESLint server ("ESLint: Restart ESLint Server" in VS Code command bar) and your IDE after reinstalling or updating. 4. 
Clear web app localStorage and cookies before and after changes. -5. For frontend changes: - - Install typescript globally: `npm i -g typescript`. - - Compile typescript before and after changes to check for introduced errors: `cd client && tsc --noEmit`. -6. Run tests locally: - - Backend unit tests: `npm run test:api` - - Frontend unit tests: `npm run test:client` - - Integration tests: `npm run e2e` (requires playwright installed, `npx install playwright`) +5. For frontend changes, compile typescript before and after changes to check for introduced errors: `cd client && npm run build`. +6. Run backend unit tests: `npm run test:api`. +7. Run frontend unit tests: `npm run test:client`. +8. Run integration tests: `npm run e2e`. -## 2. Git Workflow +## 3. Git Workflow We utilize a GitFlow workflow to manage changes to this project's codebase. Follow these general steps when contributing code: @@ -49,7 +67,7 @@ We utilize a GitFlow workflow to manage changes to this project's codebase. Foll 4. Submit a pull request with a clear and concise description of your changes and the reasons behind them. 5. We will review your pull request, provide feedback as needed, and eventually merge the approved changes into the main branch. -## 3. Commit Message Format +## 4. Commit Message Format We follow the [semantic format](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) for commit messages. @@ -76,7 +94,7 @@ feat: add hat wobble ``` -## 4. Pull Request Process +## 5. Pull Request Process When submitting a pull request, please follow these guidelines: @@ -91,7 +109,7 @@ Ensure that your changes meet the following criteria: - The commit history is clean and easy to follow. You can use `git rebase` or `git merge --squash` to clean your commit history before submitting the pull request. - The pull request description clearly outlines the changes and the reasons behind them. Be sure to include the steps to test the pull request. -## 5. Naming Conventions +## 6. 
Naming Conventions Apply the following naming conventions to branches, labels, and other Git-related entities: @@ -100,7 +118,7 @@ Apply the following naming conventions to branches, labels, and other Git-relate - **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`). - **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`). -## 6. TypeScript Conversion +## 7. TypeScript Conversion 1. **Original State**: The project was initially developed entirely in JavaScript (JS). @@ -126,7 +144,7 @@ Apply the following naming conventions to branches, labels, and other Git-relate - **Current Stance**: At present, this backend transition is of lower priority and might not be pursued. -## 7. Module Import Conventions +## 8. Module Import Conventions - `npm` packages first, - from shortest line (top) to longest (bottom) diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml index 3a3b828ee1..610396959f 100644 --- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -79,6 +79,8 @@ body: For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here. 
render: shell + validations: + required: true - type: textarea id: screenshots attributes: diff --git a/.github/workflows/generate-release-changelog-pr.yml b/.github/workflows/generate-release-changelog-pr.yml index 004431e577..405f0ca6dc 100644 --- a/.github/workflows/generate-release-changelog-pr.yml +++ b/.github/workflows/generate-release-changelog-pr.yml @@ -4,6 +4,7 @@ on: push: tags: - 'v*.*.*' + workflow_dispatch: jobs: generate-release-changelog-pr: @@ -88,7 +89,7 @@ jobs: base: main branch: "changelog/${{ github.ref_name }}" reviewers: danny-avila - title: "chore: update CHANGELOG for release ${{ github.ref_name }}" + title: "📜 docs: Changelog for release ${{ github.ref_name }}" body: | **Description**: - - This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases. \ No newline at end of file + - This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases. diff --git a/.github/workflows/generate-unreleased-changelog-pr.yml b/.github/workflows/generate-unreleased-changelog-pr.yml index b130e4fb33..133e19f1e2 100644 --- a/.github/workflows/generate-unreleased-changelog-pr.yml +++ b/.github/workflows/generate-unreleased-changelog-pr.yml @@ -3,6 +3,7 @@ name: Generate Unreleased Changelog PR on: schedule: - cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC + workflow_dispatch: jobs: generate-unreleased-changelog-pr: @@ -98,9 +99,9 @@ jobs: branch: "changelog/unreleased-update" sign-commits: true commit-message: "action: update Unreleased changelog" - title: "action: update Unreleased changelog" + title: "📜 docs: Unreleased Changelog" body: | **Description**: - This PR updates the Unreleased section in CHANGELOG.md. 
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}), - regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content. \ No newline at end of file + regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content. diff --git a/.github/workflows/i18n-unused-keys.yml b/.github/workflows/i18n-unused-keys.yml index 5e29a8a8bd..f720a61783 100644 --- a/.github/workflows/i18n-unused-keys.yml +++ b/.github/workflows/i18n-unused-keys.yml @@ -39,12 +39,35 @@ jobs: # Check if each key is used in the source code for KEY in $KEYS; do FOUND=false - for DIR in "${SOURCE_DIRS[@]}"; do - if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then - FOUND=true - break + + # Special case for dynamically constructed special variable keys + if [[ "$KEY" == com_ui_special_var_* ]]; then + # Check if TSpecialVarLabel is used in the codebase + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "TSpecialVarLabel" "$DIR"; then + FOUND=true + break + fi + done + + # Also check if the key is directly used somewhere + if [[ "$FOUND" == false ]]; then + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then + FOUND=true + break + fi + done fi - done + else + # Regular check for other keys + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then + FOUND=true + break + fi + done + fi if [[ "$FOUND" == false ]]; then UNUSED_KEYS+=("$KEY") @@ -90,4 +113,4 @@ jobs: - name: Fail workflow if unused keys found if: env.unused_keys != '[]' - run: exit 1 \ No newline at end of file + run: exit 1 diff --git a/.gitignore b/.gitignore index bd3b596c81..a4d2d8fc7e 100644 --- a/.gitignore +++ b/.gitignore @@ -52,6 +52,9 @@ bower_components/ *.d.ts !vite-env.d.ts +# Cline +.clineignore + # Floobits .floo .floobit diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 292bd76f40..1b602cba28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,15 +2,189 @@ All notable changes to this project will be documented in this file. -## [Unreleased] + + +## [v0.7.8] - + +Changes from v0.7.8-rc1 to v0.7.8. ### ✨ New Features -- 🪄 feat: Agent Artifacts by **@danny-avila** in [#5804](https://github.com/danny-avila/LibreChat/pull/5804) +- ✨ feat: Enhance form submission for touch screens by **@berry-13** in [#7198](https://github.com/danny-avila/LibreChat/pull/7198) +- 🔍 feat: Additional Tavily API Tool Parameters by **@glowforge-opensource** in [#7232](https://github.com/danny-avila/LibreChat/pull/7232) +- 🐋 feat: Add python to Dockerfile for increased MCP compatibility by **@technicalpickles** in [#7270](https://github.com/danny-avila/LibreChat/pull/7270) + +### 🔧 Fixes + +- 🔧 fix: Google Gemma Support & OpenAI Reasoning Instructions by **@danny-avila** in [#7196](https://github.com/danny-avila/LibreChat/pull/7196) +- 🛠️ fix: Conversation Navigation State by **@danny-avila** in [#7210](https://github.com/danny-avila/LibreChat/pull/7210) +- 🔄 fix: o-Series Model Regex for System Messages by **@danny-avila** in [#7245](https://github.com/danny-avila/LibreChat/pull/7245) +- 🔖 fix: Custom Headers for Initial MCP SSE Connection by **@danny-avila** in [#7246](https://github.com/danny-avila/LibreChat/pull/7246) +- 🛡️ fix: Deep Clone `MCPOptions` for User MCP Connections by **@danny-avila** in [#7247](https://github.com/danny-avila/LibreChat/pull/7247) +- 🔄 fix: URL Param Race Condition and File Draft Persistence by **@danny-avila** in [#7257](https://github.com/danny-avila/LibreChat/pull/7257) +- 🔄 fix: Assistants Endpoint & Minor Issues by **@danny-avila** in [#7274](https://github.com/danny-avila/LibreChat/pull/7274) +- 🔄 fix: Ollama Think Tag Edge Case with Tools by **@danny-avila** in [#7275](https://github.com/danny-avila/LibreChat/pull/7275) ### ⚙️ Other Changes -- 🔄 chore: Enforce 18next Language Keys by **@rubentalstra** 
in [#5803](https://github.com/danny-avila/LibreChat/pull/5803) -- 🔃 refactor: Parent Message ID Handling on Error, Update Translations, Bump Agents by **@danny-avila** in [#5833](https://github.com/danny-avila/LibreChat/pull/5833) +- 📜 docs: CHANGELOG for release v0.7.8-rc1 by **@github-actions[bot]** in [#7153](https://github.com/danny-avila/LibreChat/pull/7153) +- 🔄 refactor: Artifact Visibility Management by **@danny-avila** in [#7181](https://github.com/danny-avila/LibreChat/pull/7181) +- 📦 chore: Bump Package Security by **@danny-avila** in [#7183](https://github.com/danny-avila/LibreChat/pull/7183) +- 🌿 refactor: Unmount Fork Popover on Hide for Better Performance by **@danny-avila** in [#7189](https://github.com/danny-avila/LibreChat/pull/7189) +- 🧰 chore: ESLint configuration to enforce Prettier formatting rules by **@mawburn** in [#7186](https://github.com/danny-avila/LibreChat/pull/7186) +- 🎨 style: Improve KaTeX Rendering for LaTeX Equations by **@andresgit** in [#7223](https://github.com/danny-avila/LibreChat/pull/7223) +- 📝 docs: Update `.env.example` Google models by **@marlonka** in [#7254](https://github.com/danny-avila/LibreChat/pull/7254) +- 💬 refactor: MCP Chat Visibility Option, Google Rates, Remove OpenAPI Plugins by **@danny-avila** in [#7286](https://github.com/danny-avila/LibreChat/pull/7286) +- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7214](https://github.com/danny-avila/LibreChat/pull/7214) + + + +[See full release details][release-v0.7.8] + +[release-v0.7.8]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8 + +--- +## [v0.7.8-rc1] - + +Changes from v0.7.7 to v0.7.8-rc1. 
+ +### ✨ New Features + +- 🔍 feat: Mistral OCR API / Upload Files as Text by **@danny-avila** in [#6274](https://github.com/danny-avila/LibreChat/pull/6274) +- 🤖 feat: Support OpenAI Web Search models by **@danny-avila** in [#6313](https://github.com/danny-avila/LibreChat/pull/6313) +- 🔗 feat: Agent Chain (Mixture-of-Agents) by **@danny-avila** in [#6374](https://github.com/danny-avila/LibreChat/pull/6374) +- ⌛ feat: `initTimeout` for Slow Starting MCP Servers by **@perweij** in [#6383](https://github.com/danny-avila/LibreChat/pull/6383) +- 🚀 feat: `S3` Integration for File handling and Image uploads by **@rubentalstra** in [#6142](https://github.com/danny-avila/LibreChat/pull/6142) +- 🔒feat: Enable OpenID Auto-Redirect by **@leondape** in [#6066](https://github.com/danny-avila/LibreChat/pull/6066) +- 🚀 feat: Integrate `Azure Blob Storage` for file handling and image uploads by **@rubentalstra** in [#6153](https://github.com/danny-avila/LibreChat/pull/6153) +- 🚀 feat: Add support for custom `AWS` endpoint in `S3` by **@rubentalstra** in [#6431](https://github.com/danny-avila/LibreChat/pull/6431) +- 🚀 feat: Add support for LDAP STARTTLS in LDAP authentication by **@rubentalstra** in [#6438](https://github.com/danny-avila/LibreChat/pull/6438) +- 🚀 feat: Refactor schema exports and update package version to 0.0.4 by **@rubentalstra** in [#6455](https://github.com/danny-avila/LibreChat/pull/6455) +- 🔼 feat: Add Auto Submit For URL Query Params by **@mjaverto** in [#6440](https://github.com/danny-avila/LibreChat/pull/6440) +- 🛠 feat: Enhance Redis Integration, Rate Limiters & Log Headers by **@danny-avila** in [#6462](https://github.com/danny-avila/LibreChat/pull/6462) +- 💵 feat: Add Automatic Balance Refill by **@rubentalstra** in [#6452](https://github.com/danny-avila/LibreChat/pull/6452) +- 🗣️ feat: add support for gpt-4o-transcribe models by **@berry-13** in [#6483](https://github.com/danny-avila/LibreChat/pull/6483) +- 🎨 feat: UI Refresh for Enhanced UX by 
**@berry-13** in [#6346](https://github.com/danny-avila/LibreChat/pull/6346) +- 🌍 feat: Add support for Hungarian language localization by **@rubentalstra** in [#6508](https://github.com/danny-avila/LibreChat/pull/6508) +- 🚀 feat: Add Gemini 2.5 Token/Context Values, Increase Max Possible Output to 64k by **@danny-avila** in [#6563](https://github.com/danny-avila/LibreChat/pull/6563) +- 🚀 feat: Enhance MCP Connections For Multi-User Support by **@danny-avila** in [#6610](https://github.com/danny-avila/LibreChat/pull/6610) +- 🚀 feat: Enhance S3 URL Expiry with Refresh; fix: S3 File Deletion by **@danny-avila** in [#6647](https://github.com/danny-avila/LibreChat/pull/6647) +- 🚀 feat: enhance UI components and refactor settings by **@berry-13** in [#6625](https://github.com/danny-avila/LibreChat/pull/6625) +- 💬 feat: move TemporaryChat to the Header by **@berry-13** in [#6646](https://github.com/danny-avila/LibreChat/pull/6646) +- 🚀 feat: Use Model Specs + Specific Endpoints, Limit Providers for Agents by **@danny-avila** in [#6650](https://github.com/danny-avila/LibreChat/pull/6650) +- 🪙 feat: Sync Balance Config on Login by **@danny-avila** in [#6671](https://github.com/danny-avila/LibreChat/pull/6671) +- 🔦 feat: MCP Support for Non-Agent Endpoints by **@danny-avila** in [#6775](https://github.com/danny-avila/LibreChat/pull/6775) +- 🗃️ feat: Code Interpreter File Persistence between Sessions by **@danny-avila** in [#6790](https://github.com/danny-avila/LibreChat/pull/6790) +- 🖥️ feat: Code Interpreter API for Non-Agent Endpoints by **@danny-avila** in [#6803](https://github.com/danny-avila/LibreChat/pull/6803) +- ⚡ feat: Self-hosted Artifacts Static Bundler URL by **@danny-avila** in [#6827](https://github.com/danny-avila/LibreChat/pull/6827) +- 🐳 feat: Add Jemalloc and UV to Docker Builds by **@danny-avila** in [#6836](https://github.com/danny-avila/LibreChat/pull/6836) +- 🤖 feat: GPT-4.1 by **@danny-avila** in 
[#6880](https://github.com/danny-avila/LibreChat/pull/6880) +- 👋 feat: remove Edge TTS by **@berry-13** in [#6885](https://github.com/danny-avila/LibreChat/pull/6885) +- feat: nav optimization by **@berry-13** in [#5785](https://github.com/danny-avila/LibreChat/pull/5785) +- 🗺️ feat: Add Parameter Location Mapping for OpenAPI actions by **@peeeteeer** in [#6858](https://github.com/danny-avila/LibreChat/pull/6858) +- 🤖 feat: Support `o4-mini` and `o3` Models by **@danny-avila** in [#6928](https://github.com/danny-avila/LibreChat/pull/6928) +- 🎨 feat: OpenAI Image Tools (GPT-Image-1) by **@danny-avila** in [#7079](https://github.com/danny-avila/LibreChat/pull/7079) +- 🗓️ feat: Add Special Variables for Prompts & Agents, Prompt UI Improvements by **@danny-avila** in [#7123](https://github.com/danny-avila/LibreChat/pull/7123) + +### 🌍 Internationalization + +- 🌍 i18n: Add Thai Language Support and Update Translations by **@rubentalstra** in [#6219](https://github.com/danny-avila/LibreChat/pull/6219) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6220](https://github.com/danny-avila/LibreChat/pull/6220) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6240](https://github.com/danny-avila/LibreChat/pull/6240) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6241](https://github.com/danny-avila/LibreChat/pull/6241) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6277](https://github.com/danny-avila/LibreChat/pull/6277) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6414](https://github.com/danny-avila/LibreChat/pull/6414) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6505](https://github.com/danny-avila/LibreChat/pull/6505) +- 🌍 i18n: Update translation.json with latest translations by 
**@github-actions[bot]** in [#6530](https://github.com/danny-avila/LibreChat/pull/6530) +- 🌍 i18n: Add Persian Localization Support by **@rubentalstra** in [#6669](https://github.com/danny-avila/LibreChat/pull/6669) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6667](https://github.com/danny-avila/LibreChat/pull/6667) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7126](https://github.com/danny-avila/LibreChat/pull/7126) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7148](https://github.com/danny-avila/LibreChat/pull/7148) + +### 👐 Accessibility + +- 🎨 a11y: Update Model Spec Description Text by **@berry-13** in [#6294](https://github.com/danny-avila/LibreChat/pull/6294) +- 🗑️ a11y: Add Accessible Name to Button for File Attachment Removal by **@kangabell** in [#6709](https://github.com/danny-avila/LibreChat/pull/6709) +- ⌨️ a11y: enhance accessibility & visual consistency by **@berry-13** in [#6866](https://github.com/danny-avila/LibreChat/pull/6866) +- 🙌 a11y: Searchbar/Conversations List Focus by **@danny-avila** in [#7096](https://github.com/danny-avila/LibreChat/pull/7096) +- 👐 a11y: Improve Fork and SplitText Accessibility by **@danny-avila** in [#7147](https://github.com/danny-avila/LibreChat/pull/7147) + +### 🔧 Fixes + +- 🐛 fix: Avatar Type Definitions in Agent/Assistant Schemas by **@danny-avila** in [#6235](https://github.com/danny-avila/LibreChat/pull/6235) +- 🔧 fix: MeiliSearch Field Error and Patch Incorrect Import by #6210 by **@rubentalstra** in [#6245](https://github.com/danny-avila/LibreChat/pull/6245) +- 🔏 fix: Enhance Two-Factor Authentication by **@rubentalstra** in [#6247](https://github.com/danny-avila/LibreChat/pull/6247) +- 🐛 fix: Await saveMessage in abortMiddleware to ensure proper execution by **@sh4shii** in [#6248](https://github.com/danny-avila/LibreChat/pull/6248) +- 🔧 fix: Axios Proxy Usage And 
Bump `mongoose` by **@danny-avila** in [#6298](https://github.com/danny-avila/LibreChat/pull/6298) +- 🔧 fix: comment out MCP servers to resolve service run issues by **@KunalScriptz** in [#6316](https://github.com/danny-avila/LibreChat/pull/6316) +- 🔧 fix: Update Token Calculations and Mapping, MCP `env` Initialization by **@danny-avila** in [#6406](https://github.com/danny-avila/LibreChat/pull/6406) +- 🐞 fix: Agent "Resend" Message Attachments + Source Icon Styling by **@danny-avila** in [#6408](https://github.com/danny-avila/LibreChat/pull/6408) +- 🐛 fix: Prevent Crash on Duplicate Message ID by **@Odrec** in [#6392](https://github.com/danny-avila/LibreChat/pull/6392) +- 🔐 fix: Invalid Key Length in 2FA Encryption by **@rubentalstra** in [#6432](https://github.com/danny-avila/LibreChat/pull/6432) +- 🏗️ fix: Fix Agents Token Spend Race Conditions, Expand Test Coverage by **@danny-avila** in [#6480](https://github.com/danny-avila/LibreChat/pull/6480) +- 🔃 fix: Draft Clearing, Claude Titles, Remove Default Vision Max Tokens by **@danny-avila** in [#6501](https://github.com/danny-avila/LibreChat/pull/6501) +- 🔧 fix: Update username reference to use user.name in greeting display by **@rubentalstra** in [#6534](https://github.com/danny-avila/LibreChat/pull/6534) +- 🔧 fix: S3 Download Stream with Key Extraction and Blob Storage Encoding for Vision by **@danny-avila** in [#6557](https://github.com/danny-avila/LibreChat/pull/6557) +- 🔧 fix: Mistral type strictness for `usage` & update token values/windows by **@danny-avila** in [#6562](https://github.com/danny-avila/LibreChat/pull/6562) +- 🔧 fix: Consolidate Text Parsing and TTS Edge Initialization by **@danny-avila** in [#6582](https://github.com/danny-avila/LibreChat/pull/6582) +- 🔧 fix: Ensure continuation in image processing on base64 encoding from Blob Storage by **@danny-avila** in [#6619](https://github.com/danny-avila/LibreChat/pull/6619) +- ✉️ fix: Fallback For User Name In Email Templates by **@danny-avila** in 
[#6620](https://github.com/danny-avila/LibreChat/pull/6620) +- 🔧 fix: Azure Blob Integration and File Source References by **@rubentalstra** in [#6575](https://github.com/danny-avila/LibreChat/pull/6575) +- 🐛 fix: Safeguard against undefined addedEndpoints by **@wipash** in [#6654](https://github.com/danny-avila/LibreChat/pull/6654) +- 🤖 fix: Gemini 2.5 Vision Support by **@danny-avila** in [#6663](https://github.com/danny-avila/LibreChat/pull/6663) +- 🔄 fix: Avatar & Error Handling Enhancements by **@danny-avila** in [#6687](https://github.com/danny-avila/LibreChat/pull/6687) +- 🔧 fix: Chat Middleware, Zod Conversion, Auto-Save and S3 URL Refresh by **@danny-avila** in [#6720](https://github.com/danny-avila/LibreChat/pull/6720) +- 🔧 fix: Agent Capability Checks & DocumentDB Compatibility for Agent Resource Removal by **@danny-avila** in [#6726](https://github.com/danny-avila/LibreChat/pull/6726) +- 🔄 fix: Improve audio MIME type detection and handling by **@berry-13** in [#6707](https://github.com/danny-avila/LibreChat/pull/6707) +- 🪺 fix: Update Role Handling due to New Schema Shape by **@danny-avila** in [#6774](https://github.com/danny-avila/LibreChat/pull/6774) +- 🗨️ fix: Show ModelSpec Greeting by **@berry-13** in [#6770](https://github.com/danny-avila/LibreChat/pull/6770) +- 🔧 fix: Keyv and Proxy Issues, and More Memory Optimizations by **@danny-avila** in [#6867](https://github.com/danny-avila/LibreChat/pull/6867) +- ✨ fix: Implement dynamic text sizing for greeting and name display by **@berry-13** in [#6833](https://github.com/danny-avila/LibreChat/pull/6833) +- 📝 fix: Mistral OCR Image Support and Azure Agent Titles by **@danny-avila** in [#6901](https://github.com/danny-avila/LibreChat/pull/6901) +- 📢 fix: Invalid `engineTTS` and Conversation State on Navigation by **@berry-13** in [#6904](https://github.com/danny-avila/LibreChat/pull/6904) +- 🛠️ fix: Improve Accessibility and Display of Conversation Menu by **@danny-avila** in 
[#6913](https://github.com/danny-avila/LibreChat/pull/6913) +- 🔧 fix: Agent Resource Form, Convo Menu Style, Ensure Draft Clears on Submission by **@danny-avila** in [#6925](https://github.com/danny-avila/LibreChat/pull/6925) +- 🔀 fix: MCP Improvements, Auto-Save Drafts, Artifact Markup by **@danny-avila** in [#7040](https://github.com/danny-avila/LibreChat/pull/7040) +- 🐋 fix: Improve Deepseek Compatbility by **@danny-avila** in [#7132](https://github.com/danny-avila/LibreChat/pull/7132) +- 🐙 fix: Add Redis Ping Interval to Prevent Connection Drops by **@peeeteeer** in [#7127](https://github.com/danny-avila/LibreChat/pull/7127) + +### ⚙️ Other Changes + +- 📦 refactor: Move DB Models to `@librechat/data-schemas` by **@rubentalstra** in [#6210](https://github.com/danny-avila/LibreChat/pull/6210) +- 📦 chore: Patch `axios` to address CVE-2025-27152 by **@danny-avila** in [#6222](https://github.com/danny-avila/LibreChat/pull/6222) +- ⚠️ refactor: Use Error Content Part Instead Of Throwing Error for Agents by **@danny-avila** in [#6262](https://github.com/danny-avila/LibreChat/pull/6262) +- 🏃‍♂️ refactor: Improve Agent Run Context & Misc. 
Changes by **@danny-avila** in [#6448](https://github.com/danny-avila/LibreChat/pull/6448) +- 📝 docs: librechat.example.yaml by **@ineiti** in [#6442](https://github.com/danny-avila/LibreChat/pull/6442) +- 🏃‍♂️ refactor: More Agent Context Improvements during Run by **@danny-avila** in [#6477](https://github.com/danny-avila/LibreChat/pull/6477) +- 🔃 refactor: Allow streaming for `o1` models by **@danny-avila** in [#6509](https://github.com/danny-avila/LibreChat/pull/6509) +- 🔧 chore: `Vite` Plugin Upgrades & Config Optimizations by **@rubentalstra** in [#6547](https://github.com/danny-avila/LibreChat/pull/6547) +- 🔧 refactor: Consolidate Logging, Model Selection & Actions Optimizations, Minor Fixes by **@danny-avila** in [#6553](https://github.com/danny-avila/LibreChat/pull/6553) +- 🎨 style: Address Minor UI Refresh Issues by **@berry-13** in [#6552](https://github.com/danny-avila/LibreChat/pull/6552) +- 🔧 refactor: Enhance Model & Endpoint Configurations with Global Indicators 🌍 by **@berry-13** in [#6578](https://github.com/danny-avila/LibreChat/pull/6578) +- 💬 style: Chat UI, Greeting, and Message adjustments by **@berry-13** in [#6612](https://github.com/danny-avila/LibreChat/pull/6612) +- ⚡ refactor: DocumentDB Compatibility for Balance Updates by **@danny-avila** in [#6673](https://github.com/danny-avila/LibreChat/pull/6673) +- 🧹 chore: Update ESLint rules for React hooks by **@rubentalstra** in [#6685](https://github.com/danny-avila/LibreChat/pull/6685) +- 🪙 chore: Update Gemini Pricing by **@RedwindA** in [#6731](https://github.com/danny-avila/LibreChat/pull/6731) +- 🪺 refactor: Nest Permission fields for Roles by **@rubentalstra** in [#6487](https://github.com/danny-avila/LibreChat/pull/6487) +- 📦 chore: Update `caniuse-lite` dependency to version 1.0.30001706 by **@rubentalstra** in [#6482](https://github.com/danny-avila/LibreChat/pull/6482) +- ⚙️ refactor: OAuth Flow Signal, Type Safety, Tool Progress & Updated Packages by **@danny-avila** in 
[#6752](https://github.com/danny-avila/LibreChat/pull/6752) +- 📦 chore: bump vite from 6.2.3 to 6.2.5 by **@dependabot[bot]** in [#6745](https://github.com/danny-avila/LibreChat/pull/6745) +- 💾 chore: Enhance Local Storage Handling and Update MCP SDK by **@danny-avila** in [#6809](https://github.com/danny-avila/LibreChat/pull/6809) +- 🤖 refactor: Improve Agents Memory Usage, Bump Keyv, Grok 3 by **@danny-avila** in [#6850](https://github.com/danny-avila/LibreChat/pull/6850) +- 💾 refactor: Enhance Memory In Image Encodings & Client Disposal by **@danny-avila** in [#6852](https://github.com/danny-avila/LibreChat/pull/6852) +- 🔁 refactor: Token Event Handler and Standardize `maxTokens` Key by **@danny-avila** in [#6886](https://github.com/danny-avila/LibreChat/pull/6886) +- 🔍 refactor: Search & Message Retrieval by **@berry-13** in [#6903](https://github.com/danny-avila/LibreChat/pull/6903) +- 🎨 style: standardize dropdown styling & fix z-Index layering by **@berry-13** in [#6939](https://github.com/danny-avila/LibreChat/pull/6939) +- 📙 docs: CONTRIBUTING.md by **@dblock** in [#6831](https://github.com/danny-avila/LibreChat/pull/6831) +- 🧭 refactor: Modernize Nav/Header by **@danny-avila** in [#7094](https://github.com/danny-avila/LibreChat/pull/7094) +- 🪶 refactor: Chat Input Focus for Conversation Navigations & ChatForm Optimizations by **@danny-avila** in [#7100](https://github.com/danny-avila/LibreChat/pull/7100) +- 🔃 refactor: Streamline Navigation, Message Loading UX by **@danny-avila** in [#7118](https://github.com/danny-avila/LibreChat/pull/7118) +- 📜 docs: Unreleased changelog by **@github-actions[bot]** in [#6265](https://github.com/danny-avila/LibreChat/pull/6265) + + + +[See full release details][release-v0.7.8-rc1] + +[release-v0.7.8-rc1]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8-rc1 --- diff --git a/Dockerfile b/Dockerfile index d9113eb650..393b35354d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,18 @@ -# v0.7.7 +# v0.7.8 # Base 
node image FROM node:20-alpine AS node -RUN apk --no-cache add curl +# Install jemalloc +RUN apk add --no-cache jemalloc +RUN apk add --no-cache python3 py3-pip uv + +# Set environment variable to use jemalloc +ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2 + +# Add `uv` for extended MCP support +COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/ +RUN uv --version RUN mkdir -p /app && chown node:node /app WORKDIR /app @@ -38,4 +47,4 @@ CMD ["npm", "run", "backend"] # WORKDIR /usr/share/nginx/html # COPY --from=node /app/client/dist /usr/share/nginx/html # COPY client/nginx.conf /etc/nginx/conf.d/default.conf -# ENTRYPOINT ["nginx", "-g", "daemon off;"] +# ENTRYPOINT ["nginx", "-g", "daemon off;"] \ No newline at end of file diff --git a/Dockerfile.multi b/Dockerfile.multi index 40721137bb..991f805bec 100644 --- a/Dockerfile.multi +++ b/Dockerfile.multi @@ -1,8 +1,12 @@ # Dockerfile.multi -# v0.7.7 +# v0.7.8 # Base for all builds FROM node:20-alpine AS base-min +# Install jemalloc +RUN apk add --no-cache jemalloc +# Set environment variable to use jemalloc +ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2 WORKDIR /app RUN apk --no-cache add curl RUN npm config set fetch-retry-maxtimeout 600000 && \ @@ -50,6 +54,9 @@ RUN npm run build # API setup (including client dist) FROM base-min AS api-build +# Add `uv` for extended MCP support +COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/ +RUN uv --version WORKDIR /app # Install only production deps RUN npm ci --omit=dev diff --git a/README.md b/README.md index 3e02c2cc08..6e0c92221c 100644 --- a/README.md +++ b/README.md @@ -74,6 +74,11 @@ - 🪄 **Generative UI with Code Artifacts**: - [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat +- 🎨 **Image Generation & Editing** + - Text-to-image and image-to-image with [GPT-Image-1](https://www.librechat.ai/docs/features/image_gen#1--openai-image-tools-recommended) + - Text-to-image with [DALL-E 
(3/2)](https://www.librechat.ai/docs/features/image_gen#2--dalle-legacy), [Stable Diffusion](https://www.librechat.ai/docs/features/image_gen#3--stable-diffusion-local), [Flux](https://www.librechat.ai/docs/features/image_gen#4--flux), or any [MCP server](https://www.librechat.ai/docs/features/image_gen#5--model-context-protocol-mcp) + - Produce stunning visuals from prompts or refine existing images with a single instruction + - 💾 **Presets & Context Management**: - Create, Save, & Share Custom Presets - Switch between AI Endpoints and Presets mid-chat diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js index bc2e6042c7..91939975c4 100644 --- a/api/app/clients/AnthropicClient.js +++ b/api/app/clients/AnthropicClient.js @@ -9,7 +9,7 @@ const { getResponseSender, validateVisionModel, } = require('librechat-data-provider'); -const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents'); +const { SplitStreamHandler: _Handler } = require('@librechat/agents'); const { truncateText, formatMessage, @@ -26,10 +26,11 @@ const { const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils'); const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); +const { createFetch, createStreamEventHandlers } = require('./generators'); const Tokenizer = require('~/server/services/Tokenizer'); -const { logger, sendEvent } = require('~/config'); const { sleep } = require('~/server/utils'); const BaseClient = require('./BaseClient'); +const { logger } = require('~/config'); const HUMAN_PROMPT = '\n\nHuman:'; const AI_PROMPT = '\n\nAssistant:'; @@ -184,7 +185,10 @@ class AnthropicClient extends BaseClient { getClient(requestOptions) { /** @type {Anthropic.ClientOptions} */ const options = { - fetch: this.fetch, + fetch: createFetch({ + directEndpoint: this.options.directEndpoint, + reverseProxyUrl: 
this.options.reverseProxyUrl, + }), apiKey: this.apiKey, }; @@ -392,13 +396,13 @@ class AnthropicClient extends BaseClient { const formattedMessages = orderedMessages.map((message, i) => { const formattedMessage = this.useMessages ? formatMessage({ - message, - endpoint: EModelEndpoint.anthropic, - }) + message, + endpoint: EModelEndpoint.anthropic, + }) : { - author: message.isCreatedByUser ? this.userLabel : this.assistantLabel, - content: message?.content ?? message.text, - }; + author: message.isCreatedByUser ? this.userLabel : this.assistantLabel, + content: message?.content ?? message.text, + }; const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount; /* If tokens were never counted, or, is a Vision request and the message has files, count again */ @@ -414,6 +418,9 @@ class AnthropicClient extends BaseClient { this.contextHandlers?.processFile(file); continue; } + if (file.metadata?.fileIdentifier) { + continue; + } orderedMessages[i].tokenCount += this.calculateImageTokenCost({ width: file.width, @@ -673,7 +680,7 @@ class AnthropicClient extends BaseClient { } getCompletion() { - logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)'); + logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)"); } /** @@ -795,14 +802,11 @@ class AnthropicClient extends BaseClient { } logger.debug('[AnthropicClient]', { ...requestOptions }); + const handlers = createStreamEventHandlers(this.options.res); this.streamHandler = new SplitStreamHandler({ accumulate: true, runId: this.responseMessageId, - handlers: { - [GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event), - [GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event), - [GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event), - }, + handlers, }); let intermediateReply = this.streamHandler.tokens; @@ -884,7 +888,7 @@ class AnthropicClient extends BaseClient { } 
getBuildMessagesOptions() { - logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions'); + logger.debug("AnthropicClient doesn't use getBuildMessagesOptions"); } getEncoding() { diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js index f89f1b3a8e..55b8780180 100644 --- a/api/app/clients/BaseClient.js +++ b/api/app/clients/BaseClient.js @@ -28,15 +28,10 @@ class BaseClient { month: 'long', day: 'numeric', }); - this.fetch = this.fetch.bind(this); /** @type {boolean} */ this.skipSaveConvo = false; /** @type {boolean} */ this.skipSaveUserMessage = false; - /** @type {ClientDatabaseSavePromise} */ - this.userMessagePromise; - /** @type {ClientDatabaseSavePromise} */ - this.responsePromise; /** @type {string} */ this.user; /** @type {string} */ @@ -68,15 +63,15 @@ class BaseClient { } setOptions() { - throw new Error('Method \'setOptions\' must be implemented.'); + throw new Error("Method 'setOptions' must be implemented."); } async getCompletion() { - throw new Error('Method \'getCompletion\' must be implemented.'); + throw new Error("Method 'getCompletion' must be implemented."); } async sendCompletion() { - throw new Error('Method \'sendCompletion\' must be implemented.'); + throw new Error("Method 'sendCompletion' must be implemented."); } getSaveOptions() { @@ -242,11 +237,11 @@ class BaseClient { const userMessage = opts.isEdited ? 
this.currentMessages[this.currentMessages.length - 2] : this.createUserMessage({ - messageId: userMessageId, - parentMessageId, - conversationId, - text: message, - }); + messageId: userMessageId, + parentMessageId, + conversationId, + text: message, + }); if (typeof opts?.getReqData === 'function') { opts.getReqData({ @@ -564,6 +559,8 @@ class BaseClient { } async sendMessage(message, opts = {}) { + /** @type {Promise} */ + let userMessagePromise; const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } = await this.handleStartMethods(message, opts); @@ -625,11 +622,11 @@ class BaseClient { } if (!isEdited && !this.skipSaveUserMessage) { - this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); + userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); this.savedMessageIds.add(userMessage.messageId); if (typeof opts?.getReqData === 'function') { opts.getReqData({ - userMessagePromise: this.userMessagePromise, + userMessagePromise, }); } } @@ -655,7 +652,9 @@ class BaseClient { /** @type {string|string[]|undefined} */ const completion = await this.sendCompletion(payload, opts); - this.abortController.requestCompleted = true; + if (this.abortController) { + this.abortController.requestCompleted = true; + } /** @type {TMessage} */ const responseMessage = { @@ -703,7 +702,13 @@ class BaseClient { if (usage != null && Number(usage[this.outputTokensKey]) > 0) { responseMessage.tokenCount = usage[this.outputTokensKey]; completionTokens = responseMessage.tokenCount; - await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }); + await this.updateUserMessageTokenCount({ + usage, + tokenCountMap, + userMessage, + userMessagePromise, + opts, + }); } else { responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage); completionTokens = responseMessage.tokenCount; @@ -712,8 +717,8 @@ class BaseClient { await this.recordTokenUsage({ promptTokens, 
completionTokens, usage }); } - if (this.userMessagePromise) { - await this.userMessagePromise; + if (userMessagePromise) { + await userMessagePromise; } if (this.artifactPromises) { @@ -728,7 +733,11 @@ class BaseClient { } } - this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user); + responseMessage.databasePromise = this.saveMessageToDatabase( + responseMessage, + saveOptions, + user, + ); this.savedMessageIds.add(responseMessage.messageId); delete responseMessage.tokenCount; return responseMessage; @@ -749,9 +758,16 @@ class BaseClient { * @param {StreamUsage} params.usage * @param {Record} params.tokenCountMap * @param {TMessage} params.userMessage + * @param {Promise} params.userMessagePromise * @param {object} params.opts */ - async updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }) { + async updateUserMessageTokenCount({ + usage, + tokenCountMap, + userMessage, + userMessagePromise, + opts, + }) { /** @type {boolean} */ const shouldUpdateCount = this.calculateCurrentTokenCount != null && @@ -787,7 +803,7 @@ class BaseClient { Note: we update the user message to be sure it gets the calculated token count; though `AskController` saves the user message, EditController does not */ - await this.userMessagePromise; + await userMessagePromise; await this.updateMessageInDatabase({ messageId: userMessage.messageId, tokenCount: userMessageTokenCount, @@ -853,7 +869,7 @@ class BaseClient { } const savedMessage = await saveMessage( - this.options.req, + this.options?.req, { ...message, endpoint: this.options.endpoint, @@ -877,7 +893,7 @@ class BaseClient { const existingConvo = this.fetchedConvo === true ? 
null - : await getConvo(this.options.req?.user?.id, message.conversationId); + : await getConvo(this.options?.req?.user?.id, message.conversationId); const unsetFields = {}; const exceptions = new Set(['spec', 'iconURL']); @@ -897,7 +913,7 @@ class BaseClient { } } - const conversation = await saveConvo(this.options.req, fieldsToKeep, { + const conversation = await saveConvo(this.options?.req, fieldsToKeep, { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo', unsetFields, }); diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js index 5450300a17..07b2fa97bb 100644 --- a/api/app/clients/ChatGPTClient.js +++ b/api/app/clients/ChatGPTClient.js @@ -1,4 +1,4 @@ -const Keyv = require('keyv'); +const { Keyv } = require('keyv'); const crypto = require('crypto'); const { CohereClient } = require('cohere-ai'); const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source'); @@ -339,7 +339,7 @@ class ChatGPTClient extends BaseClient { opts.body = JSON.stringify(modelOptions); if (modelOptions.stream) { - // eslint-disable-next-line no-async-promise-executor + return new Promise(async (resolve, reject) => { try { let done = false; diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js index 575065d879..c9102e9ae2 100644 --- a/api/app/clients/GoogleClient.js +++ b/api/app/clients/GoogleClient.js @@ -140,8 +140,7 @@ class GoogleClient extends BaseClient { this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments)); /** @type {boolean} Whether using a "GenerativeAI" Model */ - this.isGenerativeModel = - this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm'); + this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model); this.maxContextTokens = this.options.maxContextTokens ?? 
@@ -318,6 +317,9 @@ class GoogleClient extends BaseClient { this.contextHandlers?.processFile(file); continue; } + if (file.metadata?.fileIdentifier) { + continue; + } } this.augmentedPrompt = await this.contextHandlers.createContext(); diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index 179f5c986e..280db89284 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -1,7 +1,6 @@ -const OpenAI = require('openai'); const { OllamaClient } = require('./OllamaClient'); const { HttpsProxyAgent } = require('https-proxy-agent'); -const { SplitStreamHandler, GraphEvents } = require('@librechat/agents'); +const { SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents'); const { Constants, ImageDetail, @@ -32,17 +31,18 @@ const { createContextHandlers, } = require('./prompts'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); +const { createFetch, createStreamEventHandlers } = require('./generators'); const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils'); const Tokenizer = require('~/server/services/Tokenizer'); const { spendTokens } = require('~/models/spendTokens'); const { handleOpenAIErrors } = require('./tools/util'); const { createLLM, RunManager } = require('./llm'); -const { logger, sendEvent } = require('~/config'); const ChatGPTClient = require('./ChatGPTClient'); const { summaryBuffer } = require('./memory'); const { runTitleChain } = require('./chains'); const { tokenSplit } = require('./document'); const BaseClient = require('./BaseClient'); +const { logger } = require('~/config'); class OpenAIClient extends BaseClient { constructor(apiKey, options = {}) { @@ -108,7 +108,7 @@ class OpenAIClient extends BaseClient { this.checkVisionRequest(this.options.attachments); } - const omniPattern = /\b(o1|o3)\b/i; + const omniPattern = /\b(o\d)\b/i; this.isOmni = omniPattern.test(this.modelOptions.model); const { OPENAI_FORCE_PROMPT } = 
process.env ?? {}; @@ -455,6 +455,9 @@ class OpenAIClient extends BaseClient { this.contextHandlers?.processFile(file); continue; } + if (file.metadata?.fileIdentifier) { + continue; + } orderedMessages[i].tokenCount += this.calculateImageTokenCost({ width: file.width, @@ -472,7 +475,9 @@ class OpenAIClient extends BaseClient { promptPrefix = this.augmentedPrompt + promptPrefix; } - if (promptPrefix && this.isOmni !== true) { + const noSystemModelRegex = /\b(o1-preview|o1-mini)\b/i.test(this.modelOptions.model); + + if (promptPrefix && !noSystemModelRegex) { promptPrefix = `Instructions:\n${promptPrefix.trim()}`; instructions = { role: 'system', @@ -500,7 +505,7 @@ class OpenAIClient extends BaseClient { }; /** EXPERIMENTAL */ - if (promptPrefix && this.isOmni === true) { + if (promptPrefix && noSystemModelRegex) { const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user'); if (lastUserMessageIndex !== -1) { if (Array.isArray(payload[lastUserMessageIndex].content)) { @@ -609,7 +614,7 @@ class OpenAIClient extends BaseClient { return result.trim(); } - logger.debug('[OpenAIClient] sendCompletion: result', result); + logger.debug('[OpenAIClient] sendCompletion: result', { ...result }); if (this.isChatCompletion) { reply = result.choices[0].message.content; @@ -818,7 +823,7 @@ ${convo} const completionTokens = this.getTokenCount(title); - this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' }); + await this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' }); } catch (e) { logger.error( '[OpenAIClient] There was an issue generating the title with the completion method', @@ -1224,9 +1229,9 @@ ${convo} opts.baseURL = this.langchainProxy ? constructAzureURL({ - baseURL: this.langchainProxy, - azureOptions: this.azure, - }) + baseURL: this.langchainProxy, + azureOptions: this.azure, + }) : this.azureEndpoint.split(/(? 
{ + const dropParams = [...this.options.dropParams]; + dropParams.forEach((param) => { delete modelOptions[param]; }); logger.debug('[OpenAIClient] chatCompletion: dropped params', { - dropParams: this.options.dropParams, + dropParams: dropParams, modelOptions, }); } @@ -1355,15 +1376,12 @@ ${convo} delete modelOptions.reasoning_effort; } + const handlers = createStreamEventHandlers(this.options.res); this.streamHandler = new SplitStreamHandler({ reasoningKey, accumulate: true, runId: this.responseMessageId, - handlers: { - [GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event), - [GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event), - [GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event), - }, + handlers, }); intermediateReply = this.streamHandler.tokens; @@ -1377,12 +1395,6 @@ ${convo} ...modelOptions, stream: true, }; - if ( - this.options.endpoint === EModelEndpoint.openAI || - this.options.endpoint === EModelEndpoint.azureOpenAI - ) { - params.stream_options = { include_usage: true }; - } const stream = await openai.beta.chat.completions .stream(params) .on('abort', () => { @@ -1467,6 +1479,11 @@ ${convo} }); } + if (openai.abortHandler && abortController.signal) { + abortController.signal.removeEventListener('abort', openai.abortHandler); + openai.abortHandler = undefined; + } + if (!chatCompletion && UnexpectedRoleError) { throw new Error( 'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant', diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js index 60f8703e0f..d0ffe2ef75 100644 --- a/api/app/clients/PluginsClient.js +++ b/api/app/clients/PluginsClient.js @@ -252,12 +252,14 @@ class PluginsClient extends OpenAIClient { await this.recordTokenUsage(responseMessage); } - this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user); + const databasePromise = this.saveMessageToDatabase(responseMessage, 
saveOptions, user); delete responseMessage.tokenCount; - return { ...responseMessage, ...result }; + return { ...responseMessage, ...result, databasePromise }; } async sendMessage(message, opts = {}) { + /** @type {Promise} */ + let userMessagePromise; /** @type {{ filteredTools: string[], includedTools: string[] }} */ const { filteredTools = [], includedTools = [] } = this.options.req.app.locals; @@ -327,10 +329,10 @@ class PluginsClient extends OpenAIClient { } if (!this.skipSaveUserMessage) { - this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); + userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); if (typeof opts?.getReqData === 'function') { opts.getReqData({ - userMessagePromise: this.userMessagePromise, + userMessagePromise, }); } } diff --git a/api/app/clients/generators.js b/api/app/clients/generators.js new file mode 100644 index 0000000000..9814cac7a5 --- /dev/null +++ b/api/app/clients/generators.js @@ -0,0 +1,71 @@ +const fetch = require('node-fetch'); +const { GraphEvents } = require('@librechat/agents'); +const { logger, sendEvent } = require('~/config'); +const { sleep } = require('~/server/utils'); + +/** + * Makes a function to make HTTP request and logs the process. + * @param {Object} params + * @param {boolean} [params.directEndpoint] - Whether to use a direct endpoint. + * @param {string} [params.reverseProxyUrl] - The reverse proxy URL to use for the request. + * @returns {Promise} - A promise that resolves to the response of the fetch request. + */ +function createFetch({ directEndpoint = false, reverseProxyUrl = '' }) { + /** + * Makes an HTTP request and logs the process. + * @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object. + * @param {RequestInit} [init] - Optional init options for the request. + * @returns {Promise} - A promise that resolves to the response of the fetch request. 
+ */ + return async (_url, init) => { + let url = _url; + if (directEndpoint) { + url = reverseProxyUrl; + } + logger.debug(`Making request to ${url}`); + if (typeof Bun !== 'undefined') { + return await fetch(url, init); + } + return await fetch(url, init); + }; +} + +// Add this at the module level outside the class +/** + * Creates event handlers for stream events that don't capture client references + * @param {Object} res - The response object to send events to + * @returns {Object} Object containing handler functions + */ +function createStreamEventHandlers(res) { + return { + [GraphEvents.ON_RUN_STEP]: (event) => { + if (res) { + sendEvent(res, event); + } + }, + [GraphEvents.ON_MESSAGE_DELTA]: (event) => { + if (res) { + sendEvent(res, event); + } + }, + [GraphEvents.ON_REASONING_DELTA]: (event) => { + if (res) { + sendEvent(res, event); + } + }, + }; +} + +function createHandleLLMNewToken(streamRate) { + return async () => { + if (streamRate) { + await sleep(streamRate); + } + }; +} + +module.exports = { + createFetch, + createHandleLLMNewToken, + createStreamEventHandlers, +}; diff --git a/api/app/clients/specs/BaseClient.test.js b/api/app/clients/specs/BaseClient.test.js index c9be50d3de..d620d5f647 100644 --- a/api/app/clients/specs/BaseClient.test.js +++ b/api/app/clients/specs/BaseClient.test.js @@ -32,7 +32,7 @@ jest.mock('~/models', () => ({ const { getConvo, saveConvo } = require('~/models'); -jest.mock('@langchain/openai', () => { +jest.mock('@librechat/agents', () => { return { ChatOpenAI: jest.fn().mockImplementation(() => { return {}; diff --git a/api/app/clients/specs/OpenAIClient.test.js b/api/app/clients/specs/OpenAIClient.test.js index adc290486a..579f636eef 100644 --- a/api/app/clients/specs/OpenAIClient.test.js +++ b/api/app/clients/specs/OpenAIClient.test.js @@ -1,9 +1,7 @@ jest.mock('~/cache/getLogStores'); require('dotenv').config(); -const OpenAI = require('openai'); -const getLogStores = require('~/cache/getLogStores'); const { 
fetchEventSource } = require('@waylaidwanderer/fetch-event-source'); -const { genAzureChatCompletion } = require('~/utils/azureUtils'); +const getLogStores = require('~/cache/getLogStores'); const OpenAIClient = require('../OpenAIClient'); jest.mock('meilisearch'); @@ -36,19 +34,21 @@ jest.mock('~/models', () => ({ updateFileUsage: jest.fn(), })); -jest.mock('@langchain/openai', () => { - return { - ChatOpenAI: jest.fn().mockImplementation(() => { - return {}; - }), - }; +// Import the actual module but mock specific parts +const agents = jest.requireActual('@librechat/agents'); +const { CustomOpenAIClient } = agents; + +// Also mock ChatOpenAI to prevent real API calls +agents.ChatOpenAI = jest.fn().mockImplementation(() => { + return {}; +}); +agents.AzureChatOpenAI = jest.fn().mockImplementation(() => { + return {}; }); -jest.mock('openai'); - -jest.spyOn(OpenAI, 'constructor').mockImplementation(function (...options) { - // We can add additional logic here if needed - return new OpenAI(...options); +// Mock only the CustomOpenAIClient constructor +jest.spyOn(CustomOpenAIClient, 'constructor').mockImplementation(function (...options) { + return new CustomOpenAIClient(...options); }); const finalChatCompletion = jest.fn().mockResolvedValue({ @@ -120,7 +120,13 @@ const create = jest.fn().mockResolvedValue({ ], }); -OpenAI.mockImplementation(() => ({ +// Mock the implementation of CustomOpenAIClient instances +jest.spyOn(CustomOpenAIClient.prototype, 'constructor').mockImplementation(function () { + return this; +}); + +// Create a mock for the CustomOpenAIClient class +const mockCustomOpenAIClient = jest.fn().mockImplementation(() => ({ beta: { chat: { completions: { @@ -135,6 +141,8 @@ OpenAI.mockImplementation(() => ({ }, })); +CustomOpenAIClient.mockImplementation = mockCustomOpenAIClient; + describe('OpenAIClient', () => { beforeEach(() => { const mockCache = { @@ -559,41 +567,6 @@ describe('OpenAIClient', () => { expect(requestBody).toHaveProperty('model'); 
expect(requestBody.model).toBe(model); }); - - it('[Azure OpenAI] should call chatCompletion and OpenAI.stream with correct args', async () => { - // Set a default model - process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt4-turbo'; - - const onProgress = jest.fn().mockImplementation(() => ({})); - client.azure = defaultAzureOptions; - const chatCompletion = jest.spyOn(client, 'chatCompletion'); - await client.sendMessage('Hi mom!', { - replaceOptions: true, - ...defaultOptions, - modelOptions: { model: 'gpt4-turbo', stream: true }, - onProgress, - azure: defaultAzureOptions, - }); - - expect(chatCompletion).toHaveBeenCalled(); - expect(chatCompletion.mock.calls.length).toBe(1); - - const chatCompletionArgs = chatCompletion.mock.calls[0][0]; - const { payload } = chatCompletionArgs; - - expect(payload[0].role).toBe('user'); - expect(payload[0].content).toBe('Hi mom!'); - - // Azure OpenAI does not use the model property, and will error if it's passed - // This check ensures the model property is not present - const streamArgs = stream.mock.calls[0][0]; - expect(streamArgs).not.toHaveProperty('model'); - - // Check if the baseURL is correct - const constructorArgs = OpenAI.mock.calls[0][0]; - const expectedURL = genAzureChatCompletion(defaultAzureOptions).split('/chat')[0]; - expect(constructorArgs.baseURL).toBe(expectedURL); - }); }); describe('checkVisionRequest functionality', () => { diff --git a/api/app/clients/tools/index.js b/api/app/clients/tools/index.js index df436fb089..87b1884e88 100644 --- a/api/app/clients/tools/index.js +++ b/api/app/clients/tools/index.js @@ -10,6 +10,7 @@ const StructuredACS = require('./structured/AzureAISearch'); const StructuredSD = require('./structured/StableDiffusion'); const GoogleSearchAPI = require('./structured/GoogleSearch'); const TraversaalSearch = require('./structured/TraversaalSearch'); +const createOpenAIImageTools = require('./structured/OpenAIImageTools'); const TavilySearchResults = 
require('./structured/TavilySearchResults'); /** @type {Record} */ @@ -40,4 +41,5 @@ module.exports = { StructuredWolfram, createYouTubeTools, TavilySearchResults, + createOpenAIImageTools, }; diff --git a/api/app/clients/tools/manifest.json b/api/app/clients/tools/manifest.json index 43be7a4e6c..55c1b1c51e 100644 --- a/api/app/clients/tools/manifest.json +++ b/api/app/clients/tools/manifest.json @@ -44,6 +44,20 @@ } ] }, + { + "name": "OpenAI Image Tools", + "pluginKey": "image_gen_oai", + "toolkit": true, + "description": "Image Generation and Editing using OpenAI's latest state-of-the-art models", + "icon": "/assets/image_gen_oai.png", + "authConfig": [ + { + "authField": "IMAGE_GEN_OAI_API_KEY", + "label": "OpenAI Image Tools API Key", + "description": "Your OpenAI API Key for Image Generation and Editing" + } + ] + }, { "name": "Wolfram", "pluginKey": "wolfram", diff --git a/api/app/clients/tools/structured/OpenAIImageTools.js b/api/app/clients/tools/structured/OpenAIImageTools.js new file mode 100644 index 0000000000..85941a779a --- /dev/null +++ b/api/app/clients/tools/structured/OpenAIImageTools.js @@ -0,0 +1,518 @@ +const { z } = require('zod'); +const axios = require('axios'); +const { v4 } = require('uuid'); +const OpenAI = require('openai'); +const FormData = require('form-data'); +const { tool } = require('@langchain/core/tools'); +const { HttpsProxyAgent } = require('https-proxy-agent'); +const { ContentTypes, EImageOutputType } = require('librechat-data-provider'); +const { getStrategyFunctions } = require('~/server/services/Files/strategies'); +const { logAxiosError, extractBaseURL } = require('~/utils'); +const { getFiles } = require('~/models/File'); +const { logger } = require('~/config'); + +/** Default descriptions for image generation tool */ +const DEFAULT_IMAGE_GEN_DESCRIPTION = ` +Generates high-quality, original images based solely on text, not using any uploaded reference images. 
+ +When to use \`image_gen_oai\`: +- To create entirely new images from detailed text descriptions that do NOT reference any image files. + +When NOT to use \`image_gen_oai\`: +- If the user has uploaded any images and requests modifications, enhancements, or remixing based on those uploads → use \`image_edit_oai\` instead. + +Generated image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`. +`.trim(); + +/** Default description for image editing tool */ +const DEFAULT_IMAGE_EDIT_DESCRIPTION = + `Generates high-quality, original images based on text and one or more uploaded/referenced images. + +When to use \`image_edit_oai\`: +- The user wants to modify, extend, or remix one **or more** uploaded images, either: + - Previously generated, or in the current request (both to be included in the \`image_ids\` array). +- Always when the user refers to uploaded images for editing, enhancement, remixing, style transfer, or combining elements. +- Any current or existing images are to be used as visual guides. +- If there are any files in the current request, they are more likely than not expected as references for image edit requests. + +When NOT to use \`image_edit_oai\`: +- Brand-new generations that do not rely on an existing image → use \`image_gen_oai\` instead. + +Both generated and referenced image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`. +`.trim(); + +/** Default prompt descriptions */ +const DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION = `Describe the image you want in detail. + Be highly specific—break your idea into layers: + (1) main concept and subject, + (2) composition and position, + (3) lighting and mood, + (4) style, medium, or camera details, + (5) important features (age, expression, clothing, etc.), + (6) background. + Use positive, descriptive language and specify what should be included, not what to avoid. 
+ List number and characteristics of people/objects, and mention style/technical requirements (e.g., "DSLR photo, 85mm lens, golden hour"). + Do not reference any uploaded images—use for new image creation from text only.`; + +const DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION = `Describe the changes, enhancements, or new ideas to apply to the uploaded image(s). + Be highly specific—break your request into layers: + (1) main concept or transformation, + (2) specific edits/replacements or composition guidance, + (3) desired style, mood, or technique, + (4) features/items to keep, change, or add (such as objects, people, clothing, lighting, etc.). + Use positive, descriptive language and clarify what should be included or changed, not what to avoid. + Always base this prompt on the most recently uploaded reference images.`; + +const displayMessage = + 'The tool displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. 
The user may download the images by clicking on them, but do not mention anything about downloading to the user.'; + +/** + * Replaces unwanted characters from the input string + * @param {string} inputString - The input string to process + * @returns {string} - The processed string + */ +function replaceUnwantedChars(inputString) { + return inputString + .replace(/\r\n|\r|\n/g, ' ') + .replace(/"/g, '') + .trim(); +} + +function returnValue(value) { + if (typeof value === 'string') { + return [value, {}]; + } else if (typeof value === 'object') { + if (Array.isArray(value)) { + return value; + } + return [displayMessage, value]; + } + return value; +} + +const getImageGenDescription = () => { + return process.env.IMAGE_GEN_OAI_DESCRIPTION || DEFAULT_IMAGE_GEN_DESCRIPTION; +}; + +const getImageEditDescription = () => { + return process.env.IMAGE_EDIT_OAI_DESCRIPTION || DEFAULT_IMAGE_EDIT_DESCRIPTION; +}; + +const getImageGenPromptDescription = () => { + return process.env.IMAGE_GEN_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION; +}; + +const getImageEditPromptDescription = () => { + return process.env.IMAGE_EDIT_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION; +}; + +/** + * Creates OpenAI Image tools (generation and editing) + * @param {Object} fields - Configuration fields + * @param {ServerRequest} fields.req - Whether the tool is being used in an agent context + * @param {boolean} fields.isAgent - Whether the tool is being used in an agent context + * @param {string} fields.IMAGE_GEN_OAI_API_KEY - The OpenAI API key + * @param {boolean} [fields.override] - Whether to override the API key check, necessary for app initialization + * @param {MongoFile[]} [fields.imageFiles] - The images to be used for editing + * @returns {Array} - Array of image tools + */ +function createOpenAIImageTools(fields = {}) { + /** @type {boolean} Used to initialize the Tool without necessary variables. */ + const override = fields.override ?? 
false; + /** @type {boolean} */ + if (!override && !fields.isAgent) { + throw new Error('This tool is only available for agents.'); + } + const { req } = fields; + const imageOutputType = req?.app.locals.imageOutputType || EImageOutputType.PNG; + const appFileStrategy = req?.app.locals.fileStrategy; + + const getApiKey = () => { + const apiKey = process.env.IMAGE_GEN_OAI_API_KEY ?? ''; + if (!apiKey && !override) { + throw new Error('Missing IMAGE_GEN_OAI_API_KEY environment variable.'); + } + return apiKey; + }; + + let apiKey = fields.IMAGE_GEN_OAI_API_KEY ?? getApiKey(); + const closureConfig = { apiKey }; + + let baseURL = 'https://api.openai.com/v1/'; + if (!override && process.env.IMAGE_GEN_OAI_BASEURL) { + baseURL = extractBaseURL(process.env.IMAGE_GEN_OAI_BASEURL); + closureConfig.baseURL = baseURL; + } + + // Note: Azure may not yet support the latest image generation models + if ( + !override && + process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && + process.env.IMAGE_GEN_OAI_BASEURL + ) { + baseURL = process.env.IMAGE_GEN_OAI_BASEURL; + closureConfig.baseURL = baseURL; + closureConfig.defaultQuery = { 'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION }; + closureConfig.defaultHeaders = { + 'api-key': process.env.IMAGE_GEN_OAI_API_KEY, + 'Content-Type': 'application/json', + }; + closureConfig.apiKey = process.env.IMAGE_GEN_OAI_API_KEY; + } + + const imageFiles = fields.imageFiles ?? 
[]; + + /** + * Image Generation Tool + */ + const imageGenTool = tool( + async ( + { + prompt, + background = 'auto', + n = 1, + output_compression = 100, + quality = 'auto', + size = 'auto', + }, + runnableConfig, + ) => { + if (!prompt) { + throw new Error('Missing required field: prompt'); + } + const clientConfig = { ...closureConfig }; + if (process.env.PROXY) { + clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY); + } + + /** @type {OpenAI} */ + const openai = new OpenAI(clientConfig); + let output_format = imageOutputType; + if ( + background === 'transparent' && + output_format !== EImageOutputType.PNG && + output_format !== EImageOutputType.WEBP + ) { + logger.warn( + '[ImageGenOAI] Transparent background requires PNG or WebP format, defaulting to PNG', + ); + output_format = EImageOutputType.PNG; + } + + let resp; + try { + const derivedSignal = runnableConfig?.signal + ? AbortSignal.any([runnableConfig.signal]) + : undefined; + resp = await openai.images.generate( + { + model: 'gpt-image-1', + prompt: replaceUnwantedChars(prompt), + n: Math.min(Math.max(1, n), 10), + background, + output_format, + output_compression: + output_format === EImageOutputType.WEBP || output_format === EImageOutputType.JPEG + ? output_compression + : undefined, + quality, + size, + }, + { + signal: derivedSignal, + }, + ); + } catch (error) { + const message = '[image_gen_oai] Problem generating the image:'; + logAxiosError({ error, message }); + return returnValue(`Something went wrong when trying to generate the image. The OpenAI API may be unavailable: +Error Message: ${error.message}`); + } + + if (!resp) { + return returnValue( + 'Something went wrong when trying to generate the image. 
The OpenAI API may be unavailable', + ); + } + + // For gpt-image-1, the response contains base64-encoded images + // TODO: handle cost in `resp.usage` + const base64Image = resp.data[0].b64_json; + + if (!base64Image) { + return returnValue( + 'No image data returned from OpenAI API. There may be a problem with the API or your configuration.', + ); + } + + const content = [ + { + type: ContentTypes.IMAGE_URL, + image_url: { + url: `data:image/${output_format};base64,${base64Image}`, + }, + }, + ]; + + const file_ids = [v4()]; + const response = [ + { + type: ContentTypes.TEXT, + text: displayMessage + `\n\ngenerated_image_id: "${file_ids[0]}"`, + }, + ]; + return [response, { content, file_ids }]; + }, + { + name: 'image_gen_oai', + description: getImageGenDescription(), + schema: z.object({ + prompt: z.string().max(32000).describe(getImageGenPromptDescription()), + background: z + .enum(['transparent', 'opaque', 'auto']) + .optional() + .describe( + 'Sets transparency for the background. Must be one of transparent, opaque or auto (default). When transparent, the output format should be png or webp.', + ), + /* + n: z + .number() + .int() + .min(1) + .max(10) + .optional() + .describe('The number of images to generate. Must be between 1 and 10.'), + output_compression: z + .number() + .int() + .min(0) + .max(100) + .optional() + .describe('The compression level (0-100%) for webp or jpeg formats. Defaults to 100.'), + */ + quality: z + .enum(['auto', 'high', 'medium', 'low']) + .optional() + .describe('The quality of the image. One of auto (default), high, medium, or low.'), + size: z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .optional() + .describe( + 'The size of the generated image. 
One of 1024x1024, 1536x1024 (landscape), 1024x1536 (portrait), or auto (default).', + ), + }), + responseFormat: 'content_and_artifact', + }, + ); + + /** + * Image Editing Tool + */ + const imageEditTool = tool( + async ({ prompt, image_ids, quality = 'auto', size = 'auto' }, runnableConfig) => { + if (!prompt) { + throw new Error('Missing required field: prompt'); + } + + const clientConfig = { ...closureConfig }; + if (process.env.PROXY) { + clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY); + } + + const formData = new FormData(); + formData.append('model', 'gpt-image-1'); + formData.append('prompt', replaceUnwantedChars(prompt)); + // TODO: `mask` support + // TODO: more than 1 image support + // formData.append('n', n.toString()); + formData.append('quality', quality); + formData.append('size', size); + + /** @type {Record>} */ + const streamMethods = {}; + + const requestFilesMap = Object.fromEntries(imageFiles.map((f) => [f.file_id, { ...f }])); + + const orderedFiles = new Array(image_ids.length); + const idsToFetch = []; + const indexOfMissing = Object.create(null); + + for (let i = 0; i < image_ids.length; i++) { + const id = image_ids[i]; + const file = requestFilesMap[id]; + + if (file) { + orderedFiles[i] = file; + } else { + idsToFetch.push(id); + indexOfMissing[id] = i; + } + } + + if (idsToFetch.length) { + const fetchedFiles = await getFiles( + { + user: req.user.id, + file_id: { $in: idsToFetch }, + height: { $exists: true }, + width: { $exists: true }, + }, + {}, + {}, + ); + + for (const file of fetchedFiles) { + requestFilesMap[file.file_id] = file; + orderedFiles[indexOfMissing[file.file_id]] = file; + } + } + for (const imageFile of orderedFiles) { + if (!imageFile) { + continue; + } + /** @type {NodeStream} */ + let stream; + /** @type {NodeStreamDownloader} */ + let getDownloadStream; + const source = imageFile.source || appFileStrategy; + if (!source) { + throw new Error('No source found for image file'); + } + if 
(streamMethods[source]) { + getDownloadStream = streamMethods[source]; + } else { + ({ getDownloadStream } = getStrategyFunctions(source)); + streamMethods[source] = getDownloadStream; + } + if (!getDownloadStream) { + throw new Error(`No download stream method found for source: ${source}`); + } + stream = await getDownloadStream(req, imageFile.filepath); + if (!stream) { + throw new Error('Failed to get download stream for image file'); + } + formData.append('image[]', stream, { + filename: imageFile.filename, + contentType: imageFile.type, + }); + } + + /** @type {import('axios').RawAxiosHeaders} */ + let headers = { + ...formData.getHeaders(), + }; + + if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) { + headers['api-key'] = apiKey; + } else { + headers['Authorization'] = `Bearer ${apiKey}`; + } + + try { + const derivedSignal = runnableConfig?.signal + ? AbortSignal.any([runnableConfig.signal]) + : undefined; + + /** @type {import('axios').AxiosRequestConfig} */ + const axiosConfig = { + headers, + ...clientConfig, + signal: derivedSignal, + baseURL, + }; + + if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) { + axiosConfig.params = { + 'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION, + ...axiosConfig.params, + }; + } + const response = await axios.post('/images/edits', formData, axiosConfig); + + if (!response.data || !response.data.data || !response.data.data.length) { + return returnValue( + 'No image data returned from OpenAI API. There may be a problem with the API or your configuration.', + ); + } + + const base64Image = response.data.data[0].b64_json; + if (!base64Image) { + return returnValue( + 'No image data returned from OpenAI API. 
There may be a problem with the API or your configuration.', + ); + } + + const content = [ + { + type: ContentTypes.IMAGE_URL, + image_url: { + url: `data:image/${imageOutputType};base64,${base64Image}`, + }, + }, + ]; + + const file_ids = [v4()]; + const textResponse = [ + { + type: ContentTypes.TEXT, + text: + displayMessage + + `\n\ngenerated_image_id: "${file_ids[0]}"\nreferenced_image_ids: ["${image_ids.join('", "')}"]`, + }, + ]; + return [textResponse, { content, file_ids }]; + } catch (error) { + const message = '[image_edit_oai] Problem editing the image:'; + logAxiosError({ error, message }); + return returnValue(`Something went wrong when trying to edit the image. The OpenAI API may be unavailable: +Error Message: ${error.message || 'Unknown error'}`); + } + }, + { + name: 'image_edit_oai', + description: getImageEditDescription(), + schema: z.object({ + image_ids: z + .array(z.string()) + .min(1) + .describe( + ` +IDs (image ID strings) of previously generated or uploaded images that should guide the edit. + +Guidelines: +- If the user's request depends on any prior image(s), copy their image IDs into the \`image_ids\` array (in the same order the user refers to them). +- Never invent or hallucinate IDs; only use IDs that are still visible in the conversation context. +- If no earlier image is relevant, omit the field entirely. +`.trim(), + ), + prompt: z.string().max(32000).describe(getImageEditPromptDescription()), + /* + n: z + .number() + .int() + .min(1) + .max(10) + .optional() + .describe('The number of images to generate. Must be between 1 and 10. Defaults to 1.'), + */ + quality: z + .enum(['auto', 'high', 'medium', 'low']) + .optional() + .describe( + 'The quality of the image. One of auto (default), high, medium, or low. High/medium/low only supported for gpt-image-1.', + ), + size: z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536', '256x256', '512x512']) + .optional() + .describe( + 'The size of the generated images. 
For gpt-image-1: auto (default), 1024x1024, 1536x1024, 1024x1536. For dall-e-2: 256x256, 512x512, 1024x1024.', + ), + }), + responseFormat: 'content_and_artifact', + }, + ); + + return [imageGenTool, imageEditTool]; +} + +module.exports = createOpenAIImageTools; diff --git a/api/app/clients/tools/structured/TavilySearchResults.js b/api/app/clients/tools/structured/TavilySearchResults.js index 9a62053ff0..9461293371 100644 --- a/api/app/clients/tools/structured/TavilySearchResults.js +++ b/api/app/clients/tools/structured/TavilySearchResults.js @@ -43,9 +43,39 @@ class TavilySearchResults extends Tool { .boolean() .optional() .describe('Whether to include answers in the search results. Default is False.'), - // include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'), - // include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'), - // exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'), + include_raw_content: z + .boolean() + .optional() + .describe('Whether to include raw content in the search results. Default is False.'), + include_domains: z + .array(z.string()) + .optional() + .describe('A list of domains to specifically include in the search results.'), + exclude_domains: z + .array(z.string()) + .optional() + .describe('A list of domains to specifically exclude from the search results.'), + topic: z + .enum(['general', 'news', 'finance']) + .optional() + .describe( + 'The category of the search. Use news ONLY if query SPECIFCALLY mentions the word "news".', + ), + time_range: z + .enum(['day', 'week', 'month', 'year', 'd', 'w', 'm', 'y']) + .optional() + .describe('The time range back from the current date to filter results.'), + days: z + .number() + .min(1) + .optional() + .describe('Number of days back from the current date to include. 
Only if topic is news.'), + include_image_descriptions: z + .boolean() + .optional() + .describe( + 'When include_images is true, also add a descriptive text for each image. Default is false.', + ), }); } diff --git a/api/app/clients/tools/util/addOpenAPISpecs.js b/api/app/clients/tools/util/addOpenAPISpecs.js deleted file mode 100644 index 8b87be9941..0000000000 --- a/api/app/clients/tools/util/addOpenAPISpecs.js +++ /dev/null @@ -1,30 +0,0 @@ -const { loadSpecs } = require('./loadSpecs'); - -function transformSpec(input) { - return { - name: input.name_for_human, - pluginKey: input.name_for_model, - description: input.description_for_human, - icon: input?.logo_url ?? 'https://placehold.co/70x70.png', - // TODO: add support for authentication - isAuthRequired: 'false', - authConfig: [], - }; -} - -async function addOpenAPISpecs(availableTools) { - try { - const specs = (await loadSpecs({})).map(transformSpec); - if (specs.length > 0) { - return [...specs, ...availableTools]; - } - return availableTools; - } catch (error) { - return availableTools; - } -} - -module.exports = { - transformSpec, - addOpenAPISpecs, -}; diff --git a/api/app/clients/tools/util/addOpenAPISpecs.spec.js b/api/app/clients/tools/util/addOpenAPISpecs.spec.js deleted file mode 100644 index 21ff4eb8cc..0000000000 --- a/api/app/clients/tools/util/addOpenAPISpecs.spec.js +++ /dev/null @@ -1,76 +0,0 @@ -const { addOpenAPISpecs, transformSpec } = require('./addOpenAPISpecs'); -const { loadSpecs } = require('./loadSpecs'); -const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin'); - -jest.mock('./loadSpecs'); -jest.mock('../dynamic/OpenAPIPlugin'); - -describe('transformSpec', () => { - it('should transform input spec to a desired format', () => { - const input = { - name_for_human: 'Human Name', - name_for_model: 'Model Name', - description_for_human: 'Human Description', - logo_url: 'https://example.com/logo.png', - }; - - const expectedOutput = { - name: 'Human Name', - pluginKey: 
'Model Name', - description: 'Human Description', - icon: 'https://example.com/logo.png', - isAuthRequired: 'false', - authConfig: [], - }; - - expect(transformSpec(input)).toEqual(expectedOutput); - }); - - it('should use default icon if logo_url is not provided', () => { - const input = { - name_for_human: 'Human Name', - name_for_model: 'Model Name', - description_for_human: 'Human Description', - }; - - const expectedOutput = { - name: 'Human Name', - pluginKey: 'Model Name', - description: 'Human Description', - icon: 'https://placehold.co/70x70.png', - isAuthRequired: 'false', - authConfig: [], - }; - - expect(transformSpec(input)).toEqual(expectedOutput); - }); -}); - -describe('addOpenAPISpecs', () => { - it('should add specs to available tools', async () => { - const availableTools = ['Tool1', 'Tool2']; - const specs = [ - { - name_for_human: 'Human Name', - name_for_model: 'Model Name', - description_for_human: 'Human Description', - logo_url: 'https://example.com/logo.png', - }, - ]; - - loadSpecs.mockResolvedValue(specs); - createOpenAPIPlugin.mockReturnValue('Plugin'); - - const result = await addOpenAPISpecs(availableTools); - expect(result).toEqual([...specs.map(transformSpec), ...availableTools]); - }); - - it('should return available tools if specs loading fails', async () => { - const availableTools = ['Tool1', 'Tool2']; - - loadSpecs.mockRejectedValue(new Error('Failed to load specs')); - - const result = await addOpenAPISpecs(availableTools); - expect(result).toEqual(availableTools); - }); -}); diff --git a/api/app/clients/tools/util/handleTools.js b/api/app/clients/tools/util/handleTools.js index 063d6e0327..e480dd4928 100644 --- a/api/app/clients/tools/util/handleTools.js +++ b/api/app/clients/tools/util/handleTools.js @@ -1,7 +1,7 @@ -const { Tools, Constants } = require('librechat-data-provider'); const { SerpAPI } = require('@langchain/community/tools/serpapi'); const { Calculator } = require('@langchain/community/tools/calculator'); const 
{ createCodeExecutionTool, EnvVar } = require('@librechat/agents'); +const { Tools, Constants, EToolResources } = require('librechat-data-provider'); const { getUserPluginAuthValue } = require('~/server/services/PluginService'); const { availableTools, @@ -18,12 +18,12 @@ const { StructuredWolfram, createYouTubeTools, TavilySearchResults, + createOpenAIImageTools, } = require('../'); const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process'); const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch'); const { loadAuthValues } = require('~/server/services/Tools/credentials'); const { createMCPTool } = require('~/server/services/MCP'); -const { loadSpecs } = require('./loadSpecs'); const { logger } = require('~/config'); const mcpToolPattern = new RegExp(`^.+${Constants.mcp_delimiter}.+$`); @@ -123,7 +123,7 @@ const getAuthFields = (toolKey) => { * * @param {object} object * @param {string} object.user - * @param {Agent} [object.agent] + * @param {Pick} [object.agent] * @param {string} [object.model] * @param {EModelEndpoint} [object.endpoint] * @param {LoadToolOptions} [object.options] @@ -157,7 +157,7 @@ const loadTools = async ({ }; const customConstructors = { - serpapi: async () => { + serpapi: async (_toolContextMap) => { const authFields = getAuthFields('serpapi'); let envVar = authFields[0] ?? ''; let apiKey = process.env[envVar]; @@ -170,11 +170,40 @@ const loadTools = async ({ gl: 'us', }); }, - youtube: async () => { + youtube: async (_toolContextMap) => { const authFields = getAuthFields('youtube'); const authValues = await loadAuthValues({ userId: user, authFields }); return createYouTubeTools(authValues); }, + image_gen_oai: async (toolContextMap) => { + const authFields = getAuthFields('image_gen_oai'); + const authValues = await loadAuthValues({ userId: user, authFields }); + const imageFiles = options.tool_resources?.[EToolResources.image_edit]?.files ?? 
[]; + let toolContext = ''; + for (let i = 0; i < imageFiles.length; i++) { + const file = imageFiles[i]; + if (!file) { + continue; + } + if (i === 0) { + toolContext = + 'Image files provided in this request (their image IDs listed in order of appearance) available for image editing:'; + } + toolContext += `\n\t- ${file.file_id}`; + if (i === imageFiles.length - 1) { + toolContext += `\n\nInclude any you need in the \`image_ids\` array when calling \`${EToolResources.image_edit}_oai\`. You may also include previously referenced or generated image IDs.`; + } + } + if (toolContext) { + toolContextMap.image_edit_oai = toolContext; + } + return createOpenAIImageTools({ + ...authValues, + isAgent: !!agent, + req: options.req, + imageFiles, + }); + }, }; const requestedTools = {}; @@ -200,8 +229,8 @@ const loadTools = async ({ serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' }, }; + /** @type {Record} */ const toolContextMap = {}; - const remainingTools = []; const appTools = options.req?.app?.locals?.availableTools ?? 
{}; for (const tool of tools) { @@ -246,7 +275,7 @@ const loadTools = async ({ } if (customConstructors[tool]) { - requestedTools[tool] = customConstructors[tool]; + requestedTools[tool] = async () => customConstructors[tool](toolContextMap); continue; } @@ -261,30 +290,6 @@ const loadTools = async ({ requestedTools[tool] = toolInstance; continue; } - - if (functions === true) { - remainingTools.push(tool); - } - } - - let specs = null; - if (useSpecs === true && functions === true && remainingTools.length > 0) { - specs = await loadSpecs({ - llm: model, - user, - message: options.message, - memory: options.memory, - signal: options.signal, - tools: remainingTools, - map: true, - verbose: false, - }); - } - - for (const tool of remainingTools) { - if (specs && specs[tool]) { - requestedTools[tool] = specs[tool]; - } } if (returnMap) { diff --git a/api/app/clients/tools/util/loadSpecs.js b/api/app/clients/tools/util/loadSpecs.js deleted file mode 100644 index e5b543132a..0000000000 --- a/api/app/clients/tools/util/loadSpecs.js +++ /dev/null @@ -1,117 +0,0 @@ -const fs = require('fs'); -const path = require('path'); -const { z } = require('zod'); -const { logger } = require('~/config'); -const { createOpenAPIPlugin } = require('~/app/clients/tools/dynamic/OpenAPIPlugin'); - -// The minimum Manifest definition -const ManifestDefinition = z.object({ - schema_version: z.string().optional(), - name_for_human: z.string(), - name_for_model: z.string(), - description_for_human: z.string(), - description_for_model: z.string(), - auth: z.object({}).optional(), - api: z.object({ - // Spec URL or can be the filename of the OpenAPI spec yaml file, - // located in api\app\clients\tools\.well-known\openapi - url: z.string(), - type: z.string().optional(), - is_user_authenticated: z.boolean().nullable().optional(), - has_user_authentication: z.boolean().nullable().optional(), - }), - // use to override any params that the LLM will consistently get wrong - params: 
z.object({}).optional(), - logo_url: z.string().optional(), - contact_email: z.string().optional(), - legal_info_url: z.string().optional(), -}); - -function validateJson(json) { - try { - return ManifestDefinition.parse(json); - } catch (error) { - logger.debug('[validateJson] manifest parsing error', error); - return false; - } -} - -// omit the LLM to return the well known jsons as objects -async function loadSpecs({ llm, user, message, tools = [], map = false, memory, signal }) { - const directoryPath = path.join(__dirname, '..', '.well-known'); - let files = []; - - for (let i = 0; i < tools.length; i++) { - const filePath = path.join(directoryPath, tools[i] + '.json'); - - try { - // If the access Promise is resolved, it means that the file exists - // Then we can add it to the files array - await fs.promises.access(filePath, fs.constants.F_OK); - files.push(tools[i] + '.json'); - } catch (err) { - logger.error(`[loadSpecs] File ${tools[i] + '.json'} does not exist`, err); - } - } - - if (files.length === 0) { - files = (await fs.promises.readdir(directoryPath)).filter( - (file) => path.extname(file) === '.json', - ); - } - - const validJsons = []; - const constructorMap = {}; - - logger.debug('[validateJson] files', files); - - for (const file of files) { - if (path.extname(file) === '.json') { - const filePath = path.join(directoryPath, file); - const fileContent = await fs.promises.readFile(filePath, 'utf8'); - const json = JSON.parse(fileContent); - - if (!validateJson(json)) { - logger.debug('[validateJson] Invalid json', json); - continue; - } - - if (llm && map) { - constructorMap[json.name_for_model] = async () => - await createOpenAPIPlugin({ - data: json, - llm, - message, - memory, - signal, - user, - }); - continue; - } - - if (llm) { - validJsons.push(createOpenAPIPlugin({ data: json, llm })); - continue; - } - - validJsons.push(json); - } - } - - if (map) { - return constructorMap; - } - - const plugins = (await 
Promise.all(validJsons)).filter((plugin) => plugin); - - // logger.debug('[validateJson] plugins', plugins); - // logger.debug(plugins[0].name); - - return plugins; -} - -module.exports = { - loadSpecs, - validateJson, - ManifestDefinition, -}; diff --git a/api/app/clients/tools/util/loadSpecs.spec.js b/api/app/clients/tools/util/loadSpecs.spec.js deleted file mode 100644 index 7b906d86f0..0000000000 --- a/api/app/clients/tools/util/loadSpecs.spec.js +++ /dev/null @@ -1,101 +0,0 @@ -const fs = require('fs'); -const { validateJson, loadSpecs, ManifestDefinition } = require('./loadSpecs'); -const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin'); - -jest.mock('../dynamic/OpenAPIPlugin'); - -describe('ManifestDefinition', () => { - it('should validate correct json', () => { - const json = { - name_for_human: 'Test', - name_for_model: 'Test', - description_for_human: 'Test', - description_for_model: 'Test', - api: { - url: 'http://test.com', - }, - }; - - expect(() => ManifestDefinition.parse(json)).not.toThrow(); - }); - - it('should not validate incorrect json', () => { - const json = { - name_for_human: 'Test', - name_for_model: 'Test', - description_for_human: 'Test', - description_for_model: 'Test', - api: { - url: 123, // incorrect type - }, - }; - - expect(() => ManifestDefinition.parse(json)).toThrow(); - }); -}); - -describe('validateJson', () => { - it('should return parsed json if valid', () => { - const json = { - name_for_human: 'Test', - name_for_model: 'Test', - description_for_human: 'Test', - description_for_model: 'Test', - api: { - url: 'http://test.com', - }, - }; - - expect(validateJson(json)).toEqual(json); - }); - - it('should return false if json is not valid', () => { - const json = { - name_for_human: 'Test', - name_for_model: 'Test', - description_for_human: 'Test', - description_for_model: 'Test', - api: { - url: 123, // incorrect type - }, - }; - - expect(validateJson(json)).toEqual(false); - }); -}); - -describe('loadSpecs', () 
=> { - beforeEach(() => { - jest.spyOn(fs.promises, 'readdir').mockResolvedValue(['test.json']); - jest.spyOn(fs.promises, 'readFile').mockResolvedValue( - JSON.stringify({ - name_for_human: 'Test', - name_for_model: 'Test', - description_for_human: 'Test', - description_for_model: 'Test', - api: { - url: 'http://test.com', - }, - }), - ); - createOpenAPIPlugin.mockResolvedValue({}); - }); - - afterEach(() => { - jest.restoreAllMocks(); - }); - - it('should return plugins', async () => { - const plugins = await loadSpecs({ llm: true, verbose: false }); - - expect(plugins).toHaveLength(1); - expect(createOpenAPIPlugin).toHaveBeenCalledTimes(1); - }); - - it('should return constructorMap if map is true', async () => { - const plugins = await loadSpecs({ llm: {}, map: true, verbose: false }); - - expect(plugins).toHaveProperty('Test'); - expect(createOpenAPIPlugin).not.toHaveBeenCalled(); - }); -}); diff --git a/api/cache/clearPendingReq.js b/api/cache/clearPendingReq.js index 122638d7f9..54db8e9690 100644 --- a/api/cache/clearPendingReq.js +++ b/api/cache/clearPendingReq.js @@ -1,7 +1,8 @@ +const { Time, CacheKeys } = require('librechat-data-provider'); +const { isEnabled } = require('~/server/utils'); const getLogStores = require('./getLogStores'); -const { isEnabled } = require('../server/utils'); + const { USE_REDIS, LIMIT_CONCURRENT_MESSAGES } = process.env ?? {}; -const ttl = 1000 * 60 * 1; /** * Clear or decrement pending requests from the cache. @@ -28,7 +29,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => { return; } - const namespace = 'pending_req'; + const namespace = CacheKeys.PENDING_REQ; const cache = _cache ?? getLogStores(namespace); if (!cache) { @@ -39,7 +40,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => { const currentReq = +((await cache.get(key)) ?? 
0); if (currentReq && currentReq >= 1) { - await cache.set(key, currentReq - 1, ttl); + await cache.set(key, currentReq - 1, Time.ONE_MINUTE); } else { await cache.delete(key); } diff --git a/api/cache/getLogStores.js b/api/cache/getLogStores.js index 6d5ea15a7b..612638b97b 100644 --- a/api/cache/getLogStores.js +++ b/api/cache/getLogStores.js @@ -1,4 +1,4 @@ -const Keyv = require('keyv'); +const { Keyv } = require('keyv'); const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider'); const { logFile, violationFile } = require('./keyvFiles'); const { math, isEnabled } = require('~/server/utils'); @@ -19,7 +19,7 @@ const createViolationInstance = (namespace) => { // Serve cache from memory so no need to clear it on startup/exit const pending_req = isRedisEnabled ? new Keyv({ store: keyvRedis }) - : new Keyv({ namespace: 'pending_req' }); + : new Keyv({ namespace: CacheKeys.PENDING_REQ }); const config = isRedisEnabled ? new Keyv({ store: keyvRedis }) @@ -64,7 +64,7 @@ const abortKeys = isRedisEnabled const namespaces = { [CacheKeys.ROLES]: roles, [CacheKeys.CONFIG_STORE]: config, - pending_req, + [CacheKeys.PENDING_REQ]: pending_req, [ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }), [CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, diff --git a/api/cache/ioredisClient.js b/api/cache/ioredisClient.js new file mode 100644 index 0000000000..cd48459ab4 --- /dev/null +++ b/api/cache/ioredisClient.js @@ -0,0 +1,92 @@ +const fs = require('fs'); +const Redis = require('ioredis'); +const { isEnabled } = require('~/server/utils'); +const logger = require('~/config/winston'); + +const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_MAX_LISTENERS } = process.env; + +/** @type {import('ioredis').Redis | import('ioredis').Cluster} */ +let ioredisClient; +const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40; + +function mapURI(uri) { + const regex = + 
/^(?:(?\w+):\/\/)?(?:(?[^:@]+)(?::(?[^@]+))?@)?(?[\w.-]+)(?::(?\d{1,5}))?$/; + const match = uri.match(regex); + + if (match) { + const { scheme, user, password, host, port } = match.groups; + + return { + scheme: scheme || 'none', + user: user || null, + password: password || null, + host: host || null, + port: port || null, + }; + } else { + const parts = uri.split(':'); + if (parts.length === 2) { + return { + scheme: 'none', + user: null, + password: null, + host: parts[0], + port: parts[1], + }; + } + + return { + scheme: 'none', + user: null, + password: null, + host: uri, + port: null, + }; + } +} + +if (REDIS_URI && isEnabled(USE_REDIS)) { + let redisOptions = null; + + if (REDIS_CA) { + const ca = fs.readFileSync(REDIS_CA); + redisOptions = { tls: { ca } }; + } + + if (isEnabled(USE_REDIS_CLUSTER)) { + const hosts = REDIS_URI.split(',').map((item) => { + var value = mapURI(item); + + return { + host: value.host, + port: value.port, + }; + }); + ioredisClient = new Redis.Cluster(hosts, { redisOptions }); + } else { + ioredisClient = new Redis(REDIS_URI, redisOptions); + } + + ioredisClient.on('ready', () => { + logger.info('IoRedis connection ready'); + }); + ioredisClient.on('reconnecting', () => { + logger.info('IoRedis connection reconnecting'); + }); + ioredisClient.on('end', () => { + logger.info('IoRedis connection ended'); + }); + ioredisClient.on('close', () => { + logger.info('IoRedis connection closed'); + }); + ioredisClient.on('error', (err) => logger.error('IoRedis connection error:', err)); + ioredisClient.setMaxListeners(redis_max_listeners); + logger.info( + '[Optional] IoRedis initialized for rate limiters. 
If you have issues, disable Redis or restart the server.', + ); +} else { + logger.info('[Optional] IoRedis not initialized for rate limiters.'); +} + +module.exports = ioredisClient; diff --git a/api/cache/keyvFiles.js b/api/cache/keyvFiles.js index f969174b7d..1476b60cb8 100644 --- a/api/cache/keyvFiles.js +++ b/api/cache/keyvFiles.js @@ -1,11 +1,9 @@ const { KeyvFile } = require('keyv-file'); -const logFile = new KeyvFile({ filename: './data/logs.json' }); -const pendingReqFile = new KeyvFile({ filename: './data/pendingReqCache.json' }); -const violationFile = new KeyvFile({ filename: './data/violations.json' }); +const logFile = new KeyvFile({ filename: './data/logs.json' }).setMaxListeners(20); +const violationFile = new KeyvFile({ filename: './data/violations.json' }).setMaxListeners(20); module.exports = { logFile, - pendingReqFile, violationFile, }; diff --git a/api/cache/keyvMongo.js b/api/cache/keyvMongo.js index 8f5b9fd8d8..1606e98eb8 100644 --- a/api/cache/keyvMongo.js +++ b/api/cache/keyvMongo.js @@ -1,9 +1,272 @@ -const KeyvMongo = require('@keyv/mongo'); +// api/cache/keyvMongo.js +const mongoose = require('mongoose'); +const EventEmitter = require('events'); +const { GridFSBucket } = require('mongodb'); const { logger } = require('~/config'); -const { MONGO_URI } = process.env ?? 
{}; +const storeMap = new Map(); + +class KeyvMongoCustom extends EventEmitter { + constructor(url, options = {}) { + super(); + + url = url || {}; + if (typeof url === 'string') { + url = { url }; + } + if (url.uri) { + url = { url: url.uri, ...url }; + } + + this.opts = { + url: 'mongodb://127.0.0.1:27017', + collection: 'keyv', + ...url, + ...options, + }; + + this.ttlSupport = false; + + // Filter valid options + const keyvMongoKeys = new Set([ + 'url', + 'collection', + 'namespace', + 'serialize', + 'deserialize', + 'uri', + 'useGridFS', + 'dialect', + ]); + this.opts = Object.fromEntries(Object.entries(this.opts).filter(([k]) => keyvMongoKeys.has(k))); + } + + // Helper to access the store WITHOUT storing a promise on the instance + _getClient() { + const storeKey = `${this.opts.collection}:${this.opts.useGridFS ? 'gridfs' : 'collection'}`; + + // If we already have the store initialized, return it directly + if (storeMap.has(storeKey)) { + return Promise.resolve(storeMap.get(storeKey)); + } + + // Check mongoose connection state + if (mongoose.connection.readyState !== 1) { + return Promise.reject( + new Error('Mongoose connection not ready. 
Ensure connectDb() is called first.'), + ); + } + + try { + const db = mongoose.connection.db; + let client; + + if (this.opts.useGridFS) { + const bucket = new GridFSBucket(db, { + readPreference: this.opts.readPreference, + bucketName: this.opts.collection, + }); + const store = db.collection(`${this.opts.collection}.files`); + client = { bucket, store, db }; + } else { + const collection = this.opts.collection || 'keyv'; + const store = db.collection(collection); + client = { store, db }; + } + + storeMap.set(storeKey, client); + return Promise.resolve(client); + } catch (error) { + this.emit('error', error); + return Promise.reject(error); + } + } + + async get(key) { + const client = await this._getClient(); + + if (this.opts.useGridFS) { + await client.store.updateOne( + { + filename: key, + }, + { + $set: { + 'metadata.lastAccessed': new Date(), + }, + }, + ); + + const stream = client.bucket.openDownloadStreamByName(key); + + return new Promise((resolve) => { + const resp = []; + stream.on('error', () => { + resolve(undefined); + }); + + stream.on('end', () => { + const data = Buffer.concat(resp).toString('utf8'); + resolve(data); + }); + + stream.on('data', (chunk) => { + resp.push(chunk); + }); + }); + } + + const document = await client.store.findOne({ key: { $eq: key } }); + + if (!document) { + return undefined; + } + + return document.value; + } + + async getMany(keys) { + const client = await this._getClient(); + + if (this.opts.useGridFS) { + const promises = []; + for (const key of keys) { + promises.push(this.get(key)); + } + + const values = await Promise.allSettled(promises); + const data = []; + for (const value of values) { + data.push(value.value); + } + + return data; + } + + const values = await client.store + .find({ key: { $in: keys } }) + .project({ _id: 0, value: 1, key: 1 }) + .toArray(); + + const results = [...keys]; + let i = 0; + for (const key of keys) { + const rowIndex = values.findIndex((row) => row.key === key); + results[i] = 
rowIndex > -1 ? values[rowIndex].value : undefined; + i++; + } + + return results; + } + + async set(key, value, ttl) { + const client = await this._getClient(); + const expiresAt = typeof ttl === 'number' ? new Date(Date.now() + ttl) : null; + + if (this.opts.useGridFS) { + const stream = client.bucket.openUploadStream(key, { + metadata: { + expiresAt, + lastAccessed: new Date(), + }, + }); + + return new Promise((resolve) => { + stream.on('finish', () => { + resolve(stream); + }); + stream.end(value); + }); + } + + await client.store.updateOne( + { key: { $eq: key } }, + { $set: { key, value, expiresAt } }, + { upsert: true }, + ); + } + + async delete(key) { + if (typeof key !== 'string') { + return false; + } + + const client = await this._getClient(); + + if (this.opts.useGridFS) { + try { + const bucket = new GridFSBucket(client.db, { + bucketName: this.opts.collection, + }); + const files = await bucket.find({ filename: key }).toArray(); + await client.bucket.delete(files[0]._id); + return true; + } catch { + return false; + } + } + + const object = await client.store.deleteOne({ key: { $eq: key } }); + return object.deletedCount > 0; + } + + async deleteMany(keys) { + const client = await this._getClient(); + + if (this.opts.useGridFS) { + const bucket = new GridFSBucket(client.db, { + bucketName: this.opts.collection, + }); + const files = await bucket.find({ filename: { $in: keys } }).toArray(); + if (files.length === 0) { + return false; + } + + await Promise.all(files.map(async (file) => client.bucket.delete(file._id))); + return true; + } + + const object = await client.store.deleteMany({ key: { $in: keys } }); + return object.deletedCount > 0; + } + + async clear() { + const client = await this._getClient(); + + if (this.opts.useGridFS) { + try { + await client.bucket.drop(); + } catch (error) { + // Throw error if not "namespace not found" error + if (!(error.code === 26)) { + throw error; + } + } + } + + await client.store.deleteMany({ + key: { 
$regex: this.namespace ? `^${this.namespace}:*` : '' }, + }); + } + + async has(key) { + const client = await this._getClient(); + const filter = { [this.opts.useGridFS ? 'filename' : 'key']: { $eq: key } }; + const document = await client.store.countDocuments(filter, { limit: 1 }); + return document !== 0; + } + + // No-op disconnect + async disconnect() { + // This is a no-op since we don't want to close the shared mongoose connection + return true; + } +} + +const keyvMongo = new KeyvMongoCustom({ + collection: 'logs', +}); -const keyvMongo = new KeyvMongo(MONGO_URI, { collection: 'logs' }); keyvMongo.on('error', (err) => logger.error('KeyvMongo connection error:', err)); module.exports = keyvMongo; diff --git a/api/cache/keyvRedis.js b/api/cache/keyvRedis.js index 992e789ae3..cb9d837e21 100644 --- a/api/cache/keyvRedis.js +++ b/api/cache/keyvRedis.js @@ -1,6 +1,6 @@ const fs = require('fs'); const ioredis = require('ioredis'); -const KeyvRedis = require('@keyv/redis'); +const KeyvRedis = require('@keyv/redis').default; const { isEnabled } = require('~/server/utils'); const logger = require('~/config/winston'); @@ -50,6 +50,7 @@ function mapURI(uri) { if (REDIS_URI && isEnabled(USE_REDIS)) { let redisOptions = null; + /** @type {import('@keyv/redis').KeyvRedisOptions} */ let keyvOpts = { useRedisSets: false, keyPrefix: redis_prefix, @@ -74,6 +75,25 @@ if (REDIS_URI && isEnabled(USE_REDIS)) { } else { keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts); } + + const pingInterval = setInterval(() => { + logger.debug('KeyvRedis ping'); + keyvRedis.client.ping().catch(err => logger.error('Redis keep-alive ping failed:', err)); + }, 5 * 60 * 1000); + + keyvRedis.on('ready', () => { + logger.info('KeyvRedis connection ready'); + }); + keyvRedis.on('reconnecting', () => { + logger.info('KeyvRedis connection reconnecting'); + }); + keyvRedis.on('end', () => { + logger.info('KeyvRedis connection ended'); + }); + keyvRedis.on('close', () => { + clearInterval(pingInterval); + 
logger.info('KeyvRedis connection closed'); + }); keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err)); keyvRedis.setMaxListeners(redis_max_listeners); logger.info( diff --git a/api/cache/redis.js b/api/cache/redis.js deleted file mode 100644 index adf291d02b..0000000000 --- a/api/cache/redis.js +++ /dev/null @@ -1,4 +0,0 @@ -const Redis = require('ioredis'); -const { REDIS_URI } = process.env ?? {}; -const redis = new Redis.Cluster(REDIS_URI); -module.exports = redis; diff --git a/api/config/index.js b/api/config/index.js index 919919b55f..e238f700be 100644 --- a/api/config/index.js +++ b/api/config/index.js @@ -6,26 +6,30 @@ const logger = require('./winston'); global.EventSource = EventSource; +/** @type {MCPManager} */ let mcpManager = null; let flowManager = null; /** + * @param {string} [userId] - Optional user ID, to avoid disconnecting the current user. * @returns {MCPManager} */ -function getMCPManager() { +function getMCPManager(userId) { if (!mcpManager) { mcpManager = MCPManager.getInstance(logger); + } else { + mcpManager.checkIdleConnections(userId); } return mcpManager; } /** - * @param {(key: string) => Keyv} getLogStores + * @param {Keyv} flowsCache * @returns {FlowStateManager} */ -function getFlowStateManager(getLogStores) { +function getFlowStateManager(flowsCache) { if (!flowManager) { - flowManager = new FlowStateManager(getLogStores(CacheKeys.FLOWS), { + flowManager = new FlowStateManager(flowsCache, { ttl: Time.ONE_MINUTE * 3, logger, }); diff --git a/api/jest.config.js b/api/jest.config.js index ec44bd7f56..2df7790b7b 100644 --- a/api/jest.config.js +++ b/api/jest.config.js @@ -5,7 +5,6 @@ module.exports = { coverageDirectory: 'coverage', setupFiles: [ './test/jestSetup.js', - './test/__mocks__/KeyvMongo.js', './test/__mocks__/logger.js', './test/__mocks__/fetchEventSource.js', ], diff --git a/api/lib/utils/reduceHits.js b/api/lib/utils/reduceHits.js deleted file mode 100644 index 77b2f9d57d..0000000000 --- 
a/api/lib/utils/reduceHits.js +++ /dev/null @@ -1,59 +0,0 @@ -const mergeSort = require('./mergeSort'); -const { cleanUpPrimaryKeyValue } = require('./misc'); - -function reduceMessages(hits) { - const counts = {}; - - for (const hit of hits) { - if (!counts[hit.conversationId]) { - counts[hit.conversationId] = 1; - } else { - counts[hit.conversationId]++; - } - } - - const result = []; - - for (const [conversationId, count] of Object.entries(counts)) { - result.push({ - conversationId, - count, - }); - } - - return mergeSort(result, (a, b) => b.count - a.count); -} - -function reduceHits(hits, titles = []) { - const counts = {}; - const titleMap = {}; - const convos = [...hits, ...titles]; - - for (const convo of convos) { - const currentId = cleanUpPrimaryKeyValue(convo.conversationId); - if (!counts[currentId]) { - counts[currentId] = 1; - } else { - counts[currentId]++; - } - - if (convo.title) { - // titleMap[currentId] = convo._formatted.title; - titleMap[currentId] = convo.title; - } - } - - const result = []; - - for (const [conversationId, count] of Object.entries(counts)) { - result.push({ - conversationId, - count, - title: titleMap[conversationId] ? 
titleMap[conversationId] : null, - }); - } - - return mergeSort(result, (a, b) => b.count - a.count); -} - -module.exports = { reduceMessages, reduceHits }; diff --git a/api/models/Agent.js b/api/models/Agent.js index dba0c40ee9..9b34eeae65 100644 --- a/api/models/Agent.js +++ b/api/models/Agent.js @@ -153,9 +153,11 @@ const updateAgent = async (searchParameter, updateData) => { */ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => { const searchParameter = { id: agent_id }; - + let agent = await getAgent(searchParameter); + if (!agent) { + throw new Error('Agent not found for adding resource file'); + } const fileIdsPath = `tool_resources.${tool_resource}.file_ids`; - await Agent.updateOne( { id: agent_id, @@ -168,7 +170,12 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => { }, ); - const updateData = { $addToSet: { [fileIdsPath]: file_id } }; + const updateData = { + $addToSet: { + tools: tool_resource, + [fileIdsPath]: file_id, + }, + }; const updatedAgent = await updateAgent(searchParameter, updateData); if (updatedAgent) { @@ -301,7 +308,7 @@ const getListAgents = async (searchParameter) => { * This function also updates the corresponding projects to include or exclude the agent ID. * * @param {Object} params - Parameters for updating the agent's projects. - * @param {import('librechat-data-provider').TUser} params.user - Parameters for updating the agent's projects. + * @param {MongoUser} params.user - Parameters for updating the agent's projects. * @param {string} params.agentId - The ID of the agent to update. * @param {string[]} [params.projectIds] - Array of project IDs to add to the agent. * @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent. 
diff --git a/api/models/Agent.spec.js b/api/models/Agent.spec.js index 0e6d1831ff..051cb6800f 100644 --- a/api/models/Agent.spec.js +++ b/api/models/Agent.spec.js @@ -33,6 +33,50 @@ describe('Agent Resource File Operations', () => { return agent; }; + test('should add tool_resource to tools if missing', async () => { + const agent = await createBasicAgent(); + const fileId = uuidv4(); + const toolResource = 'file_search'; + + const updatedAgent = await addAgentResourceFile({ + agent_id: agent.id, + tool_resource: toolResource, + file_id: fileId, + }); + + expect(updatedAgent.tools).toContain(toolResource); + expect(Array.isArray(updatedAgent.tools)).toBe(true); + // Should not duplicate + const count = updatedAgent.tools.filter((t) => t === toolResource).length; + expect(count).toBe(1); + }); + + test('should not duplicate tool_resource in tools if already present', async () => { + const agent = await createBasicAgent(); + const fileId1 = uuidv4(); + const fileId2 = uuidv4(); + const toolResource = 'file_search'; + + // First add + await addAgentResourceFile({ + agent_id: agent.id, + tool_resource: toolResource, + file_id: fileId1, + }); + + // Second add (should not duplicate) + const updatedAgent = await addAgentResourceFile({ + agent_id: agent.id, + tool_resource: toolResource, + file_id: fileId2, + }); + + expect(updatedAgent.tools).toContain(toolResource); + expect(Array.isArray(updatedAgent.tools)).toBe(true); + const count = updatedAgent.tools.filter((t) => t === toolResource).length; + expect(count).toBe(1); + }); + test('should handle concurrent file additions', async () => { const agent = await createBasicAgent(); const fileIds = Array.from({ length: 10 }, () => uuidv4()); diff --git a/api/models/Conversation.js b/api/models/Conversation.js index dd6ef9bde1..51081a6491 100644 --- a/api/models/Conversation.js +++ b/api/models/Conversation.js @@ -88,11 +88,13 @@ module.exports = { */ saveConvo: async (req, { conversationId, newConversationId, ...convo }, 
metadata) => { try { - if (metadata && metadata?.context) { + if (metadata?.context) { logger.debug(`[saveConvo] ${metadata.context}`); } + const messages = await getMessages({ conversationId }, '_id'); const update = { ...convo, messages, user: req.user.id }; + if (newConversationId) { update.conversationId = newConversationId; } @@ -148,75 +150,102 @@ module.exports = { throw new Error('Failed to save conversations in bulk.'); } }, - getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false, tags) => { - const query = { user }; + getConvosByCursor: async ( + user, + { cursor, limit = 25, isArchived = false, tags, search, order = 'desc' } = {}, + ) => { + const filters = [{ user }]; + if (isArchived) { - query.isArchived = true; + filters.push({ isArchived: true }); } else { - query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }]; - } - if (Array.isArray(tags) && tags.length > 0) { - query.tags = { $in: tags }; + filters.push({ $or: [{ isArchived: false }, { isArchived: { $exists: false } }] }); } - query.$and = [{ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] }]; + if (Array.isArray(tags) && tags.length > 0) { + filters.push({ tags: { $in: tags } }); + } + + filters.push({ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] }); + + if (search) { + try { + const meiliResults = await Conversation.meiliSearch(search); + const matchingIds = Array.isArray(meiliResults.hits) + ? meiliResults.hits.map((result) => result.conversationId) + : []; + if (!matchingIds.length) { + return { conversations: [], nextCursor: null }; + } + filters.push({ conversationId: { $in: matchingIds } }); + } catch (error) { + logger.error('[getConvosByCursor] Error during meiliSearch', error); + return { message: 'Error during meiliSearch' }; + } + } + + if (cursor) { + filters.push({ updatedAt: { $lt: new Date(cursor) } }); + } + + const query = filters.length === 1 ? 
filters[0] : { $and: filters }; try { - const totalConvos = (await Conversation.countDocuments(query)) || 1; - const totalPages = Math.ceil(totalConvos / pageSize); const convos = await Conversation.find(query) - .sort({ updatedAt: -1 }) - .skip((pageNumber - 1) * pageSize) - .limit(pageSize) + .select( + 'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL', + ) + .sort({ updatedAt: order === 'asc' ? 1 : -1 }) + .limit(limit + 1) .lean(); - return { conversations: convos, pages: totalPages, pageNumber, pageSize }; + + let nextCursor = null; + if (convos.length > limit) { + const lastConvo = convos.pop(); + nextCursor = lastConvo.updatedAt.toISOString(); + } + + return { conversations: convos, nextCursor }; } catch (error) { - logger.error('[getConvosByPage] Error getting conversations', error); + logger.error('[getConvosByCursor] Error getting conversations', error); return { message: 'Error getting conversations' }; } }, - getConvosQueried: async (user, convoIds, pageNumber = 1, pageSize = 25) => { + getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => { try { - if (!convoIds || convoIds.length === 0) { - return { conversations: [], pages: 1, pageNumber, pageSize }; + if (!convoIds?.length) { + return { conversations: [], nextCursor: null, convoMap: {} }; + } + + const conversationIds = convoIds.map((convo) => convo.conversationId); + + const results = await Conversation.find({ + user, + conversationId: { $in: conversationIds }, + $or: [{ expiredAt: { $exists: false } }, { expiredAt: null }], + }).lean(); + + results.sort((a, b) => new Date(b.updatedAt) - new Date(a.updatedAt)); + + let filtered = results; + if (cursor && cursor !== 'start') { + const cursorDate = new Date(cursor); + filtered = results.filter((convo) => new Date(convo.updatedAt) < cursorDate); + } + + const limited = filtered.slice(0, limit + 1); + let nextCursor = null; + if (limited.length > limit) { + const lastConvo = 
limited.pop(); + nextCursor = lastConvo.updatedAt.toISOString(); } - const cache = {}; const convoMap = {}; - const promises = []; - - convoIds.forEach((convo) => - promises.push( - Conversation.findOne({ - user, - conversationId: convo.conversationId, - $or: [{ expiredAt: { $exists: false } }, { expiredAt: null }], - }).lean(), - ), - ); - - const results = (await Promise.all(promises)).filter(Boolean); - - results.forEach((convo, i) => { - const page = Math.floor(i / pageSize) + 1; - if (!cache[page]) { - cache[page] = []; - } - cache[page].push(convo); + limited.forEach((convo) => { convoMap[convo.conversationId] = convo; }); - const totalPages = Math.ceil(results.length / pageSize); - cache.pages = totalPages; - cache.pageSize = pageSize; - return { - cache, - conversations: cache[pageNumber] || [], - pages: totalPages || 1, - pageNumber, - pageSize, - convoMap, - }; + return { conversations: limited, nextCursor, convoMap }; } catch (error) { logger.error('[getConvosQueried] Error getting conversations', error); return { message: 'Error fetching conversations' }; @@ -257,10 +286,26 @@ module.exports = { * logger.error(result); // { n: 5, ok: 1, deletedCount: 5, messages: { n: 10, ok: 1, deletedCount: 10 } } */ deleteConvos: async (user, filter) => { - let toRemove = await Conversation.find({ ...filter, user }).select('conversationId'); - const ids = toRemove.map((instance) => instance.conversationId); - let deleteCount = await Conversation.deleteMany({ ...filter, user }); - deleteCount.messages = await deleteMessages({ conversationId: { $in: ids } }); - return deleteCount; + try { + const userFilter = { ...filter, user }; + + const conversations = await Conversation.find(userFilter).select('conversationId'); + const conversationIds = conversations.map((c) => c.conversationId); + + if (!conversationIds.length) { + throw new Error('Conversation not found or already deleted.'); + } + + const deleteConvoResult = await Conversation.deleteMany(userFilter); + + const 
deleteMessagesResult = await deleteMessages({ + conversationId: { $in: conversationIds }, + }); + + return { ...deleteConvoResult, messages: deleteMessagesResult }; + } catch (error) { + logger.error('[deleteConvos] Error deleting conversations and messages', error); + throw error; + } }, }; diff --git a/api/models/File.js b/api/models/File.js index 87c91003e2..4d94994478 100644 --- a/api/models/File.js +++ b/api/models/File.js @@ -1,4 +1,5 @@ const mongoose = require('mongoose'); +const { EToolResources } = require('librechat-data-provider'); const { fileSchema } = require('@librechat/data-schemas'); const { logger } = require('~/config'); @@ -8,7 +9,7 @@ const File = mongoose.model('File', fileSchema); * Finds a file by its file_id with additional query options. * @param {string} file_id - The unique identifier of the file. * @param {object} options - Query options for filtering, projection, etc. - * @returns {Promise} A promise that resolves to the file document or null. + * @returns {Promise} A promise that resolves to the file document or null. */ const findFileById = async (file_id, options = {}) => { return await File.findOne({ file_id, ...options }).lean(); @@ -20,7 +21,7 @@ const findFileById = async (file_id, options = {}) => { * @param {Object} [_sortOptions] - Optional sort parameters. * @param {Object|String} [selectFields={ text: 0 }] - Fields to include/exclude in the query results. * Default excludes the 'text' field. - * @returns {Promise>} A promise that resolves to an array of file documents. + * @returns {Promise>} A promise that resolves to an array of file documents. 
*/ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => { const sortOptions = { updatedAt: -1, ..._sortOptions }; @@ -30,9 +31,10 @@ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => { /** * Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs * @param {string[]} fileIds - Array of file_id strings to search for - * @returns {Promise>} Files that match the criteria + * @param {Set} toolResourceSet - Optional filter for tool resources + * @returns {Promise>} Files that match the criteria */ -const getToolFilesByIds = async (fileIds) => { +const getToolFilesByIds = async (fileIds, toolResourceSet) => { if (!fileIds || !fileIds.length) { return []; } @@ -40,9 +42,19 @@ const getToolFilesByIds = async (fileIds) => { try { const filter = { file_id: { $in: fileIds }, - $or: [{ embedded: true }, { 'metadata.fileIdentifier': { $exists: true } }], }; + if (toolResourceSet.size) { + filter.$or = []; + } + + if (toolResourceSet.has(EToolResources.file_search)) { + filter.$or.push({ embedded: true }); + } + if (toolResourceSet.has(EToolResources.execute_code)) { + filter.$or.push({ 'metadata.fileIdentifier': { $exists: true } }); + } + const selectFields = { text: 0 }; const sortOptions = { updatedAt: -1 }; @@ -55,9 +67,9 @@ const getToolFilesByIds = async (fileIds) => { /** * Creates a new file with a TTL of 1 hour. - * @param {IMongoFile} data - The file data to be created, must contain file_id. + * @param {MongoFile} data - The file data to be created, must contain file_id. * @param {boolean} disableTTL - Whether to disable the TTL. - * @returns {Promise} A promise that resolves to the created file document. + * @returns {Promise} A promise that resolves to the created file document. 
*/ const createFile = async (data, disableTTL) => { const fileData = { @@ -77,8 +89,8 @@ const createFile = async (data, disableTTL) => { /** * Updates a file identified by file_id with new data and removes the TTL. - * @param {IMongoFile} data - The data to update, must contain file_id. - * @returns {Promise} A promise that resolves to the updated file document. + * @param {MongoFile} data - The data to update, must contain file_id. + * @returns {Promise} A promise that resolves to the updated file document. */ const updateFile = async (data) => { const { file_id, ...update } = data; @@ -91,8 +103,8 @@ const updateFile = async (data) => { /** * Increments the usage of a file identified by file_id. - * @param {IMongoFile} data - The data to update, must contain file_id and the increment value for usage. - * @returns {Promise} A promise that resolves to the updated file document. + * @param {MongoFile} data - The data to update, must contain file_id and the increment value for usage. + * @returns {Promise} A promise that resolves to the updated file document. */ const updateFileUsage = async (data) => { const { file_id, inc = 1 } = data; @@ -106,7 +118,7 @@ const updateFileUsage = async (data) => { /** * Deletes a file identified by file_id. * @param {string} file_id - The unique identifier of the file to delete. - * @returns {Promise} A promise that resolves to the deleted file document or null. + * @returns {Promise} A promise that resolves to the deleted file document or null. */ const deleteFile = async (file_id) => { return await File.findOneAndDelete({ file_id }).lean(); @@ -115,7 +127,7 @@ const deleteFile = async (file_id) => { /** * Deletes a file identified by a filter. * @param {object} filter - The filter criteria to apply. - * @returns {Promise} A promise that resolves to the deleted file document or null. + * @returns {Promise} A promise that resolves to the deleted file document or null. 
*/ const deleteFileByFilter = async (filter) => { return await File.findOneAndDelete(filter).lean(); diff --git a/api/models/Message.js b/api/models/Message.js index 58068813ef..86fd2fd549 100644 --- a/api/models/Message.js +++ b/api/models/Message.js @@ -61,6 +61,14 @@ async function saveMessage(req, params, metadata) { update.expiredAt = null; } + if (update.tokenCount != null && isNaN(update.tokenCount)) { + logger.warn( + `Resetting invalid \`tokenCount\` for message \`${params.messageId}\`: ${update.tokenCount}`, + ); + logger.info(`---\`saveMessage\` context: ${metadata?.context}`); + update.tokenCount = 0; + } + const message = await Message.findOneAndUpdate( { messageId: params.messageId, user: req.user.id }, update, @@ -97,7 +105,9 @@ async function saveMessage(req, params, metadata) { }; } catch (findError) { // If the findOne also fails, log it but don't crash - logger.warn(`Could not retrieve existing message with ID ${params.messageId}: ${findError.message}`); + logger.warn( + `Could not retrieve existing message with ID ${params.messageId}: ${findError.message}`, + ); return { ...params, messageId: params.messageId, diff --git a/api/models/Share.js b/api/models/Share.js index a8bfbce7fe..8611d01bc0 100644 --- a/api/models/Share.js +++ b/api/models/Share.js @@ -52,6 +52,14 @@ function anonymizeMessages(messages, newConvoId) { const newMessageId = anonymizeMessageId(message.messageId); idMap.set(message.messageId, newMessageId); + const anonymizedAttachments = message.attachments?.map((attachment) => { + return { + ...attachment, + messageId: newMessageId, + conversationId: newConvoId, + }; + }); + return { ...message, messageId: newMessageId, @@ -61,6 +69,7 @@ function anonymizeMessages(messages, newConvoId) { model: message.model?.startsWith('asst_') ? 
anonymizeAssistantId(message.model) : message.model, + attachments: anonymizedAttachments, }; }); } diff --git a/api/models/tx.js b/api/models/tx.js index 41003e665c..df88390b17 100644 --- a/api/models/tx.js +++ b/api/models/tx.js @@ -76,10 +76,15 @@ const tokenValues = Object.assign( '4k': { prompt: 1.5, completion: 2 }, '16k': { prompt: 3, completion: 4 }, 'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 }, + 'o4-mini': { prompt: 1.1, completion: 4.4 }, 'o3-mini': { prompt: 1.1, completion: 4.4 }, + o3: { prompt: 10, completion: 40 }, 'o1-mini': { prompt: 1.1, completion: 4.4 }, 'o1-preview': { prompt: 15, completion: 60 }, o1: { prompt: 15, completion: 60 }, + 'gpt-4.1-nano': { prompt: 0.1, completion: 0.4 }, + 'gpt-4.1-mini': { prompt: 0.4, completion: 1.6 }, + 'gpt-4.1': { prompt: 2, completion: 8 }, 'gpt-4.5': { prompt: 75, completion: 150 }, 'gpt-4o-mini': { prompt: 0.15, completion: 0.6 }, 'gpt-4o': { prompt: 2.5, completion: 10 }, @@ -106,10 +111,15 @@ const tokenValues = Object.assign( /* cohere doesn't have rates for the older command models, so this was from https://artificialanalysis.ai/models/command-light/providers */ command: { prompt: 0.38, completion: 0.38 }, + gemma: { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing + 'gemma-2': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing + 'gemma-3': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing + 'gemma-3-27b': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing 'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 }, 'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 }, 'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing - 'gemini-2.5-pro-preview-03-25': { prompt: 1.25, completion: 10 }, + 'gemini-2.5-pro': { prompt: 1.25, completion: 10 }, + 'gemini-2.5-flash': { prompt: 0.15, completion: 3.5 }, 'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time 'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 
}, 'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 }, @@ -123,6 +133,10 @@ const tokenValues = Object.assign( 'grok-2-1212': { prompt: 2.0, completion: 10.0 }, 'grok-2-latest': { prompt: 2.0, completion: 10.0 }, 'grok-2': { prompt: 2.0, completion: 10.0 }, + 'grok-3-mini-fast': { prompt: 0.4, completion: 4 }, + 'grok-3-mini': { prompt: 0.3, completion: 0.5 }, + 'grok-3-fast': { prompt: 5.0, completion: 25.0 }, + 'grok-3': { prompt: 3.0, completion: 15.0 }, 'grok-beta': { prompt: 5.0, completion: 15.0 }, 'mistral-large': { prompt: 2.0, completion: 6.0 }, 'pixtral-large': { prompt: 2.0, completion: 6.0 }, @@ -171,6 +185,14 @@ const getValueKey = (model, endpoint) => { return 'gpt-3.5-turbo-1106'; } else if (modelName.includes('gpt-3.5')) { return '4k'; + } else if (modelName.includes('o4-mini')) { + return 'o4-mini'; + } else if (modelName.includes('o4')) { + return 'o4'; + } else if (modelName.includes('o3-mini')) { + return 'o3-mini'; + } else if (modelName.includes('o3')) { + return 'o3'; } else if (modelName.includes('o1-preview')) { return 'o1-preview'; } else if (modelName.includes('o1-mini')) { @@ -179,6 +201,12 @@ const getValueKey = (model, endpoint) => { return 'o1'; } else if (modelName.includes('gpt-4.5')) { return 'gpt-4.5'; + } else if (modelName.includes('gpt-4.1-nano')) { + return 'gpt-4.1-nano'; + } else if (modelName.includes('gpt-4.1-mini')) { + return 'gpt-4.1-mini'; + } else if (modelName.includes('gpt-4.1')) { + return 'gpt-4.1'; } else if (modelName.includes('gpt-4o-2024-05-13')) { return 'gpt-4o-2024-05-13'; } else if (modelName.includes('gpt-4o-mini')) { diff --git a/api/models/tx.spec.js b/api/models/tx.spec.js index f612e222bb..97a730232d 100644 --- a/api/models/tx.spec.js +++ b/api/models/tx.spec.js @@ -60,6 +60,30 @@ describe('getValueKey', () => { expect(getValueKey('gpt-4.5-0125')).toBe('gpt-4.5'); }); + it('should return "gpt-4.1" for model type of "gpt-4.1"', () => { + expect(getValueKey('gpt-4.1-preview')).toBe('gpt-4.1'); + 
expect(getValueKey('gpt-4.1-2024-08-06')).toBe('gpt-4.1'); + expect(getValueKey('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1'); + expect(getValueKey('openai/gpt-4.1')).toBe('gpt-4.1'); + expect(getValueKey('openai/gpt-4.1-2024-08-06')).toBe('gpt-4.1'); + expect(getValueKey('gpt-4.1-turbo')).toBe('gpt-4.1'); + expect(getValueKey('gpt-4.1-0125')).toBe('gpt-4.1'); + }); + + it('should return "gpt-4.1-mini" for model type of "gpt-4.1-mini"', () => { + expect(getValueKey('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini'); + expect(getValueKey('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini'); + expect(getValueKey('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini'); + expect(getValueKey('gpt-4.1-mini-0125')).toBe('gpt-4.1-mini'); + }); + + it('should return "gpt-4.1-nano" for model type of "gpt-4.1-nano"', () => { + expect(getValueKey('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano'); + expect(getValueKey('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano'); + expect(getValueKey('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano'); + expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano'); + }); + it('should return "gpt-4o" for model type of "gpt-4o"', () => { expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o'); expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o'); @@ -141,6 +165,15 @@ describe('getMultiplier', () => { ); }); + it('should return correct multipliers for o4-mini and o3', () => { + ['o4-mini', 'o3'].forEach((model) => { + const prompt = getMultiplier({ model, tokenType: 'prompt' }); + const completion = getMultiplier({ model, tokenType: 'completion' }); + expect(prompt).toBe(tokenValues[model].prompt); + expect(completion).toBe(tokenValues[model].completion); + }); + }); + it('should return defaultRate if tokenType is provided but not found in tokenValues', () => { expect(getMultiplier({ valueKey: '8k', tokenType: 'unknownType' })).toBe(defaultRate); }); @@ -185,6 +218,52 @@ describe('getMultiplier', () => { ); }); + it('should return the correct multiplier for gpt-4.1', 
() => { + const valueKey = getValueKey('gpt-4.1-2024-08-06'); + expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4.1'].prompt); + expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1'].completion, + ); + expect(getMultiplier({ model: 'gpt-4.1-preview', tokenType: 'prompt' })).toBe( + tokenValues['gpt-4.1'].prompt, + ); + expect(getMultiplier({ model: 'openai/gpt-4.1', tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1'].completion, + ); + }); + + it('should return the correct multiplier for gpt-4.1-mini', () => { + const valueKey = getValueKey('gpt-4.1-mini-2024-08-06'); + expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe( + tokenValues['gpt-4.1-mini'].prompt, + ); + expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1-mini'].completion, + ); + expect(getMultiplier({ model: 'gpt-4.1-mini-preview', tokenType: 'prompt' })).toBe( + tokenValues['gpt-4.1-mini'].prompt, + ); + expect(getMultiplier({ model: 'openai/gpt-4.1-mini', tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1-mini'].completion, + ); + }); + + it('should return the correct multiplier for gpt-4.1-nano', () => { + const valueKey = getValueKey('gpt-4.1-nano-2024-08-06'); + expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe( + tokenValues['gpt-4.1-nano'].prompt, + ); + expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1-nano'].completion, + ); + expect(getMultiplier({ model: 'gpt-4.1-nano-preview', tokenType: 'prompt' })).toBe( + tokenValues['gpt-4.1-nano'].prompt, + ); + expect(getMultiplier({ model: 'openai/gpt-4.1-nano', tokenType: 'completion' })).toBe( + tokenValues['gpt-4.1-nano'].completion, + ); + }); + it('should return the correct multiplier for gpt-4o-mini', () => { const valueKey = getValueKey('gpt-4o-mini-2024-07-18'); expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe( @@ -348,9 +427,11 @@ 
describe('getCacheMultiplier', () => { it('should derive the valueKey from the model if not provided', () => { expect(getCacheMultiplier({ cacheType: 'write', model: 'claude-3-5-sonnet-20240620' })).toBe( - 3.75, + cacheTokenValues['claude-3-5-sonnet'].write, + ); + expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe( + cacheTokenValues['claude-3-haiku'].read, ); - expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe(0.03); }); it('should return null if only model or cacheType is missing', () => { @@ -371,10 +452,10 @@ describe('getCacheMultiplier', () => { }; expect( getCacheMultiplier({ model: 'custom-model', cacheType: 'write', endpointTokenConfig }), - ).toBe(5); + ).toBe(endpointTokenConfig['custom-model'].write); expect( getCacheMultiplier({ model: 'custom-model', cacheType: 'read', endpointTokenConfig }), - ).toBe(1); + ).toBe(endpointTokenConfig['custom-model'].read); }); it('should return null if model is not found in endpointTokenConfig', () => { @@ -395,18 +476,21 @@ describe('getCacheMultiplier', () => { model: 'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0', cacheType: 'write', }), - ).toBe(3.75); + ).toBe(cacheTokenValues['claude-3-5-sonnet'].write); expect( getCacheMultiplier({ model: 'bedrock/anthropic.claude-3-haiku-20240307-v1:0', cacheType: 'read', }), - ).toBe(0.03); + ).toBe(cacheTokenValues['claude-3-haiku'].read); }); }); describe('Google Model Tests', () => { const googleModels = [ + 'gemini-2.5-pro-preview-05-06', + 'gemini-2.5-flash-preview-04-17', + 'gemini-2.5-exp', 'gemini-2.0-flash-lite-preview-02-05', 'gemini-2.0-flash-001', 'gemini-2.0-flash-exp', @@ -444,6 +528,9 @@ describe('Google Model Tests', () => { it('should map to the correct model keys', () => { const expected = { + 'gemini-2.5-pro-preview-05-06': 'gemini-2.5-pro', + 'gemini-2.5-flash-preview-04-17': 'gemini-2.5-flash', + 'gemini-2.5-exp': 'gemini-2.5', 'gemini-2.0-flash-lite-preview-02-05': 
'gemini-2.0-flash-lite', 'gemini-2.0-flash-001': 'gemini-2.0-flash', 'gemini-2.0-flash-exp': 'gemini-2.0-flash', @@ -488,24 +575,92 @@ describe('Grok Model Tests - Pricing', () => { test('should return correct prompt and completion rates for Grok vision models', () => { const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest']; models.forEach((model) => { - expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0); - expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0); + expect(getMultiplier({ model, tokenType: 'prompt' })).toBe( + tokenValues['grok-2-vision'].prompt, + ); + expect(getMultiplier({ model, tokenType: 'completion' })).toBe( + tokenValues['grok-2-vision'].completion, + ); }); }); test('should return correct prompt and completion rates for Grok text models', () => { const models = ['grok-2-1212', 'grok-2', 'grok-2-latest']; models.forEach((model) => { - expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0); - expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0); + expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues['grok-2'].prompt); + expect(getMultiplier({ model, tokenType: 'completion' })).toBe( + tokenValues['grok-2'].completion, + ); }); }); test('should return correct prompt and completion rates for Grok beta models', () => { - expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(5.0); - expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(15.0); - expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(5.0); - expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(15.0); + expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe( + tokenValues['grok-vision-beta'].prompt, + ); + expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe( + tokenValues['grok-vision-beta'].completion, + ); + expect(getMultiplier({ model: 
'grok-beta', tokenType: 'prompt' })).toBe( + tokenValues['grok-beta'].prompt, + ); + expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe( + tokenValues['grok-beta'].completion, + ); + }); + + test('should return correct prompt and completion rates for Grok 3 models', () => { + expect(getMultiplier({ model: 'grok-3', tokenType: 'prompt' })).toBe( + tokenValues['grok-3'].prompt, + ); + expect(getMultiplier({ model: 'grok-3', tokenType: 'completion' })).toBe( + tokenValues['grok-3'].completion, + ); + expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-fast'].prompt, + ); + expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'completion' })).toBe( + tokenValues['grok-3-fast'].completion, + ); + expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-mini'].prompt, + ); + expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'completion' })).toBe( + tokenValues['grok-3-mini'].completion, + ); + expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-mini-fast'].prompt, + ); + expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'completion' })).toBe( + tokenValues['grok-3-mini-fast'].completion, + ); + }); + + test('should return correct prompt and completion rates for Grok 3 models with prefixes', () => { + expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'prompt' })).toBe( + tokenValues['grok-3'].prompt, + ); + expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'completion' })).toBe( + tokenValues['grok-3'].completion, + ); + expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-fast'].prompt, + ); + expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'completion' })).toBe( + tokenValues['grok-3-fast'].completion, + ); + expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-mini'].prompt, + 
); + expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'completion' })).toBe( + tokenValues['grok-3-mini'].completion, + ); + expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'prompt' })).toBe( + tokenValues['grok-3-mini-fast'].prompt, + ); + expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'completion' })).toBe( + tokenValues['grok-3-mini-fast'].completion, + ); }); }); }); diff --git a/api/package.json b/api/package.json index 19bad78cf8..bcf94a6cad 100644 --- a/api/package.json +++ b/api/package.json @@ -1,6 +1,6 @@ { "name": "@librechat/backend", - "version": "v0.7.7", + "version": "v0.7.8", "description": "", "scripts": { "start": "echo 'please run this from the root directory'", @@ -39,17 +39,16 @@ "@aws-sdk/s3-request-presigner": "^3.758.0", "@azure/identity": "^4.7.0", "@azure/search-documents": "^12.0.0", - "@azure/storage-blob": "^12.26.0", + "@azure/storage-blob": "^12.27.0", "@google/generative-ai": "^0.23.0", "@googleapis/youtube": "^20.0.0", - "@keyv/mongo": "^2.1.8", - "@keyv/redis": "^2.8.1", - "@langchain/community": "^0.3.39", - "@langchain/core": "^0.3.43", - "@langchain/google-genai": "^0.2.2", - "@langchain/google-vertexai": "^0.2.3", + "@keyv/redis": "^4.3.3", + "@langchain/community": "^0.3.42", + "@langchain/core": "^0.3.55", + "@langchain/google-genai": "^0.2.8", + "@langchain/google-vertexai": "^0.2.8", "@langchain/textsplitters": "^0.1.0", - "@librechat/agents": "^2.4.12", + "@librechat/agents": "^2.4.317", "@librechat/data-schemas": "*", "@waylaidwanderer/fetch-event-source": "^3.0.1", "axios": "^1.8.2", @@ -76,8 +75,8 @@ "ioredis": "^5.3.2", "js-yaml": "^4.1.0", "jsonwebtoken": "^9.0.0", - "keyv": "^4.5.4", - "keyv-file": "^0.2.0", + "keyv": "^5.3.2", + "keyv-file": "^5.1.2", "klona": "^2.0.6", "librechat-data-provider": "*", "librechat-mcp": "*", @@ -91,7 +90,7 @@ "nanoid": "^3.3.7", "nodemailer": "^6.9.15", "ollama": "^0.5.0", - "openai": "^4.47.1", + "openai": "^4.96.2", "openai-chat-tokens": 
"^0.2.8", "openid-client": "^5.4.2", "passport": "^0.6.0", @@ -117,6 +116,6 @@ "jest": "^29.7.0", "mongodb-memory-server": "^10.1.3", "nodemon": "^3.0.3", - "supertest": "^7.0.0" + "supertest": "^7.1.0" } } diff --git a/api/server/cleanup.js b/api/server/cleanup.js new file mode 100644 index 0000000000..6d5b77196a --- /dev/null +++ b/api/server/cleanup.js @@ -0,0 +1,387 @@ +const { logger } = require('~/config'); + +// WeakMap to hold temporary data associated with requests +const requestDataMap = new WeakMap(); + +const FinalizationRegistry = global.FinalizationRegistry || null; + +/** + * FinalizationRegistry to clean up client objects when they are garbage collected. + * This is used to prevent memory leaks and ensure that client objects are + * properly disposed of when they are no longer needed. + * The registry holds a weak reference to the client object and a cleanup + * callback that is called when the client object is garbage collected. + * The callback can be used to perform any necessary cleanup operations, + * such as removing event listeners or freeing up resources. + */ +const clientRegistry = FinalizationRegistry + ? new FinalizationRegistry((heldValue) => { + try { + // This will run when the client is garbage collected + if (heldValue && heldValue.userId) { + logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`); + } else { + logger.debug('[FinalizationRegistry] Cleaning up client'); + } + } catch (e) { + // Ignore errors + } + }) + : null; + +/** + * Cleans up the client object by removing references to its properties. + * This is useful for preventing memory leaks and ensuring that the client + * and its properties can be garbage collected when it is no longer needed. 
+ */ +function disposeClient(client) { + if (!client) { + return; + } + + try { + if (client.user) { + client.user = null; + } + if (client.apiKey) { + client.apiKey = null; + } + if (client.azure) { + client.azure = null; + } + if (client.conversationId) { + client.conversationId = null; + } + if (client.responseMessageId) { + client.responseMessageId = null; + } + if (client.message_file_map) { + client.message_file_map = null; + } + if (client.clientName) { + client.clientName = null; + } + if (client.sender) { + client.sender = null; + } + if (client.model) { + client.model = null; + } + if (client.maxContextTokens) { + client.maxContextTokens = null; + } + if (client.contextStrategy) { + client.contextStrategy = null; + } + if (client.currentDateString) { + client.currentDateString = null; + } + if (client.inputTokensKey) { + client.inputTokensKey = null; + } + if (client.outputTokensKey) { + client.outputTokensKey = null; + } + if (client.skipSaveUserMessage !== undefined) { + client.skipSaveUserMessage = null; + } + if (client.visionMode) { + client.visionMode = null; + } + if (client.continued !== undefined) { + client.continued = null; + } + if (client.fetchedConvo !== undefined) { + client.fetchedConvo = null; + } + if (client.previous_summary) { + client.previous_summary = null; + } + if (client.metadata) { + client.metadata = null; + } + if (client.isVisionModel) { + client.isVisionModel = null; + } + if (client.isChatCompletion !== undefined) { + client.isChatCompletion = null; + } + if (client.contextHandlers) { + client.contextHandlers = null; + } + if (client.augmentedPrompt) { + client.augmentedPrompt = null; + } + if (client.systemMessage) { + client.systemMessage = null; + } + if (client.azureEndpoint) { + client.azureEndpoint = null; + } + if (client.langchainProxy) { + client.langchainProxy = null; + } + if (client.isOmni !== undefined) { + client.isOmni = null; + } + if (client.runManager) { + client.runManager = null; + } + // Properties 
specific to AnthropicClient + if (client.message_start) { + client.message_start = null; + } + if (client.message_delta) { + client.message_delta = null; + } + if (client.isClaude3 !== undefined) { + client.isClaude3 = null; + } + if (client.useMessages !== undefined) { + client.useMessages = null; + } + if (client.isLegacyOutput !== undefined) { + client.isLegacyOutput = null; + } + if (client.supportsCacheControl !== undefined) { + client.supportsCacheControl = null; + } + // Properties specific to GoogleClient + if (client.serviceKey) { + client.serviceKey = null; + } + if (client.project_id) { + client.project_id = null; + } + if (client.client_email) { + client.client_email = null; + } + if (client.private_key) { + client.private_key = null; + } + if (client.access_token) { + client.access_token = null; + } + if (client.reverseProxyUrl) { + client.reverseProxyUrl = null; + } + if (client.authHeader) { + client.authHeader = null; + } + if (client.isGenerativeModel !== undefined) { + client.isGenerativeModel = null; + } + // Properties specific to OpenAIClient + if (client.ChatGPTClient) { + client.ChatGPTClient = null; + } + if (client.completionsUrl) { + client.completionsUrl = null; + } + if (client.shouldSummarize !== undefined) { + client.shouldSummarize = null; + } + if (client.isOllama !== undefined) { + client.isOllama = null; + } + if (client.FORCE_PROMPT !== undefined) { + client.FORCE_PROMPT = null; + } + if (client.isChatGptModel !== undefined) { + client.isChatGptModel = null; + } + if (client.isUnofficialChatGptModel !== undefined) { + client.isUnofficialChatGptModel = null; + } + if (client.useOpenRouter !== undefined) { + client.useOpenRouter = null; + } + if (client.startToken) { + client.startToken = null; + } + if (client.endToken) { + client.endToken = null; + } + if (client.userLabel) { + client.userLabel = null; + } + if (client.chatGptLabel) { + client.chatGptLabel = null; + } + if (client.modelLabel) { + client.modelLabel = null; + } + if 
(client.modelOptions) { + client.modelOptions = null; + } + if (client.defaultVisionModel) { + client.defaultVisionModel = null; + } + if (client.maxPromptTokens) { + client.maxPromptTokens = null; + } + if (client.maxResponseTokens) { + client.maxResponseTokens = null; + } + if (client.run) { + // Break circular references in run + if (client.run.Graph) { + client.run.Graph.resetValues(); + client.run.Graph.handlerRegistry = null; + client.run.Graph.runId = null; + client.run.Graph.tools = null; + client.run.Graph.signal = null; + client.run.Graph.config = null; + client.run.Graph.toolEnd = null; + client.run.Graph.toolMap = null; + client.run.Graph.provider = null; + client.run.Graph.streamBuffer = null; + client.run.Graph.clientOptions = null; + client.run.Graph.graphState = null; + if (client.run.Graph.boundModel?.client) { + client.run.Graph.boundModel.client = null; + } + client.run.Graph.boundModel = null; + client.run.Graph.systemMessage = null; + client.run.Graph.reasoningKey = null; + client.run.Graph.messages = null; + client.run.Graph.contentData = null; + client.run.Graph.stepKeyIds = null; + client.run.Graph.contentIndexMap = null; + client.run.Graph.toolCallStepIds = null; + client.run.Graph.messageIdsByStepKey = null; + client.run.Graph.messageStepHasToolCalls = null; + client.run.Graph.prelimMessageIdsByStepKey = null; + client.run.Graph.currentTokenType = null; + client.run.Graph.lastToken = null; + client.run.Graph.tokenTypeSwitch = null; + client.run.Graph.indexTokenCountMap = null; + client.run.Graph.currentUsage = null; + client.run.Graph.tokenCounter = null; + client.run.Graph.maxContextTokens = null; + client.run.Graph.pruneMessages = null; + client.run.Graph.lastStreamCall = null; + client.run.Graph.startIndex = null; + client.run.Graph = null; + } + if (client.run.handlerRegistry) { + client.run.handlerRegistry = null; + } + if (client.run.graphRunnable) { + if (client.run.graphRunnable.channels) { + client.run.graphRunnable.channels = 
null; + } + if (client.run.graphRunnable.nodes) { + client.run.graphRunnable.nodes = null; + } + if (client.run.graphRunnable.lc_kwargs) { + client.run.graphRunnable.lc_kwargs = null; + } + if (client.run.graphRunnable.builder?.nodes) { + client.run.graphRunnable.builder.nodes = null; + client.run.graphRunnable.builder = null; + } + client.run.graphRunnable = null; + } + client.run = null; + } + if (client.sendMessage) { + client.sendMessage = null; + } + if (client.savedMessageIds) { + client.savedMessageIds.clear(); + client.savedMessageIds = null; + } + if (client.currentMessages) { + client.currentMessages = null; + } + if (client.streamHandler) { + client.streamHandler = null; + } + if (client.contentParts) { + client.contentParts = null; + } + if (client.abortController) { + client.abortController = null; + } + if (client.collectedUsage) { + client.collectedUsage = null; + } + if (client.indexTokenCountMap) { + client.indexTokenCountMap = null; + } + if (client.agentConfigs) { + client.agentConfigs = null; + } + if (client.artifactPromises) { + client.artifactPromises = null; + } + if (client.usage) { + client.usage = null; + } + if (typeof client.dispose === 'function') { + client.dispose(); + } + if (client.options) { + if (client.options.req) { + client.options.req = null; + } + if (client.options.res) { + client.options.res = null; + } + if (client.options.attachments) { + client.options.attachments = null; + } + if (client.options.agent) { + client.options.agent = null; + } + } + client.options = null; + } catch (e) { + // Ignore errors during disposal + } +} + +function processReqData(data = {}, context) { + let { + abortKey, + userMessage, + userMessagePromise, + responseMessageId, + promptTokens, + conversationId, + userMessageId, + } = context; + for (const key in data) { + if (key === 'userMessage') { + userMessage = data[key]; + userMessageId = data[key].messageId; + } else if (key === 'userMessagePromise') { + userMessagePromise = data[key]; + } 
else if (key === 'responseMessageId') { + responseMessageId = data[key]; + } else if (key === 'promptTokens') { + promptTokens = data[key]; + } else if (key === 'abortKey') { + abortKey = data[key]; + } else if (!conversationId && key === 'conversationId') { + conversationId = data[key]; + } + } + return { + abortKey, + userMessage, + userMessagePromise, + responseMessageId, + promptTokens, + conversationId, + userMessageId, + }; +} + +module.exports = { + disposeClient, + requestDataMap, + clientRegistry, + processReqData, +}; diff --git a/api/server/controllers/AskController.js b/api/server/controllers/AskController.js index 2df6f34ede..40b209ef35 100644 --- a/api/server/controllers/AskController.js +++ b/api/server/controllers/AskController.js @@ -1,5 +1,15 @@ const { getResponseSender, Constants } = require('librechat-data-provider'); -const { createAbortController, handleAbortError } = require('~/server/middleware'); +const { + handleAbortError, + createAbortController, + cleanupAbortController, +} = require('~/server/middleware'); +const { + disposeClient, + processReqData, + clientRegistry, + requestDataMap, +} = require('~/server/cleanup'); const { sendMessage, createOnProgress } = require('~/server/utils'); const { saveMessage } = require('~/models'); const { logger } = require('~/config'); @@ -14,90 +24,162 @@ const AskController = async (req, res, next, initializeClient, addTitle) => { overrideParentMessageId = null, } = req.body; + let client = null; + let abortKey = null; + let cleanupHandlers = []; + let clientRef = null; + logger.debug('[AskController]', { text, conversationId, ...endpointOption, - modelsConfig: endpointOption.modelsConfig ? 'exists' : '', + modelsConfig: endpointOption?.modelsConfig ? 
'exists' : '', }); - let userMessage; - let userMessagePromise; - let promptTokens; - let userMessageId; - let responseMessageId; + let userMessage = null; + let userMessagePromise = null; + let promptTokens = null; + let userMessageId = null; + let responseMessageId = null; + let getAbortData = null; + const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model, modelDisplayLabel, }); - const newConvo = !conversationId; - const user = req.user.id; + const initialConversationId = conversationId; + const newConvo = !initialConversationId; + const userId = req.user.id; - const getReqData = (data = {}) => { - for (let key in data) { - if (key === 'userMessage') { - userMessage = data[key]; - userMessageId = data[key].messageId; - } else if (key === 'userMessagePromise') { - userMessagePromise = data[key]; - } else if (key === 'responseMessageId') { - responseMessageId = data[key]; - } else if (key === 'promptTokens') { - promptTokens = data[key]; - } else if (!conversationId && key === 'conversationId') { - conversationId = data[key]; - } - } + let reqDataContext = { + userMessage, + userMessagePromise, + responseMessageId, + promptTokens, + conversationId, + userMessageId, }; - let getText; + const updateReqData = (data = {}) => { + reqDataContext = processReqData(data, reqDataContext); + abortKey = reqDataContext.abortKey; + userMessage = reqDataContext.userMessage; + userMessagePromise = reqDataContext.userMessagePromise; + responseMessageId = reqDataContext.responseMessageId; + promptTokens = reqDataContext.promptTokens; + conversationId = reqDataContext.conversationId; + userMessageId = reqDataContext.userMessageId; + }; + + let { onProgress: progressCallback, getPartialText } = createOnProgress(); + + const performCleanup = () => { + logger.debug('[AskController] Performing cleanup'); + if (Array.isArray(cleanupHandlers)) { + for (const handler of cleanupHandlers) { + try { + if (typeof handler === 'function') { + handler(); + 
} + } catch (e) { + // Ignore + } + } + } + + if (abortKey) { + logger.debug('[AskController] Cleaning up abort controller'); + cleanupAbortController(abortKey); + abortKey = null; + } + + if (client) { + disposeClient(client); + client = null; + } + + reqDataContext = null; + userMessage = null; + userMessagePromise = null; + promptTokens = null; + getAbortData = null; + progressCallback = null; + endpointOption = null; + cleanupHandlers = null; + addTitle = null; + + if (requestDataMap.has(req)) { + requestDataMap.delete(req); + } + logger.debug('[AskController] Cleanup completed'); + }; try { - const { client } = await initializeClient({ req, res, endpointOption }); - const { onProgress: progressCallback, getPartialText } = createOnProgress(); + ({ client } = await initializeClient({ req, res, endpointOption })); + if (clientRegistry && client) { + clientRegistry.register(client, { userId }, client); + } - getText = client.getStreamText != null ? client.getStreamText.bind(client) : getPartialText; + if (client) { + requestDataMap.set(req, { client }); + } - const getAbortData = () => ({ - sender, - conversationId, - userMessagePromise, - messageId: responseMessageId, - parentMessageId: overrideParentMessageId ?? userMessageId, - text: getText(), - userMessage, - promptTokens, - }); + clientRef = new WeakRef(client); - const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData); + getAbortData = () => { + const currentClient = clientRef?.deref(); + const currentText = + currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); - res.on('close', () => { + return { + sender, + conversationId, + messageId: reqDataContext.responseMessageId, + parentMessageId: overrideParentMessageId ?? 
userMessageId, + text: currentText, + userMessage: userMessage, + userMessagePromise: userMessagePromise, + promptTokens: reqDataContext.promptTokens, + }; + }; + + const { onStart, abortController } = createAbortController( + req, + res, + getAbortData, + updateReqData, + ); + + const closeHandler = () => { logger.debug('[AskController] Request closed'); - if (!abortController) { - return; - } else if (abortController.signal.aborted) { - return; - } else if (abortController.requestCompleted) { + if (!abortController || abortController.signal.aborted || abortController.requestCompleted) { return; } - abortController.abort(); logger.debug('[AskController] Request aborted on close'); + }; + + res.on('close', closeHandler); + cleanupHandlers.push(() => { + try { + res.removeListener('close', closeHandler); + } catch (e) { + // Ignore + } }); const messageOptions = { - user, + user: userId, parentMessageId, - conversationId, + conversationId: reqDataContext.conversationId, overrideParentMessageId, - getReqData, + getReqData: updateReqData, onStart, abortController, progressCallback, progressOptions: { res, - // parentMessageId: overrideParentMessageId || userMessageId, }, }; @@ -105,59 +187,95 @@ const AskController = async (req, res, next, initializeClient, addTitle) => { let response = await client.sendMessage(text, messageOptions); response.endpoint = endpointOption.endpoint; - const { conversation = {} } = await client.responsePromise; + const databasePromise = response.databasePromise; + delete response.databasePromise; + + const { conversation: convoData = {} } = await databasePromise; + const conversation = { ...convoData }; conversation.title = conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; - if (client.options.attachments) { - userMessage.files = client.options.attachments; - conversation.model = endpointOption.modelOptions.model; - delete userMessage.image_urls; + const latestUserMessage = reqDataContext.userMessage; + + if (client?.options?.attachments && latestUserMessage) { + latestUserMessage.files = client.options.attachments; + if (endpointOption?.modelOptions?.model) { + conversation.model = endpointOption.modelOptions.model; + } + delete latestUserMessage.image_urls; } if (!abortController.signal.aborted) { + const finalResponseMessage = { ...response }; + sendMessage(res, { final: true, conversation, title: conversation.title, - requestMessage: userMessage, - responseMessage: response, + requestMessage: latestUserMessage, + responseMessage: finalResponseMessage, }); res.end(); - if (!client.savedMessageIds.has(response.messageId)) { + if (client?.savedMessageIds && !client.savedMessageIds.has(response.messageId)) { await saveMessage( req, - { ...response, user }, + { ...finalResponseMessage, user: userId }, { context: 'api/server/controllers/AskController.js - response end' }, ); } } - if (!client.skipSaveUserMessage) { - await saveMessage(req, userMessage, { - context: 'api/server/controllers/AskController.js - don\'t skip saving user message', + if (!client?.skipSaveUserMessage && latestUserMessage) { + await saveMessage(req, latestUserMessage, { + context: "api/server/controllers/AskController.js - don't skip saving user message", }); } - if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) { + if (typeof addTitle === 'function' && parentMessageId === Constants.NO_PARENT && newConvo) { addTitle(req, { text, - response, + response: { ...response }, client, - }); + }) + .then(() => { + logger.debug('[AskController] Title generation started'); + }) + .catch((err) => { + logger.error('[AskController] Error in title generation', err); + }) + .finally(() => { + 
logger.debug('[AskController] Title generation completed'); + performCleanup(); + }); + } else { + performCleanup(); } } catch (error) { - const partialText = getText && getText(); + logger.error('[AskController] Error handling request', error); + let partialText = ''; + try { + const currentClient = clientRef?.deref(); + partialText = + currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); + } catch (getTextError) { + logger.error('[AskController] Error calling getText() during error handling', getTextError); + } + handleAbortError(res, req, error, { sender, partialText, - conversationId, - messageId: responseMessageId, - parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId, - }).catch((err) => { - logger.error('[AskController] Error in `handleAbortError`', err); - }); + conversationId: reqDataContext.conversationId, + messageId: reqDataContext.responseMessageId, + parentMessageId: overrideParentMessageId ?? reqDataContext.userMessageId ?? 
parentMessageId, + userMessageId: reqDataContext.userMessageId, + }) + .catch((err) => { + logger.error('[AskController] Error in `handleAbortError` during catch block', err); + }) + .finally(() => { + performCleanup(); + }); } }; diff --git a/api/server/controllers/EditController.js b/api/server/controllers/EditController.js index 1de9725722..d142d474df 100644 --- a/api/server/controllers/EditController.js +++ b/api/server/controllers/EditController.js @@ -1,5 +1,15 @@ const { getResponseSender } = require('librechat-data-provider'); -const { createAbortController, handleAbortError } = require('~/server/middleware'); +const { + handleAbortError, + createAbortController, + cleanupAbortController, +} = require('~/server/middleware'); +const { + disposeClient, + processReqData, + clientRegistry, + requestDataMap, +} = require('~/server/cleanup'); const { sendMessage, createOnProgress } = require('~/server/utils'); const { saveMessage } = require('~/models'); const { logger } = require('~/config'); @@ -17,6 +27,11 @@ const EditController = async (req, res, next, initializeClient) => { overrideParentMessageId = null, } = req.body; + let client = null; + let abortKey = null; + let cleanupHandlers = []; + let clientRef = null; // Declare clientRef here + logger.debug('[EditController]', { text, generation, @@ -26,123 +41,205 @@ const EditController = async (req, res, next, initializeClient) => { modelsConfig: endpointOption.modelsConfig ? 
'exists' : '', }); - let userMessage; - let userMessagePromise; - let promptTokens; + let userMessage = null; + let userMessagePromise = null; + let promptTokens = null; + let getAbortData = null; + const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model, modelDisplayLabel, }); const userMessageId = parentMessageId; - const user = req.user.id; + const userId = req.user.id; - const getReqData = (data = {}) => { - for (let key in data) { - if (key === 'userMessage') { - userMessage = data[key]; - } else if (key === 'userMessagePromise') { - userMessagePromise = data[key]; - } else if (key === 'responseMessageId') { - responseMessageId = data[key]; - } else if (key === 'promptTokens') { - promptTokens = data[key]; - } - } + let reqDataContext = { userMessage, userMessagePromise, responseMessageId, promptTokens }; + + const updateReqData = (data = {}) => { + reqDataContext = processReqData(data, reqDataContext); + abortKey = reqDataContext.abortKey; + userMessage = reqDataContext.userMessage; + userMessagePromise = reqDataContext.userMessagePromise; + responseMessageId = reqDataContext.responseMessageId; + promptTokens = reqDataContext.promptTokens; }; - const { onProgress: progressCallback, getPartialText } = createOnProgress({ + let { onProgress: progressCallback, getPartialText } = createOnProgress({ generation, }); - let getText; + const performCleanup = () => { + logger.debug('[EditController] Performing cleanup'); + if (Array.isArray(cleanupHandlers)) { + for (const handler of cleanupHandlers) { + try { + if (typeof handler === 'function') { + handler(); + } + } catch (e) { + // Ignore + } + } + } + + if (abortKey) { + logger.debug('[EditController] Cleaning up abort controller'); + cleanupAbortController(abortKey); + abortKey = null; + } + + if (client) { + disposeClient(client); + client = null; + } + + reqDataContext = null; + userMessage = null; + userMessagePromise = null; + promptTokens = null; + getAbortData = null; + 
progressCallback = null; + endpointOption = null; + cleanupHandlers = null; + + if (requestDataMap.has(req)) { + requestDataMap.delete(req); + } + logger.debug('[EditController] Cleanup completed'); + }; try { - const { client } = await initializeClient({ req, res, endpointOption }); + ({ client } = await initializeClient({ req, res, endpointOption })); - getText = client.getStreamText != null ? client.getStreamText.bind(client) : getPartialText; + if (clientRegistry && client) { + clientRegistry.register(client, { userId }, client); + } - const getAbortData = () => ({ - conversationId, - userMessagePromise, - messageId: responseMessageId, - sender, - parentMessageId: overrideParentMessageId ?? userMessageId, - text: getText(), - userMessage, - promptTokens, - }); + if (client) { + requestDataMap.set(req, { client }); + } - const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData); + clientRef = new WeakRef(client); - res.on('close', () => { + getAbortData = () => { + const currentClient = clientRef?.deref(); + const currentText = + currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); + + return { + sender, + conversationId, + messageId: reqDataContext.responseMessageId, + parentMessageId: overrideParentMessageId ?? 
userMessageId, + text: currentText, + userMessage: userMessage, + userMessagePromise: userMessagePromise, + promptTokens: reqDataContext.promptTokens, + }; + }; + + const { onStart, abortController } = createAbortController( + req, + res, + getAbortData, + updateReqData, + ); + + const closeHandler = () => { logger.debug('[EditController] Request closed'); - if (!abortController) { - return; - } else if (abortController.signal.aborted) { - return; - } else if (abortController.requestCompleted) { + if (!abortController || abortController.signal.aborted || abortController.requestCompleted) { return; } - abortController.abort(); logger.debug('[EditController] Request aborted on close'); + }; + + res.on('close', closeHandler); + cleanupHandlers.push(() => { + try { + res.removeListener('close', closeHandler); + } catch (e) { + // Ignore + } }); let response = await client.sendMessage(text, { - user, + user: userId, generation, isContinued, isEdited: true, conversationId, parentMessageId, - responseMessageId, + responseMessageId: reqDataContext.responseMessageId, overrideParentMessageId, - getReqData, + getReqData: updateReqData, onStart, abortController, progressCallback, progressOptions: { res, - // parentMessageId: overrideParentMessageId || userMessageId, }, }); - const { conversation = {} } = await client.responsePromise; + const databasePromise = response.databasePromise; + delete response.databasePromise; + + const { conversation: convoData = {} } = await databasePromise; + const conversation = { ...convoData }; conversation.title = conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; - if (client.options.attachments) { + if (client?.options?.attachments && endpointOption?.modelOptions?.model) { conversation.model = endpointOption.modelOptions.model; } if (!abortController.signal.aborted) { + const finalUserMessage = reqDataContext.userMessage; + const finalResponseMessage = { ...response }; + sendMessage(res, { final: true, conversation, title: conversation.title, - requestMessage: userMessage, - responseMessage: response, + requestMessage: finalUserMessage, + responseMessage: finalResponseMessage, }); res.end(); await saveMessage( req, - { ...response, user }, + { ...finalResponseMessage, user: userId }, { context: 'api/server/controllers/EditController.js - response end' }, ); } + + performCleanup(); } catch (error) { - const partialText = getText(); + logger.error('[EditController] Error handling request', error); + let partialText = ''; + try { + const currentClient = clientRef?.deref(); + partialText = + currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText(); + } catch (getTextError) { + logger.error('[EditController] Error calling getText() during error handling', getTextError); + } + handleAbortError(res, req, error, { sender, partialText, conversationId, - messageId: responseMessageId, + messageId: reqDataContext.responseMessageId, parentMessageId: overrideParentMessageId ?? userMessageId ?? 
parentMessageId, - }).catch((err) => { - logger.error('[EditController] Error in `handleAbortError`', err); - }); + userMessageId, + }) + .catch((err) => { + logger.error('[EditController] Error in `handleAbortError` during catch block', err); + }) + .finally(() => { + performCleanup(); + }); } }; diff --git a/api/server/controllers/PluginController.js b/api/server/controllers/PluginController.js index 71e7ed348e..674e36002a 100644 --- a/api/server/controllers/PluginController.js +++ b/api/server/controllers/PluginController.js @@ -1,5 +1,5 @@ const { CacheKeys, AuthType } = require('librechat-data-provider'); -const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs'); +const { getToolkitKey } = require('~/server/services/ToolService'); const { getCustomConfig } = require('~/server/services/Config'); const { availableTools } = require('~/app/clients/tools'); const { getMCPManager } = require('~/config'); @@ -69,7 +69,7 @@ const getAvailablePluginsController = async (req, res) => { ); } - let plugins = await addOpenAPISpecs(authenticatedPlugins); + let plugins = authenticatedPlugins; if (includedTools.length > 0) { plugins = plugins.filter((plugin) => includedTools.includes(plugin.pluginKey)); @@ -105,11 +105,11 @@ const getAvailableTools = async (req, res) => { return; } - const pluginManifest = availableTools; + let pluginManifest = availableTools; const customConfig = await getCustomConfig(); if (customConfig?.mcpServers != null) { const mcpManager = getMCPManager(); - await mcpManager.loadManifestTools(pluginManifest); + pluginManifest = await mcpManager.loadManifestTools(pluginManifest); } /** @type {TPlugin[]} */ @@ -128,7 +128,7 @@ const getAvailableTools = async (req, res) => { (plugin) => toolDefinitions[plugin.pluginKey] !== undefined || (plugin.toolkit === true && - Object.keys(toolDefinitions).some((key) => key.startsWith(`${plugin.pluginKey}_`))), + Object.keys(toolDefinitions).some((key) => getToolkitKey(key) === 
plugin.pluginKey)), ); await cache.set(CacheKeys.TOOLS, tools); diff --git a/api/server/controllers/agents/callbacks.js b/api/server/controllers/agents/callbacks.js index 6622ec3815..3f507f7d0b 100644 --- a/api/server/controllers/agents/callbacks.js +++ b/api/server/controllers/agents/callbacks.js @@ -14,15 +14,6 @@ const { loadAuthValues } = require('~/server/services/Tools/credentials'); const { saveBase64Image } = require('~/server/services/Files/process'); const { logger, sendEvent } = require('~/config'); -/** @typedef {import('@librechat/agents').Graph} Graph */ -/** @typedef {import('@librechat/agents').EventHandler} EventHandler */ -/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */ -/** @typedef {import('@librechat/agents').ToolEndData} ToolEndData */ -/** @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback */ -/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */ -/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */ -/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */ - class ModelEndHandler { /** * @param {Array} collectedUsage @@ -38,7 +29,7 @@ class ModelEndHandler { * @param {string} event * @param {ModelEndData | undefined} data * @param {Record | undefined} metadata - * @param {Graph} graph + * @param {StandardGraph} graph * @returns */ handle(event, data, metadata, graph) { @@ -61,7 +52,10 @@ class ModelEndHandler { } this.collectedUsage.push(usage); - if (!graph.clientOptions?.disableStreaming) { + const streamingDisabled = !!( + graph.clientOptions?.disableStreaming || graph?.boundModel?.disableStreaming + ); + if (!streamingDisabled) { return; } if (!data.output.content) { @@ -246,7 +240,11 @@ function createToolEndCallback({ req, res, artifactPromises }) { if (output.artifact.content) { /** @type {FormattedContent[]} */ const content = output.artifact.content; - for (const part of content) { + 
for (let i = 0; i < content.length; i++) { + const part = content[i]; + if (!part) { + continue; + } if (part.type !== 'image_url') { continue; } @@ -254,8 +252,10 @@ function createToolEndCallback({ req, res, artifactPromises }) { artifactPromises.push( (async () => { const filename = `${output.name}_${output.tool_call_id}_img_${nanoid()}`; + const file_id = output.artifact.file_ids?.[i]; const file = await saveBase64Image(url, { req, + file_id, filename, endpoint: metadata.provider, context: FileContext.image_generation, diff --git a/api/server/controllers/agents/client.js b/api/server/controllers/agents/client.js index ff98d80a13..a3484f6505 100644 --- a/api/server/controllers/agents/client.js +++ b/api/server/controllers/agents/client.js @@ -33,6 +33,7 @@ const { addCacheControl, createContextHandlers } = require('~/app/clients/prompt const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens'); const { getBufferString, HumanMessage } = require('@langchain/core/messages'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); +const initOpenAI = require('~/server/services/Endpoints/openAI/initialize'); const Tokenizer = require('~/server/services/Tokenizer'); const BaseClient = require('~/app/clients/BaseClient'); const { logger, sendEvent } = require('~/config'); @@ -57,12 +58,27 @@ const payloadParser = ({ req, agent, endpoint }) => { const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]); -const noSystemModelRegex = [/\bo1\b/gi]; +const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi]; // const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory'); // const { getFormattedMemories } = require('~/models/Memory'); // const { getCurrentDateTime } = require('~/utils'); +function createTokenCounter(encoding) { + return (message) => { + const countTokens = (text) => Tokenizer.getTokenCount(text, encoding); + return 
getTokenCountForMessage(message, countTokens); + }; +} + +function logToolError(graph, error, toolId) { + logger.error( + '[api/server/controllers/agents/client.js #chatCompletion] Tool Error', + error, + toolId, + ); +} + class AgentClient extends BaseClient { constructor(options = {}) { super(null, options); @@ -132,19 +148,13 @@ class AgentClient extends BaseClient { * @param {MongoFile[]} attachments */ checkVisionRequest(attachments) { - logger.info( - '[api/server/controllers/agents/client.js #checkVisionRequest] not implemented', - attachments, - ); // if (!attachments) { // return; // } - // const availableModels = this.options.modelsConfig?.[this.options.endpoint]; // if (!availableModels) { // return; // } - // let visionRequestDetected = false; // for (const file of attachments) { // if (file?.type?.includes('image')) { @@ -155,13 +165,11 @@ class AgentClient extends BaseClient { // if (!visionRequestDetected) { // return; // } - // this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); // if (this.isVisionModel) { // delete this.modelOptions.stop; // return; // } - // for (const model of availableModels) { // if (!validateVisionModel({ model, availableModels })) { // continue; @@ -171,14 +179,12 @@ class AgentClient extends BaseClient { // delete this.modelOptions.stop; // return; // } - // if (!availableModels.includes(this.defaultVisionModel)) { // return; // } // if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) { // return; // } - // this.modelOptions.model = this.defaultVisionModel; // this.isVisionModel = true; // delete this.modelOptions.stop; @@ -348,7 +354,9 @@ class AgentClient extends BaseClient { this.contextHandlers?.processFile(file); continue; } - + if (file.metadata?.fileIdentifier) { + continue; + } // orderedMessages[i].tokenCount += this.calculateImageTokenCost({ // width: file.width, // height: file.height, @@ -535,6 +543,10 @@ class AgentClient extends BaseClient { 
} async chatCompletion({ payload, abortController = null }) { + /** @type {Partial & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */ + let config; + /** @type {ReturnType} */ + let run; try { if (!abortController) { abortController = new AbortController(); @@ -632,11 +644,11 @@ class AgentClient extends BaseClient { /** @type {TCustomConfig['endpoints']['agents']} */ const agentsEConfig = this.options.req.app.locals[EModelEndpoint.agents]; - /** @type {Partial & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */ - const config = { + config = { configurable: { thread_id: this.conversationId, last_agent_index: this.agentConfigs?.size ?? 0, + user_id: this.user ?? this.options.req.user?.id, hide_sequential_outputs: this.options.agent.hide_sequential_outputs, }, recursionLimit: agentsEConfig?.recursionLimit, @@ -651,19 +663,10 @@ class AgentClient extends BaseClient { this.indexTokenCountMap, toolSet, ); - if (legacyContentEndpoints.has(this.options.agent.endpoint)) { + if (legacyContentEndpoints.has(this.options.agent.endpoint?.toLowerCase())) { initialMessages = formatContentStrings(initialMessages); } - /** @type {ReturnType} */ - let run; - const countTokens = ((text) => this.getTokenCount(text)).bind(this); - - /** @type {(message: BaseMessage) => number} */ - const tokenCounter = (message) => { - return getTokenCountForMessage(message, countTokens); - }; - /** * * @param {Agent} agent @@ -715,12 +718,14 @@ class AgentClient extends BaseClient { } if (noSystemMessages === true && systemContent?.length) { - let latestMessage = _messages.pop().content; + const latestMessageContent = _messages.pop().content; if (typeof latestMessage !== 'string') { - latestMessage = latestMessage[0].text; + latestMessageContent[0].text = [systemContent, latestMessageContent[0].text].join('\n'); + _messages.push(new HumanMessage({ content: latestMessageContent })); + } else { + const text = [systemContent, latestMessageContent].join('\n'); + 
_messages.push(new HumanMessage(text)); } - latestMessage = [systemContent, latestMessage].join('\n'); - _messages.push(new HumanMessage(latestMessage)); } let messages = _messages; @@ -767,21 +772,18 @@ class AgentClient extends BaseClient { run.Graph.contentData = contentData; } + const encoding = this.getEncoding(); await run.processStream({ messages }, config, { keepContent: i !== 0, - tokenCounter, + tokenCounter: createTokenCounter(encoding), indexTokenCountMap: currentIndexCountMap, maxContextTokens: agent.maxContextTokens, callbacks: { - [Callback.TOOL_ERROR]: (graph, error, toolId) => { - logger.error( - '[api/server/controllers/agents/client.js #chatCompletion] Tool Error', - error, - toolId, - ); - }, + [Callback.TOOL_ERROR]: logToolError, }, }); + + config.signal = null; }; await runAgent(this.options.agent, initialMessages); @@ -809,6 +811,8 @@ class AgentClient extends BaseClient { break; } } + const encoding = this.getEncoding(); + const tokenCounter = createTokenCounter(encoding); for (const [agentId, agent] of this.agentConfigs) { if (abortController.signal.aborted === true) { break; @@ -917,19 +921,21 @@ class AgentClient extends BaseClient { * @param {string} params.text * @param {string} params.conversationId */ - async titleConvo({ text }) { + async titleConvo({ text, abortController }) { if (!this.run) { throw new Error('Run not initialized'); } const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator(); + const endpoint = this.options.agent.endpoint; + const { req, res } = this.options; /** @type {import('@librechat/agents').ClientOptions} */ - const clientOptions = { + let clientOptions = { maxTokens: 75, }; - let endpointConfig = this.options.req.app.locals[this.options.agent.endpoint]; + let endpointConfig = req.app.locals[endpoint]; if (!endpointConfig) { try { - endpointConfig = await getCustomEndpointConfig(this.options.agent.endpoint); + endpointConfig = await getCustomEndpointConfig(endpoint); } catch (err) { 
logger.error( '[api/server/controllers/agents/client.js #titleConvo] Error getting custom endpoint config', @@ -944,12 +950,35 @@ class AgentClient extends BaseClient { ) { clientOptions.model = endpointConfig.titleModel; } + if ( + endpoint === EModelEndpoint.azureOpenAI && + clientOptions.model && + this.options.agent.model_parameters.model !== clientOptions.model + ) { + clientOptions = + ( + await initOpenAI({ + req, + res, + optionsOnly: true, + overrideModel: clientOptions.model, + overrideEndpoint: endpoint, + endpointOption: { + model_parameters: clientOptions, + }, + }) + )?.llmConfig ?? clientOptions; + } + if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) { + delete clientOptions.maxTokens; + } try { const titleResult = await this.run.generateTitle({ inputText: text, contentParts: this.contentParts, clientOptions, chainOptions: { + signal: abortController.signal, callbacks: [ { handleLLMEnd, @@ -975,7 +1004,7 @@ class AgentClient extends BaseClient { }; }); - this.recordCollectedUsage({ + await this.recordCollectedUsage({ model: clientOptions.model, context: 'title', collectedUsage, diff --git a/api/server/controllers/agents/request.js b/api/server/controllers/agents/request.js index 91277d5bc4..fcee62edc7 100644 --- a/api/server/controllers/agents/request.js +++ b/api/server/controllers/agents/request.js @@ -1,5 +1,10 @@ const { Constants } = require('librechat-data-provider'); -const { createAbortController, handleAbortError } = require('~/server/middleware'); +const { + handleAbortError, + createAbortController, + cleanupAbortController, +} = require('~/server/middleware'); +const { disposeClient, clientRegistry, requestDataMap } = require('~/server/cleanup'); const { sendMessage } = require('~/server/utils'); const { saveMessage } = require('~/models'); const { logger } = require('~/config'); @@ -14,16 +19,22 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => { } = req.body; let sender; + let 
abortKey; let userMessage; let promptTokens; let userMessageId; let responseMessageId; let userMessagePromise; + let getAbortData; + let client = null; + // Initialize as an array + let cleanupHandlers = []; const newConvo = !conversationId; - const user = req.user.id; + const userId = req.user.id; - const getReqData = (data = {}) => { + // Create handler to avoid capturing the entire parent scope + let getReqData = (data = {}) => { for (let key in data) { if (key === 'userMessage') { userMessage = data[key]; @@ -36,30 +47,96 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => { promptTokens = data[key]; } else if (key === 'sender') { sender = data[key]; + } else if (key === 'abortKey') { + abortKey = data[key]; } else if (!conversationId && key === 'conversationId') { conversationId = data[key]; } } }; + // Create a function to handle final cleanup + const performCleanup = () => { + logger.debug('[AgentController] Performing cleanup'); + // Make sure cleanupHandlers is an array before iterating + if (Array.isArray(cleanupHandlers)) { + // Execute all cleanup handlers + for (const handler of cleanupHandlers) { + try { + if (typeof handler === 'function') { + handler(); + } + } catch (e) { + // Ignore cleanup errors + } + } + } + + // Clean up abort controller + if (abortKey) { + logger.debug('[AgentController] Cleaning up abort controller'); + cleanupAbortController(abortKey); + } + + // Dispose client properly + if (client) { + disposeClient(client); + } + + // Clear all references + client = null; + getReqData = null; + userMessage = null; + getAbortData = null; + endpointOption.agent = null; + endpointOption = null; + cleanupHandlers = null; + userMessagePromise = null; + + // Clear request data map + if (requestDataMap.has(req)) { + requestDataMap.delete(req); + } + logger.debug('[AgentController] Cleanup completed'); + }; + try { /** @type {{ client: TAgentClient }} */ - const { client } = await initializeClient({ req, res, 
endpointOption }); + const result = await initializeClient({ req, res, endpointOption }); + client = result.client; - const getAbortData = () => ({ - sender, - userMessage, - promptTokens, - conversationId, - userMessagePromise, - messageId: responseMessageId, - content: client.getContentParts(), - parentMessageId: overrideParentMessageId ?? userMessageId, - }); + // Register client with finalization registry if available + if (clientRegistry) { + clientRegistry.register(client, { userId }, client); + } + + // Store request data in WeakMap keyed by req object + requestDataMap.set(req, { client }); + + // Use WeakRef to allow GC but still access content if it exists + const contentRef = new WeakRef(client.contentParts || []); + + // Minimize closure scope - only capture small primitives and WeakRef + getAbortData = () => { + // Dereference WeakRef each time + const content = contentRef.deref(); + + return { + sender, + content: content || [], + userMessage, + promptTokens, + conversationId, + userMessagePromise, + messageId: responseMessageId, + parentMessageId: overrideParentMessageId ?? 
userMessageId, + }; + }; const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData); - res.on('close', () => { + // Simple handler to avoid capturing scope + const closeHandler = () => { logger.debug('[AgentController] Request closed'); if (!abortController) { return; @@ -71,10 +148,19 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => { abortController.abort(); logger.debug('[AgentController] Request aborted on close'); + }; + + res.on('close', closeHandler); + cleanupHandlers.push(() => { + try { + res.removeListener('close', closeHandler); + } catch (e) { + // Ignore + } }); const messageOptions = { - user, + user: userId, onStart, getReqData, conversationId, @@ -83,69 +169,104 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => { overrideParentMessageId, progressOptions: { res, - // parentMessageId: overrideParentMessageId || userMessageId, }, }; let response = await client.sendMessage(text, messageOptions); - response.endpoint = endpointOption.endpoint; - const { conversation = {} } = await client.responsePromise; + // Extract what we need and immediately break reference + const messageId = response.messageId; + const endpoint = endpointOption.endpoint; + response.endpoint = endpoint; + + // Store database promise locally + const databasePromise = response.databasePromise; + delete response.databasePromise; + + // Resolve database-related data + const { conversation: convoData = {} } = await databasePromise; + const conversation = { ...convoData }; conversation.title = conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; - if (req.body.files && client.options.attachments) { + // Process files if needed + if (req.body.files && client.options?.attachments) { userMessage.files = []; const messageFiles = new Set(req.body.files.map((file) => file.file_id)); for (let attachment of client.options.attachments) { if (messageFiles.has(attachment.file_id)) { - userMessage.files.push(attachment); + userMessage.files.push({ ...attachment }); } } delete userMessage.image_urls; } + // Only send if not aborted if (!abortController.signal.aborted) { + // Create a new response object with minimal copies + const finalResponse = { ...response }; + sendMessage(res, { final: true, conversation, title: conversation.title, requestMessage: userMessage, - responseMessage: response, + responseMessage: finalResponse, }); res.end(); - if (!client.savedMessageIds.has(response.messageId)) { + // Save the message if needed + if (client.savedMessageIds && !client.savedMessageIds.has(messageId)) { await saveMessage( req, - { ...response, user }, + { ...finalResponse, user: userId }, { context: 'api/server/controllers/agents/request.js - response end' }, ); } } + // Save user message if needed if (!client.skipSaveUserMessage) { await saveMessage(req, userMessage, { context: 'api/server/controllers/agents/request.js - don\'t skip saving user message', }); } + // Add title if needed - extract minimal data if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) { addTitle(req, { text, - response, + response: { ...response }, client, - }); + }) + .then(() => { + logger.debug('[AgentController] Title generation started'); + }) + .catch((err) => { + logger.error('[AgentController] Error in title generation', err); + }) + .finally(() => { + logger.debug('[AgentController] Title generation completed'); + performCleanup(); + }); + } else { + performCleanup(); } } catch (error) { + // Handle error without capturing much scope handleAbortError(res, req, error, { 
conversationId, sender, messageId: responseMessageId, parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId, - }).catch((err) => { - logger.error('[api/server/controllers/agents/request] Error in `handleAbortError`', err); - }); + userMessageId, + }) + .catch((err) => { + logger.error('[api/server/controllers/agents/request] Error in `handleAbortError`', err); + }) + .finally(() => { + performCleanup(); + }); } }; diff --git a/api/server/controllers/assistants/chatV1.js b/api/server/controllers/assistants/chatV1.js index 2f10d31a6b..5fa10e9e37 100644 --- a/api/server/controllers/assistants/chatV1.js +++ b/api/server/controllers/assistants/chatV1.js @@ -119,7 +119,7 @@ const chatV1 = async (req, res) => { } else if (/Files.*are invalid/.test(error.message)) { const errorMessage = `Files are invalid, or may not have uploaded yet.${ endpoint === EModelEndpoint.azureAssistants - ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.' + ? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload." : '' }`; return sendResponse(req, res, messageData, errorMessage); @@ -379,8 +379,8 @@ const chatV1 = async (req, res) => { body.additional_instructions ? `${body.additional_instructions}\n` : '' }The user has uploaded ${imageCount} image${pluralized}. Use the \`${ImageVisionTool.function.name}\` tool to retrieve ${ - plural ? '' : 'a ' -}detailed text description${pluralized} for ${plural ? 'each' : 'the'} image${pluralized}.`; + plural ? '' : 'a ' + }detailed text description${pluralized} for ${plural ? 
'each' : 'the'} image${pluralized}.`; return files; }; @@ -576,6 +576,8 @@ const chatV1 = async (req, res) => { thread_id, model: assistant_id, endpoint, + spec: endpointOption.spec, + iconURL: endpointOption.iconURL, }; sendMessage(res, { diff --git a/api/server/controllers/assistants/chatV2.js b/api/server/controllers/assistants/chatV2.js index 799326aea9..309e5a86c4 100644 --- a/api/server/controllers/assistants/chatV2.js +++ b/api/server/controllers/assistants/chatV2.js @@ -428,6 +428,8 @@ const chatV2 = async (req, res) => { thread_id, model: assistant_id, endpoint, + spec: endpointOption.spec, + iconURL: endpointOption.iconURL, }; sendMessage(res, { diff --git a/api/server/index.js b/api/server/index.js index 4a428789dd..cd0bdd3f88 100644 --- a/api/server/index.js +++ b/api/server/index.js @@ -88,8 +88,8 @@ const startServer = async () => { app.use('/api/actions', routes.actions); app.use('/api/keys', routes.keys); app.use('/api/user', routes.user); - app.use('/api/search', routes.search); app.use('/api/ask', routes.ask); + app.use('/api/search', routes.search); app.use('/api/edit', routes.edit); app.use('/api/messages', routes.messages); app.use('/api/convos', routes.convos); diff --git a/api/server/middleware/abortMiddleware.js b/api/server/middleware/abortMiddleware.js index ccc4ed0439..bfc28f513d 100644 --- a/api/server/middleware/abortMiddleware.js +++ b/api/server/middleware/abortMiddleware.js @@ -1,3 +1,4 @@ +// abortMiddleware.js const { isAssistantsEndpoint, ErrorTypes } = require('librechat-data-provider'); const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils'); const { truncateText, smartTruncateText } = require('~/app/clients/prompts'); @@ -8,6 +9,68 @@ const { saveMessage, getConvo } = require('~/models'); const { abortRun } = require('./abortRun'); const { logger } = require('~/config'); +const abortDataMap = new WeakMap(); + +function cleanupAbortController(abortKey) { + if (!abortControllers.has(abortKey)) { + 
return false; + } + + const { abortController } = abortControllers.get(abortKey); + + if (!abortController) { + abortControllers.delete(abortKey); + return true; + } + + // 1. Check if this controller has any composed signals and clean them up + try { + // This creates a temporary composed signal to use for cleanup + const composedSignal = AbortSignal.any([abortController.signal]); + + // Get all event types - in practice, AbortSignal typically only uses 'abort' + const eventTypes = ['abort']; + + // First, execute a dummy listener removal to handle potential composed signals + for (const eventType of eventTypes) { + const dummyHandler = () => {}; + composedSignal.addEventListener(eventType, dummyHandler); + composedSignal.removeEventListener(eventType, dummyHandler); + + const listeners = composedSignal.listeners?.(eventType) || []; + for (const listener of listeners) { + composedSignal.removeEventListener(eventType, listener); + } + } + } catch (e) { + logger.debug(`Error cleaning up composed signals: ${e}`); + } + + // 2. Abort the controller if not already aborted + if (!abortController.signal.aborted) { + abortController.abort(); + } + + // 3. Remove from registry + abortControllers.delete(abortKey); + + // 4. Clean up any data stored in the WeakMap + if (abortDataMap.has(abortController)) { + abortDataMap.delete(abortController); + } + + // 5. 
Clean up function references on the controller + if (abortController.getAbortData) { + abortController.getAbortData = null; + } + + if (abortController.abortCompletion) { + abortController.abortCompletion = null; + } + + return true; +} + async function abortMessage(req, res) { let { abortKey, endpoint } = req.body; @@ -29,24 +92,24 @@ async function abortMessage(req, res) { if (!abortController) { return res.status(204).send({ message: 'Request not found' }); } - const finalEvent = await abortController.abortCompletion(); + + const finalEvent = await abortController.abortCompletion?.(); logger.debug( `[abortMessage] ID: ${req.user.id} | ${req.user.email} | Aborted request: ` + JSON.stringify({ abortKey }), ); - abortControllers.delete(abortKey); + cleanupAbortController(abortKey); if (res.headersSent && finalEvent) { return sendMessage(res, finalEvent); } res.setHeader('Content-Type', 'application/json'); - res.send(JSON.stringify(finalEvent)); } -const handleAbort = () => { - return async (req, res) => { +const handleAbort = function () { + return async function (req, res) { try { if (isEnabled(process.env.LIMIT_CONCURRENT_MESSAGES)) { await clearPendingReq({ userId: req.user.id }); @@ -62,8 +125,48 @@ const createAbortController = (req, res, getAbortData, getReqData) => { const abortController = new AbortController(); const { endpointOption } = req.body; + // Store minimal data in WeakMap to avoid circular references + abortDataMap.set(abortController, { + getAbortDataFn: getAbortData, + userId: req.user.id, + endpoint: endpointOption.endpoint, + iconURL: endpointOption.iconURL, + model: endpointOption.modelOptions?.model || endpointOption.model_parameters?.model, + }); + + // Replace the direct function reference with a wrapper that uses WeakMap abortController.getAbortData = function () { - return getAbortData(); + const data = abortDataMap.get(this); + if (!data || typeof data.getAbortDataFn !== 'function') { + return {}; + } + + try { + const result = 
data.getAbortDataFn(); + + // Create a copy without circular references + const cleanResult = { ...result }; + + // If userMessagePromise exists, break its reference to client + if ( + cleanResult.userMessagePromise && + typeof cleanResult.userMessagePromise.then === 'function' + ) { + // Create a new promise that fulfills with the same result but doesn't reference the original + const originalPromise = cleanResult.userMessagePromise; + cleanResult.userMessagePromise = new Promise((resolve, reject) => { + originalPromise.then( + (result) => resolve({ ...result }), + (error) => reject(error), + ); + }); + } + + return cleanResult; + } catch (err) { + logger.error('[abortController.getAbortData] Error:', err); + return {}; + } }; /** @@ -74,6 +177,7 @@ const createAbortController = (req, res, getAbortData, getReqData) => { sendMessage(res, { message: userMessage, created: true }); const abortKey = userMessage?.conversationId ?? req.user.id; + getReqData({ abortKey }); const prevRequest = abortControllers.get(abortKey); const { overrideUserMessageId } = req?.body ?? 
{}; @@ -81,34 +185,74 @@ const createAbortController = (req, res, getAbortData, getReqData) => { const data = prevRequest.abortController.getAbortData(); getReqData({ userMessage: data?.userMessage }); const addedAbortKey = `${abortKey}:${responseMessageId}`; - abortControllers.set(addedAbortKey, { abortController, ...endpointOption }); - res.on('finish', function () { - abortControllers.delete(addedAbortKey); - }); + + // Store minimal options + const minimalOptions = { + endpoint: endpointOption.endpoint, + iconURL: endpointOption.iconURL, + model: endpointOption.modelOptions?.model || endpointOption.model_parameters?.model, + }; + + abortControllers.set(addedAbortKey, { abortController, ...minimalOptions }); + + // Use a simple function for cleanup to avoid capturing context + const cleanupHandler = () => { + try { + cleanupAbortController(addedAbortKey); + } catch (e) { + // Ignore cleanup errors + } + }; + + res.on('finish', cleanupHandler); return; } - abortControllers.set(abortKey, { abortController, ...endpointOption }); + // Store minimal options + const minimalOptions = { + endpoint: endpointOption.endpoint, + iconURL: endpointOption.iconURL, + model: endpointOption.modelOptions?.model || endpointOption.model_parameters?.model, + }; - res.on('finish', function () { - abortControllers.delete(abortKey); - }); + abortControllers.set(abortKey, { abortController, ...minimalOptions }); + + // Use a simple function for cleanup to avoid capturing context + const cleanupHandler = () => { + try { + cleanupAbortController(abortKey); + } catch (e) { + // Ignore cleanup errors + } + }; + + res.on('finish', cleanupHandler); }; + // Define abortCompletion without capturing the entire parent scope abortController.abortCompletion = async function () { - abortController.abort(); + this.abort(); + + // Get data from WeakMap + const ctrlData = abortDataMap.get(this); + if (!ctrlData || !ctrlData.getAbortDataFn) { + return { final: true, conversation: {}, title: 'New Chat' }; 
+ } + + // Get abort data using stored function const { conversationId, userMessage, userMessagePromise, promptTokens, ...responseData } = - getAbortData(); + ctrlData.getAbortDataFn(); + const completionTokens = await countTokens(responseData?.text ?? ''); - const user = req.user.id; + const user = ctrlData.userId; const responseMessage = { ...responseData, conversationId, finish_reason: 'incomplete', - endpoint: endpointOption.endpoint, - iconURL: endpointOption.iconURL, - model: endpointOption.modelOptions?.model ?? endpointOption.model_parameters?.model, + endpoint: ctrlData.endpoint, + iconURL: ctrlData.iconURL, + model: ctrlData.modelOptions?.model ?? ctrlData.model_parameters?.model, unfinished: false, error: false, isCreatedByUser: false, @@ -130,10 +274,12 @@ const createAbortController = (req, res, getAbortData, getReqData) => { if (userMessagePromise) { const resolved = await userMessagePromise; conversation = resolved?.conversation; + // Break reference to promise + resolved.conversation = null; } if (!conversation) { - conversation = await getConvo(req.user.id, conversationId); + conversation = await getConvo(user, conversationId); } return { @@ -165,7 +311,7 @@ const handleAbortError = async (res, req, error, data) => { } else { logger.error('[handleAbortError] AI response error; aborting request:', error); } - const { sender, conversationId, messageId, parentMessageId, partialText } = data; + const { sender, conversationId, messageId, parentMessageId, userMessageId, partialText } = data; if (error.stack && error.stack.includes('google')) { logger.warn( @@ -198,10 +344,10 @@ const handleAbortError = async (res, req, error, data) => { parentMessageId, text: errorText, user: req.user.id, - shouldSaveMessage: true, spec: endpointOption?.spec, iconURL: endpointOption?.iconURL, modelLabel: endpointOption?.modelLabel, + shouldSaveMessage: userMessageId != null, model: endpointOption?.modelOptions?.model || req.body?.model, }; @@ -218,11 +364,12 @@ const 
handleAbortError = async (res, req, error, data) => { }; } + // Create a simple callback without capturing parent scope const callback = async () => { - if (abortControllers.has(conversationId)) { - const { abortController } = abortControllers.get(conversationId); - abortController.abort(); - abortControllers.delete(conversationId); + try { + cleanupAbortController(conversationId); + } catch (e) { + // Ignore cleanup errors } }; @@ -243,6 +390,7 @@ const handleAbortError = async (res, req, error, data) => { module.exports = { handleAbort, - createAbortController, handleAbortError, + createAbortController, + cleanupAbortController, }; diff --git a/api/server/middleware/checkBan.js b/api/server/middleware/checkBan.js index 67540bb009..4e0593192a 100644 --- a/api/server/middleware/checkBan.js +++ b/api/server/middleware/checkBan.js @@ -1,4 +1,4 @@ -const Keyv = require('keyv'); +const { Keyv } = require('keyv'); const uap = require('ua-parser-js'); const { ViolationTypes } = require('librechat-data-provider'); const { isEnabled, removePorts } = require('~/server/utils'); diff --git a/api/server/middleware/concurrentLimiter.js b/api/server/middleware/concurrentLimiter.js index 21b3a86903..73de65dd25 100644 --- a/api/server/middleware/concurrentLimiter.js +++ b/api/server/middleware/concurrentLimiter.js @@ -1,4 +1,4 @@ -const { Time } = require('librechat-data-provider'); +const { Time, CacheKeys } = require('librechat-data-provider'); const clearPendingReq = require('~/cache/clearPendingReq'); const { logViolation, getLogStores } = require('~/cache'); const { isEnabled } = require('~/server/utils'); @@ -25,7 +25,7 @@ const { * @throws {Error} Throws an error if the user exceeds the concurrent request limit. 
*/ const concurrentLimiter = async (req, res, next) => { - const namespace = 'pending_req'; + const namespace = CacheKeys.PENDING_REQ; const cache = getLogStores(namespace); if (!cache) { return next(); diff --git a/api/server/middleware/limiters/importLimiters.js b/api/server/middleware/limiters/importLimiters.js index 5e50046a30..f353f5e996 100644 --- a/api/server/middleware/limiters/importLimiters.js +++ b/api/server/middleware/limiters/importLimiters.js @@ -1,10 +1,9 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); +const ioredisClient = require('~/cache/ioredisClient'); const logViolation = require('~/cache/logViolation'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logger } = require('~/config'); const getEnvironmentVariables = () => { @@ -67,11 +66,9 @@ const createImportLimiters = () => { }, }; - if (isEnabled(process.env.USE_REDIS)) { + if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for import rate limiters.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); + const sendCommand = (...args) => ioredisClient.call(...args); const ipStore = new RedisStore({ sendCommand, prefix: 'import_ip_limiter:', diff --git a/api/server/middleware/limiters/loginLimiter.js b/api/server/middleware/limiters/loginLimiter.js index 8cf10ccb12..d57af29414 100644 --- a/api/server/middleware/limiters/loginLimiter.js +++ b/api/server/middleware/limiters/loginLimiter.js @@ -1,8 +1,7 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { removePorts, isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); +const ioredisClient = 
require('~/cache/ioredisClient'); const { logViolation } = require('~/cache'); const { logger } = require('~/config'); @@ -31,13 +30,10 @@ const limiterOptions = { keyGenerator: removePorts, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for login rate limiter.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); const store = new RedisStore({ - sendCommand, + sendCommand: (...args) => ioredisClient.call(...args), prefix: 'login_limiter:', }); limiterOptions.store = store; diff --git a/api/server/middleware/limiters/messageLimiters.js b/api/server/middleware/limiters/messageLimiters.js index fe4f75a9c6..4191c9fe7c 100644 --- a/api/server/middleware/limiters/messageLimiters.js +++ b/api/server/middleware/limiters/messageLimiters.js @@ -1,9 +1,8 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const denyRequest = require('~/server/middleware/denyRequest'); +const ioredisClient = require('~/cache/ioredisClient'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logViolation } = require('~/cache'); const { logger } = require('~/config'); @@ -63,11 +62,9 @@ const userLimiterOptions = { }, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for message rate limiters.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); + const sendCommand = (...args) => ioredisClient.call(...args); const ipStore = new RedisStore({ sendCommand, prefix: 'message_ip_limiter:', diff --git a/api/server/middleware/limiters/registerLimiter.js b/api/server/middleware/limiters/registerLimiter.js index 
f9bf1215cd..7d38b3044e 100644 --- a/api/server/middleware/limiters/registerLimiter.js +++ b/api/server/middleware/limiters/registerLimiter.js @@ -1,8 +1,7 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { removePorts, isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); +const ioredisClient = require('~/cache/ioredisClient'); const { logViolation } = require('~/cache'); const { logger } = require('~/config'); @@ -31,13 +30,10 @@ const limiterOptions = { keyGenerator: removePorts, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for register rate limiter.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); const store = new RedisStore({ - sendCommand, + sendCommand: (...args) => ioredisClient.call(...args), prefix: 'register_limiter:', }); limiterOptions.store = store; diff --git a/api/server/middleware/limiters/resetPasswordLimiter.js b/api/server/middleware/limiters/resetPasswordLimiter.js index 9f56bd7949..673b23e8e5 100644 --- a/api/server/middleware/limiters/resetPasswordLimiter.js +++ b/api/server/middleware/limiters/resetPasswordLimiter.js @@ -1,9 +1,8 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); const { removePorts, isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); +const ioredisClient = require('~/cache/ioredisClient'); const { logViolation } = require('~/cache'); const { logger } = require('~/config'); @@ -36,13 +35,10 @@ const limiterOptions = { keyGenerator: removePorts, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && 
ioredisClient) { logger.debug('Using Redis for reset password rate limiter.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); const store = new RedisStore({ - sendCommand, + sendCommand: (...args) => ioredisClient.call(...args), prefix: 'reset_password_limiter:', }); limiterOptions.store = store; diff --git a/api/server/middleware/limiters/sttLimiters.js b/api/server/middleware/limiters/sttLimiters.js index f9304637c4..72ed3af6a3 100644 --- a/api/server/middleware/limiters/sttLimiters.js +++ b/api/server/middleware/limiters/sttLimiters.js @@ -1,10 +1,9 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); +const ioredisClient = require('~/cache/ioredisClient'); const logViolation = require('~/cache/logViolation'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logger } = require('~/config'); const getEnvironmentVariables = () => { @@ -67,11 +66,9 @@ const createSTTLimiters = () => { }, }; - if (isEnabled(process.env.USE_REDIS)) { + if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for STT rate limiters.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); + const sendCommand = (...args) => ioredisClient.call(...args); const ipStore = new RedisStore({ sendCommand, prefix: 'stt_ip_limiter:', diff --git a/api/server/middleware/limiters/toolCallLimiter.js b/api/server/middleware/limiters/toolCallLimiter.js index 7a867b5bcd..482744a3e9 100644 --- a/api/server/middleware/limiters/toolCallLimiter.js +++ b/api/server/middleware/limiters/toolCallLimiter.js @@ -1,10 +1,9 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); 
const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); +const ioredisClient = require('~/cache/ioredisClient'); const logViolation = require('~/cache/logViolation'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logger } = require('~/config'); const handler = async (req, res) => { @@ -29,13 +28,10 @@ const limiterOptions = { }, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for tool call rate limiter.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); const store = new RedisStore({ - sendCommand, + sendCommand: (...args) => ioredisClient.call(...args), prefix: 'tool_call_limiter:', }); limiterOptions.store = store; diff --git a/api/server/middleware/limiters/ttsLimiters.js b/api/server/middleware/limiters/ttsLimiters.js index e13aaf48c3..9054a6beb1 100644 --- a/api/server/middleware/limiters/ttsLimiters.js +++ b/api/server/middleware/limiters/ttsLimiters.js @@ -1,10 +1,9 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); +const ioredisClient = require('~/cache/ioredisClient'); const logViolation = require('~/cache/logViolation'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logger } = require('~/config'); const getEnvironmentVariables = () => { @@ -67,11 +66,9 @@ const createTTSLimiters = () => { }, }; - if (isEnabled(process.env.USE_REDIS)) { + if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for TTS rate limiters.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) 
=> client.call(...args); + const sendCommand = (...args) => ioredisClient.call(...args); const ipStore = new RedisStore({ sendCommand, prefix: 'tts_ip_limiter:', diff --git a/api/server/middleware/limiters/uploadLimiters.js b/api/server/middleware/limiters/uploadLimiters.js index 9fffface61..d9049f898e 100644 --- a/api/server/middleware/limiters/uploadLimiters.js +++ b/api/server/middleware/limiters/uploadLimiters.js @@ -1,10 +1,9 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); +const ioredisClient = require('~/cache/ioredisClient'); const logViolation = require('~/cache/logViolation'); const { isEnabled } = require('~/server/utils'); -const keyvRedis = require('~/cache/keyvRedis'); const { logger } = require('~/config'); const getEnvironmentVariables = () => { @@ -72,11 +71,9 @@ const createFileLimiters = () => { }, }; - if (isEnabled(process.env.USE_REDIS)) { + if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for file upload rate limiters.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); + const sendCommand = (...args) => ioredisClient.call(...args); const ipStore = new RedisStore({ sendCommand, prefix: 'file_upload_ip_limiter:', diff --git a/api/server/middleware/limiters/verifyEmailLimiter.js b/api/server/middleware/limiters/verifyEmailLimiter.js index 0b245afbd1..73bfa2daf3 100644 --- a/api/server/middleware/limiters/verifyEmailLimiter.js +++ b/api/server/middleware/limiters/verifyEmailLimiter.js @@ -1,9 +1,8 @@ -const Keyv = require('keyv'); const rateLimit = require('express-rate-limit'); const { RedisStore } = require('rate-limit-redis'); const { ViolationTypes } = require('librechat-data-provider'); const { removePorts, isEnabled } = require('~/server/utils'); -const keyvRedis = 
require('~/cache/keyvRedis'); +const ioredisClient = require('~/cache/ioredisClient'); const { logViolation } = require('~/cache'); const { logger } = require('~/config'); @@ -36,13 +35,10 @@ const limiterOptions = { keyGenerator: removePorts, }; -if (isEnabled(process.env.USE_REDIS)) { +if (isEnabled(process.env.USE_REDIS) && ioredisClient) { logger.debug('Using Redis for verify email rate limiter.'); - const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; - const sendCommand = (...args) => client.call(...args); const store = new RedisStore({ - sendCommand, + sendCommand: (...args) => ioredisClient.call(...args), prefix: 'verify_email_limiter:', }); limiterOptions.store = store; diff --git a/api/server/routes/actions.js b/api/server/routes/actions.js index 28845e3f15..dc474d1a67 100644 --- a/api/server/routes/actions.js +++ b/api/server/routes/actions.js @@ -1,5 +1,6 @@ const express = require('express'); const jwt = require('jsonwebtoken'); +const { CacheKeys } = require('librechat-data-provider'); const { getAccessToken } = require('~/server/services/TokenService'); const { logger, getFlowStateManager } = require('~/config'); const { getLogStores } = require('~/cache'); @@ -19,8 +20,8 @@ const JWT_SECRET = process.env.JWT_SECRET; router.get('/:action_id/oauth/callback', async (req, res) => { const { action_id } = req.params; const { code, state } = req.query; - - const flowManager = getFlowStateManager(getLogStores); + const flowsCache = getLogStores(CacheKeys.FLOWS); + const flowManager = getFlowStateManager(flowsCache); let identifier = action_id; try { let decodedState; diff --git a/api/server/routes/agents/actions.js b/api/server/routes/agents/actions.js index 786f44dd8e..5413bc1d68 100644 --- a/api/server/routes/agents/actions.js +++ b/api/server/routes/agents/actions.js @@ -58,7 +58,7 @@ router.post('/:agent_id', async (req, res) => { } let { domain } = metadata; - domain = await domainParser(req, domain, true); + domain = 
await domainParser(domain, true); if (!domain) { return res.status(400).json({ message: 'No domain provided' }); @@ -164,7 +164,7 @@ router.delete('/:agent_id/:action_id', async (req, res) => { return true; }); - domain = await domainParser(req, domain, true); + domain = await domainParser(domain, true); if (!domain) { return res.status(400).json({ message: 'No domain provided' }); diff --git a/api/server/routes/agents/chat.js b/api/server/routes/agents/chat.js index fe50fdc765..ef66ef7896 100644 --- a/api/server/routes/agents/chat.js +++ b/api/server/routes/agents/chat.js @@ -2,7 +2,6 @@ const express = require('express'); const { PermissionTypes, Permissions } = require('librechat-data-provider'); const { setHeaders, - handleAbort, moderateText, // validateModel, generateCheckAccess, @@ -16,7 +15,6 @@ const addTitle = require('~/server/services/Endpoints/agents/title'); const router = express.Router(); router.use(moderateText); -router.post('/abort', handleAbort()); const checkAgentAccess = generateCheckAccess(PermissionTypes.AGENTS, [Permissions.USE]); diff --git a/api/server/routes/ask/addToCache.js b/api/server/routes/ask/addToCache.js index 6e21edd2b8..a2f427098f 100644 --- a/api/server/routes/ask/addToCache.js +++ b/api/server/routes/ask/addToCache.js @@ -1,4 +1,4 @@ -const Keyv = require('keyv'); +const { Keyv } = require('keyv'); const { KeyvFile } = require('keyv-file'); const { logger } = require('~/config'); diff --git a/api/server/routes/ask/anthropic.js b/api/server/routes/ask/anthropic.js index a08d1d2570..afe1720d84 100644 --- a/api/server/routes/ask/anthropic.js +++ b/api/server/routes/ask/anthropic.js @@ -11,8 +11,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/ask/custom.js b/api/server/routes/ask/custom.js index 668a9902cb..8fc343cf17 100644 --- a/api/server/routes/ask/custom.js +++ b/api/server/routes/ask/custom.js @@ -3,7 +3,6 @@ const 
AskController = require('~/server/controllers/AskController'); const { initializeClient } = require('~/server/services/Endpoints/custom'); const { addTitle } = require('~/server/services/Endpoints/openAI'); const { - handleAbort, setHeaders, validateModel, validateEndpoint, @@ -12,8 +11,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/ask/google.js b/api/server/routes/ask/google.js index 2b3378bf6c..16c7e265f4 100644 --- a/api/server/routes/ask/google.js +++ b/api/server/routes/ask/google.js @@ -3,7 +3,6 @@ const AskController = require('~/server/controllers/AskController'); const { initializeClient, addTitle } = require('~/server/services/Endpoints/google'); const { setHeaders, - handleAbort, validateModel, validateEndpoint, buildEndpointOption, @@ -11,8 +10,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/ask/gptPlugins.js b/api/server/routes/ask/gptPlugins.js index 036654f845..a40022848a 100644 --- a/api/server/routes/ask/gptPlugins.js +++ b/api/server/routes/ask/gptPlugins.js @@ -20,7 +20,6 @@ const { logger } = require('~/config'); const router = express.Router(); router.use(moderateText); -router.post('/abort', handleAbort()); router.post( '/', @@ -196,7 +195,8 @@ router.post( logger.debug('[/ask/gptPlugins]', response); - const { conversation = {} } = await client.responsePromise; + const { conversation = {} } = await response.databasePromise; + delete response.databasePromise; conversation.title = conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; diff --git a/api/server/routes/ask/openAI.js b/api/server/routes/ask/openAI.js index 5083a08b10..dadf00def4 100644 --- a/api/server/routes/ask/openAI.js +++ b/api/server/routes/ask/openAI.js @@ -12,7 +12,6 @@ const { const router = express.Router(); router.use(moderateText); -router.post('/abort', handleAbort()); router.post( '/', diff --git a/api/server/routes/assistants/actions.js b/api/server/routes/assistants/actions.js index 9f4db5d6b8..3dc3923503 100644 --- a/api/server/routes/assistants/actions.js +++ b/api/server/routes/assistants/actions.js @@ -36,7 +36,7 @@ router.post('/:assistant_id', async (req, res) => { } let { domain } = metadata; - domain = await domainParser(req, domain, true); + domain = await domainParser(domain, true); if (!domain) { return res.status(400).json({ message: 'No domain provided' }); @@ -172,7 +172,7 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => { return true; }); - domain = await domainParser(req, domain, true); + domain = await domainParser(domain, true); if (!domain) { return res.status(400).json({ message: 'No domain provided' }); diff --git a/api/server/routes/bedrock/chat.js b/api/server/routes/bedrock/chat.js index 11db89f07e..263ca96002 100644 --- a/api/server/routes/bedrock/chat.js +++ b/api/server/routes/bedrock/chat.js @@ -14,7 +14,6 @@ const AgentController = require('~/server/controllers/agents/request'); const addTitle = require('~/server/services/Endpoints/agents/title'); router.use(moderateText); -router.post('/abort', handleAbort()); /** * @route POST / diff --git a/api/server/routes/config.js b/api/server/routes/config.js index e1e8ba763b..ebafb05c30 100644 --- a/api/server/routes/config.js +++ b/api/server/routes/config.js @@ -82,6 +82,7 @@ router.get('/', async function (req, res) { analyticsGtmId: process.env.ANALYTICS_GTM_ID, instanceProjectId: instanceProject._id.toString(), bundlerURL: process.env.SANDPACK_BUNDLER_URL, + staticBundlerURL: 
process.env.SANDPACK_STATIC_BUNDLER_URL, }; if (ldap) { diff --git a/api/server/routes/convos.js b/api/server/routes/convos.js index a4d81e24e6..2473eb68f9 100644 --- a/api/server/routes/convos.js +++ b/api/server/routes/convos.js @@ -1,16 +1,17 @@ const multer = require('multer'); const express = require('express'); const { CacheKeys, EModelEndpoint } = require('librechat-data-provider'); -const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation'); +const { getConvosByCursor, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation'); const { forkConversation, duplicateConversation } = require('~/server/utils/import/fork'); const { storage, importFileFilter } = require('~/server/routes/files/multer'); const requireJwtAuth = require('~/server/middleware/requireJwtAuth'); const { importConversations } = require('~/server/utils/import'); const { createImportLimiters } = require('~/server/middleware'); const { deleteToolCalls } = require('~/models/ToolCall'); +const { isEnabled, sleep } = require('~/server/utils'); const getLogStores = require('~/cache/getLogStores'); -const { sleep } = require('~/server/utils'); const { logger } = require('~/config'); + const assistantClients = { [EModelEndpoint.azureAssistants]: require('~/server/services/Endpoints/azureAssistants'), [EModelEndpoint.assistants]: require('~/server/services/Endpoints/assistants'), @@ -20,28 +21,30 @@ const router = express.Router(); router.use(requireJwtAuth); router.get('/', async (req, res) => { - let pageNumber = req.query.pageNumber || 1; - pageNumber = parseInt(pageNumber, 10); + const limit = parseInt(req.query.limit, 10) || 25; + const cursor = req.query.cursor; + const isArchived = isEnabled(req.query.isArchived); + const search = req.query.search ? 
decodeURIComponent(req.query.search) : undefined; + const order = req.query.order || 'desc'; - if (isNaN(pageNumber) || pageNumber < 1) { - return res.status(400).json({ error: 'Invalid page number' }); - } - - let pageSize = req.query.pageSize || 25; - pageSize = parseInt(pageSize, 10); - - if (isNaN(pageSize) || pageSize < 1) { - return res.status(400).json({ error: 'Invalid page size' }); - } - const isArchived = req.query.isArchived === 'true'; let tags; if (req.query.tags) { tags = Array.isArray(req.query.tags) ? req.query.tags : [req.query.tags]; - } else { - tags = undefined; } - res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived, tags)); + try { + const result = await getConvosByCursor(req.user.id, { + cursor, + limit, + isArchived, + tags, + search, + order, + }); + res.status(200).json(result); + } catch (error) { + res.status(500).json({ error: 'Error fetching conversations' }); + } }); router.get('/:conversationId', async (req, res) => { @@ -76,22 +79,28 @@ router.post('/gen_title', async (req, res) => { } }); -router.post('/clear', async (req, res) => { +router.delete('/', async (req, res) => { let filter = {}; const { conversationId, source, thread_id, endpoint } = req.body.arg; - if (conversationId) { - filter = { conversationId }; + + // Prevent deletion of all conversations + if (!conversationId && !source && !thread_id && !endpoint) { + return res.status(400).json({ + error: 'no parameters provided', + }); } - if (source === 'button' && !conversationId) { + if (conversationId) { + filter = { conversationId }; + } else if (source === 'button') { return res.status(200).send('No conversationId provided'); } if ( - typeof endpoint != 'undefined' && + typeof endpoint !== 'undefined' && Object.prototype.propertyIsEnumerable.call(assistantClients, endpoint) ) { - /** @type {{ openai: OpenAI}} */ + /** @type {{ openai: OpenAI }} */ const { openai } = await assistantClients[endpoint].initializeClient({ req, res }); try 
{ const response = await openai.beta.threads.del(thread_id); @@ -101,9 +110,6 @@ router.post('/clear', async (req, res) => { } } - // for debugging deletion source - // logger.debug('source:', source); - try { const dbResponse = await deleteConvos(req.user.id, filter); await deleteToolCalls(req.user.id, filter.conversationId); @@ -114,6 +120,17 @@ router.post('/clear', async (req, res) => { } }); +router.delete('/all', async (req, res) => { + try { + const dbResponse = await deleteConvos(req.user.id, {}); + await deleteToolCalls(req.user.id); + res.status(201).json(dbResponse); + } catch (error) { + logger.error('Error clearing conversations', error); + res.status(500).send('Error clearing conversations'); + } +}); + router.post('/update', async (req, res) => { const update = req.body.arg; diff --git a/api/server/routes/edit/anthropic.js b/api/server/routes/edit/anthropic.js index c7bf128d7c..704a9f4ea4 100644 --- a/api/server/routes/edit/anthropic.js +++ b/api/server/routes/edit/anthropic.js @@ -3,7 +3,6 @@ const EditController = require('~/server/controllers/EditController'); const { initializeClient } = require('~/server/services/Endpoints/anthropic'); const { setHeaders, - handleAbort, validateModel, validateEndpoint, buildEndpointOption, @@ -11,8 +10,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/edit/custom.js b/api/server/routes/edit/custom.js index 0bf97ba180..a6fd804763 100644 --- a/api/server/routes/edit/custom.js +++ b/api/server/routes/edit/custom.js @@ -12,8 +12,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/edit/google.js b/api/server/routes/edit/google.js index 7482f11b4c..187f4f6158 100644 --- a/api/server/routes/edit/google.js +++ b/api/server/routes/edit/google.js @@ -3,7 +3,6 @@ const EditController = 
require('~/server/controllers/EditController'); const { initializeClient } = require('~/server/services/Endpoints/google'); const { setHeaders, - handleAbort, validateModel, validateEndpoint, buildEndpointOption, @@ -11,8 +10,6 @@ const { const router = express.Router(); -router.post('/abort', handleAbort()); - router.post( '/', validateEndpoint, diff --git a/api/server/routes/edit/gptPlugins.js b/api/server/routes/edit/gptPlugins.js index 5547a1fcdf..94d9b91d0b 100644 --- a/api/server/routes/edit/gptPlugins.js +++ b/api/server/routes/edit/gptPlugins.js @@ -2,7 +2,6 @@ const express = require('express'); const { getResponseSender } = require('librechat-data-provider'); const { setHeaders, - handleAbort, moderateText, validateModel, handleAbortError, @@ -19,7 +18,6 @@ const { logger } = require('~/config'); const router = express.Router(); router.use(moderateText); -router.post('/abort', handleAbort()); router.post( '/', @@ -173,7 +171,8 @@ router.post( logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response); - const { conversation = {} } = await client.responsePromise; + const { conversation = {} } = await response.databasePromise; + delete response.databasePromise; conversation.title = conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; diff --git a/api/server/routes/edit/openAI.js b/api/server/routes/edit/openAI.js index ae26b235c7..ee25a42ee3 100644 --- a/api/server/routes/edit/openAI.js +++ b/api/server/routes/edit/openAI.js @@ -2,7 +2,6 @@ const express = require('express'); const EditController = require('~/server/controllers/EditController'); const { initializeClient } = require('~/server/services/Endpoints/openAI'); const { - handleAbort, setHeaders, validateModel, validateEndpoint, @@ -12,7 +11,6 @@ const { const router = express.Router(); router.use(moderateText); -router.post('/abort', handleAbort()); router.post( '/', diff --git a/api/server/routes/files/files.js b/api/server/routes/files/files.js index 9040c2824c..5a520bdb65 100644 --- a/api/server/routes/files/files.js +++ b/api/server/routes/files/files.js @@ -21,6 +21,7 @@ const { getOpenAIClient } = require('~/server/controllers/assistants/helpers'); const { loadAuthValues } = require('~/server/services/Tools/credentials'); const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud'); const { getFiles, batchUpdateFiles } = require('~/models/File'); +const { getAssistant } = require('~/models/Assistant'); const { getAgent } = require('~/models/Agent'); const { getLogStores } = require('~/cache'); const { logger } = require('~/config'); @@ -94,7 +95,7 @@ router.delete('/', async (req, res) => { }); } - /* Handle entity unlinking even if no valid files to delete */ + /* Handle agent unlinking even if no valid files to delete */ if (req.body.agent_id && req.body.tool_resource && dbFiles.length === 0) { const agent = await getAgent({ id: req.body.agent_id, @@ -104,7 +105,21 @@ router.delete('/', async (req, res) => { const agentFiles = files.filter((f) => toolResourceFiles.includes(f.file_id)); await processDeleteRequest({ req, files: agentFiles }); - res.status(200).json({ message: 'File associations removed successfully' }); + res.status(200).json({ message: 'File associations 
removed successfully from agent' }); + return; + } + + /* Handle assistant unlinking even if no valid files to delete */ + if (req.body.assistant_id && req.body.tool_resource && dbFiles.length === 0) { + const assistant = await getAssistant({ + id: req.body.assistant_id, + }); + + const toolResourceFiles = assistant.tool_resources?.[req.body.tool_resource]?.file_ids ?? []; + const assistantFiles = files.filter((f) => toolResourceFiles.includes(f.file_id)); + + await processDeleteRequest({ req, files: assistantFiles }); + res.status(200).json({ message: 'File associations removed successfully from assistant' }); return; } diff --git a/api/server/routes/index.js b/api/server/routes/index.js index 4b34029c7b..449759383d 100644 --- a/api/server/routes/index.js +++ b/api/server/routes/index.js @@ -10,6 +10,7 @@ const balance = require('./balance'); const plugins = require('./plugins'); const bedrock = require('./bedrock'); const actions = require('./actions'); +const banner = require('./banner'); const search = require('./search'); const models = require('./models'); const convos = require('./convos'); @@ -25,7 +26,6 @@ const edit = require('./edit'); const keys = require('./keys'); const user = require('./user'); const ask = require('./ask'); -const banner = require('./banner'); module.exports = { ask, @@ -38,13 +38,14 @@ module.exports = { oauth, files, share, + banner, agents, - bedrock, convos, search, - prompts, config, models, + bedrock, + prompts, plugins, actions, presets, @@ -55,5 +56,4 @@ module.exports = { assistants, categories, staticRoute, - banner, }; diff --git a/api/server/routes/messages.js b/api/server/routes/messages.js index 54c4aab1c2..d5980ae55b 100644 --- a/api/server/routes/messages.js +++ b/api/server/routes/messages.js @@ -10,12 +10,90 @@ const { } = require('~/models'); const { findAllArtifacts, replaceArtifactContent } = require('~/server/services/Artifacts/update'); const { requireJwtAuth, validateMessageReq } = 
require('~/server/middleware'); +const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc'); +const { getConvosQueried } = require('~/models/Conversation'); const { countTokens } = require('~/server/utils'); +const { Message } = require('~/models/Message'); const { logger } = require('~/config'); const router = express.Router(); router.use(requireJwtAuth); +router.get('/', async (req, res) => { + try { + const user = req.user.id ?? ''; + const { + cursor = null, + sortBy = 'createdAt', + sortDirection = 'desc', + pageSize: pageSizeRaw, + conversationId, + messageId, + search, + } = req.query; + const pageSize = parseInt(pageSizeRaw, 10) || 25; + + let response; + const sortField = ['endpoint', 'createdAt', 'updatedAt'].includes(sortBy) + ? sortBy + : 'createdAt'; + const sortOrder = sortDirection === 'asc' ? 1 : -1; + + if (conversationId && messageId) { + const message = await Message.findOne({ conversationId, messageId, user: user }).lean(); + response = { messages: message ? [message] : [], nextCursor: null }; + } else if (conversationId) { + const filter = { conversationId, user: user }; + if (cursor) { + filter[sortField] = sortOrder === 1 ? { $gt: cursor } : { $lt: cursor }; + } + const messages = await Message.find(filter) + .sort({ [sortField]: sortOrder }) + .limit(pageSize + 1) + .lean(); + const nextCursor = messages.length > pageSize ? 
messages.pop()[sortField] : null; + response = { messages, nextCursor }; + } else if (search) { + const searchResults = await Message.meiliSearch(search, undefined, true); + + const messages = searchResults.hits || []; + + const result = await getConvosQueried(req.user.id, messages, cursor); + + const activeMessages = []; + for (let i = 0; i < messages.length; i++) { + let message = messages[i]; + if (message.conversationId.includes('--')) { + message.conversationId = cleanUpPrimaryKeyValue(message.conversationId); + } + if (result.convoMap[message.conversationId]) { + const convo = result.convoMap[message.conversationId]; + + const dbMessage = await getMessage({ user, messageId: message.messageId }); + activeMessages.push({ + ...message, + title: convo.title, + conversationId: message.conversationId, + model: convo.model, + isCreatedByUser: dbMessage?.isCreatedByUser, + endpoint: dbMessage?.endpoint, + iconURL: dbMessage?.iconURL, + }); + } + } + + response = { messages: activeMessages, nextCursor: null }; + } else { + response = { messages: [], nextCursor: null }; + } + + res.status(200).json(response); + } catch (error) { + logger.error('Error fetching messages:', error); + res.status(500).json({ error: 'Internal server error' }); + } +}); + router.post('/artifact/:messageId', async (req, res) => { try { const { messageId } = req.params; diff --git a/api/server/routes/search.js b/api/server/routes/search.js index 68cff7532b..5c7846aee1 100644 --- a/api/server/routes/search.js +++ b/api/server/routes/search.js @@ -1,93 +1,17 @@ -const Keyv = require('keyv'); const express = require('express'); const { MeiliSearch } = require('meilisearch'); -const { Conversation, getConvosQueried } = require('~/models/Conversation'); const requireJwtAuth = require('~/server/middleware/requireJwtAuth'); -const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc'); -const { reduceHits } = require('~/lib/utils/reduceHits'); const { isEnabled } = require('~/server/utils'); -const 
{ Message } = require('~/models/Message'); -const keyvRedis = require('~/cache/keyvRedis'); -const { logger } = require('~/config'); const router = express.Router(); -const expiration = 60 * 1000; -const cache = isEnabled(process.env.USE_REDIS) - ? new Keyv({ store: keyvRedis }) - : new Keyv({ namespace: 'search', ttl: expiration }); - router.use(requireJwtAuth); -router.get('/sync', async function (req, res) { - await Message.syncWithMeili(); - await Conversation.syncWithMeili(); - res.send('synced'); -}); - -router.get('/', async function (req, res) { - try { - let user = req.user.id ?? ''; - const { q } = req.query; - const pageNumber = req.query.pageNumber || 1; - const key = `${user}:search:${q}`; - const cached = await cache.get(key); - if (cached) { - logger.debug('[/search] cache hit: ' + key); - const { pages, pageSize, messages } = cached; - res - .status(200) - .send({ conversations: cached[pageNumber], pages, pageNumber, pageSize, messages }); - return; - } - - const messages = (await Message.meiliSearch(q, undefined, true)).hits; - const titles = (await Conversation.meiliSearch(q)).hits; - - const sortedHits = reduceHits(messages, titles); - const result = await getConvosQueried(user, sortedHits, pageNumber); - - const activeMessages = []; - for (let i = 0; i < messages.length; i++) { - let message = messages[i]; - if (message.conversationId.includes('--')) { - message.conversationId = cleanUpPrimaryKeyValue(message.conversationId); - } - if (result.convoMap[message.conversationId]) { - const convo = result.convoMap[message.conversationId]; - const { title, chatGptLabel, model } = convo; - message = { ...message, ...{ title, chatGptLabel, model } }; - activeMessages.push(message); - } - } - result.messages = activeMessages; - if (result.cache) { - result.cache.messages = activeMessages; - cache.set(key, result.cache, expiration); - delete result.cache; - } - delete result.convoMap; - - res.status(200).send(result); - } catch (error) { - 
logger.error('[/search] Error while searching messages & conversations', error); - res.status(500).send({ message: 'Error searching' }); - } -}); - -router.get('/test', async function (req, res) { - const { q } = req.query; - const messages = ( - await Message.meiliSearch(q, { attributesToHighlight: ['text'] }, true) - ).hits.map((message) => { - const { _formatted, ...rest } = message; - return { ...rest, searchResult: true, text: _formatted.text }; - }); - res.send(messages); -}); - router.get('/enable', async function (req, res) { - let result = false; + if (!isEnabled(process.env.SEARCH)) { + return res.send(false); + } + try { const client = new MeiliSearch({ host: process.env.MEILI_HOST, @@ -95,8 +19,7 @@ router.get('/enable', async function (req, res) { }); const { status } = await client.health(); - result = status === 'available' && !!process.env.SEARCH; - return res.send(result); + return res.send(status === 'available'); } catch (error) { return res.send(false); } diff --git a/api/server/services/ActionService.js b/api/server/services/ActionService.js index 12e30fd8b9..c8a7955427 100644 --- a/api/server/services/ActionService.js +++ b/api/server/services/ActionService.js @@ -50,7 +50,7 @@ const validateAndUpdateTool = async ({ req, tool, assistant_id }) => { return null; } - const parsedDomain = await domainParser(req, domain, true); + const parsedDomain = await domainParser(domain, true); if (!parsedDomain) { return null; @@ -66,16 +66,14 @@ const validateAndUpdateTool = async ({ req, tool, assistant_id }) => { * * Necessary due to `[a-zA-Z0-9_-]*` Regex Validation, limited to a 64-character maximum. * - * @param {Express.Request} req - The Express Request object. * @param {string} domain - The domain name to encode/decode. * @param {boolean} inverse - False to decode from base64, true to encode to base64. * @returns {Promise} Encoded or decoded domain string. 
*/ -async function domainParser(req, domain, inverse = false) { +async function domainParser(domain, inverse = false) { if (!domain) { return; } - const domainsCache = getLogStores(CacheKeys.ENCODED_DOMAINS); const cachedDomain = await domainsCache.get(domain); if (inverse && cachedDomain) { @@ -122,7 +120,7 @@ async function loadActionSets(searchParams) { * Creates a general tool for an entire action set. * * @param {Object} params - The parameters for loading action sets. - * @param {ServerRequest} params.req + * @param {string} params.userId * @param {ServerResponse} params.res * @param {Action} params.action - The action set. Necessary for decrypting authentication values. * @param {ActionRequest} params.requestBuilder - The ActionRequest builder class to execute the API call. @@ -133,7 +131,7 @@ async function loadActionSets(searchParams) { * @returns { Promise unknown}> } An object with `_call` method to execute the tool input. */ async function createActionTool({ - req, + userId, res, action, requestBuilder, @@ -148,13 +146,13 @@ async function createActionTool({ /** @type {import('librechat-data-provider').ActionMetadataRuntime} */ const metadata = action.metadata; const executor = requestBuilder.createExecutor(); - const preparedExecutor = executor.setParams(toolInput); + const preparedExecutor = executor.setParams(toolInput ?? {}); if (metadata.auth && metadata.auth.type !== AuthTypeEnum.None) { try { if (metadata.auth.type === AuthTypeEnum.OAuth && metadata.auth.authorization_url) { const action_id = action.action_id; - const identifier = `${req.user.id}:${action.action_id}`; + const identifier = `${userId}:${action.action_id}`; const requestLogin = async () => { const { args: _args, stepId, ...toolCall } = config.toolCall ?? 
{}; if (!stepId) { @@ -162,7 +160,7 @@ async function createActionTool({ } const statePayload = { nonce: nanoid(), - user: req.user.id, + user: userId, action_id, }; @@ -189,7 +187,8 @@ async function createActionTool({ expires_at: Date.now() + Time.TWO_MINUTES, }, }; - const flowManager = getFlowStateManager(getLogStores); + const flowsCache = getLogStores(CacheKeys.FLOWS); + const flowManager = getFlowStateManager(flowsCache); await flowManager.createFlowWithHandler( `${identifier}:oauth_login:${config.metadata.thread_id}:${config.metadata.run_id}`, 'oauth_login', @@ -206,7 +205,7 @@ async function createActionTool({ 'oauth', { state: stateToken, - userId: req.user.id, + userId: userId, client_url: metadata.auth.client_url, redirect_uri: `${process.env.DOMAIN_CLIENT}/api/actions/${action_id}/oauth/callback`, /** Encrypted values */ @@ -232,10 +231,10 @@ async function createActionTool({ }; const tokenPromises = []; - tokenPromises.push(findToken({ userId: req.user.id, type: 'oauth', identifier })); + tokenPromises.push(findToken({ userId, type: 'oauth', identifier })); tokenPromises.push( findToken({ - userId: req.user.id, + userId, type: 'oauth_refresh', identifier: `${identifier}:refresh`, }), @@ -258,14 +257,15 @@ async function createActionTool({ const refresh_token = await decryptV2(refreshTokenData.token); const refreshTokens = async () => await refreshAccessToken({ + userId, identifier, refresh_token, - userId: req.user.id, client_url: metadata.auth.client_url, encrypted_oauth_client_id: encrypted.oauth_client_id, encrypted_oauth_client_secret: encrypted.oauth_client_secret, }); - const flowManager = getFlowStateManager(getLogStores); + const flowsCache = getLogStores(CacheKeys.FLOWS); + const flowManager = getFlowStateManager(flowsCache); const refreshData = await flowManager.createFlowWithHandler( `${identifier}:refresh`, 'oauth_refresh', diff --git a/api/server/services/ActionService.spec.js b/api/server/services/ActionService.spec.js index 
8f9d67a9d1..f3b4423197 100644 --- a/api/server/services/ActionService.spec.js +++ b/api/server/services/ActionService.spec.js @@ -78,20 +78,20 @@ describe('domainParser', () => { // Non-azure request it('does not return domain as is if not azure', async () => { const domain = `example.com${actionDomainSeparator}test${actionDomainSeparator}`; - const result1 = await domainParser(reqNoAzure, domain, false); - const result2 = await domainParser(reqNoAzure, domain, true); + const result1 = await domainParser(domain, false); + const result2 = await domainParser(domain, true); expect(result1).not.toEqual(domain); expect(result2).not.toEqual(domain); }); // Test for Empty or Null Inputs it('returns undefined for null domain input', async () => { - const result = await domainParser(req, null, true); + const result = await domainParser(null, true); expect(result).toBeUndefined(); }); it('returns undefined for empty domain input', async () => { - const result = await domainParser(req, '', true); + const result = await domainParser('', true); expect(result).toBeUndefined(); }); @@ -102,7 +102,7 @@ describe('domainParser', () => { .toString('base64') .substring(0, Constants.ENCODED_DOMAIN_LENGTH); - await domainParser(req, domain, true); + await domainParser(domain, true); const cachedValue = await globalCache[encodedDomain]; expect(cachedValue).toEqual(Buffer.from(domain).toString('base64')); @@ -112,14 +112,14 @@ describe('domainParser', () => { it('encodes domain exactly at threshold without modification', async () => { const domain = 'a'.repeat(Constants.ENCODED_DOMAIN_LENGTH - TLD.length) + TLD; const expected = domain.replace(/\./g, actionDomainSeparator); - const result = await domainParser(req, domain, true); + const result = await domainParser(domain, true); expect(result).toEqual(expected); }); it('encodes domain just below threshold without modification', async () => { const domain = 'a'.repeat(Constants.ENCODED_DOMAIN_LENGTH - 1 - TLD.length) + TLD; const expected 
= domain.replace(/\./g, actionDomainSeparator); - const result = await domainParser(req, domain, true); + const result = await domainParser(domain, true); expect(result).toEqual(expected); }); @@ -129,7 +129,7 @@ describe('domainParser', () => { const encodedDomain = Buffer.from(unicodeDomain) .toString('base64') .substring(0, Constants.ENCODED_DOMAIN_LENGTH); - const result = await domainParser(req, unicodeDomain, true); + const result = await domainParser(unicodeDomain, true); expect(result).toEqual(encodedDomain); }); @@ -139,7 +139,6 @@ describe('domainParser', () => { globalCache[encodedDomain.substring(0, Constants.ENCODED_DOMAIN_LENGTH)] = encodedDomain; // Simulate caching const result = await domainParser( - req, encodedDomain.substring(0, Constants.ENCODED_DOMAIN_LENGTH), false, ); @@ -150,27 +149,27 @@ describe('domainParser', () => { it('returns domain with replaced separators if no cached domain exists', async () => { const domain = 'example.com'; const withSeparator = domain.replace(/\./g, actionDomainSeparator); - const result = await domainParser(req, withSeparator, false); + const result = await domainParser(withSeparator, false); expect(result).toEqual(domain); }); it('returns domain with replaced separators when inverse is false and under encoding length', async () => { const domain = 'examp.com'; const withSeparator = domain.replace(/\./g, actionDomainSeparator); - const result = await domainParser(req, withSeparator, false); + const result = await domainParser(withSeparator, false); expect(result).toEqual(domain); }); it('replaces periods with actionDomainSeparator when inverse is true and under encoding length', async () => { const domain = 'examp.com'; const expected = domain.replace(/\./g, actionDomainSeparator); - const result = await domainParser(req, domain, true); + const result = await domainParser(domain, true); expect(result).toEqual(expected); }); it('encodes domain when length is above threshold and inverse is true', async () => { 
const domain = 'a'.repeat(Constants.ENCODED_DOMAIN_LENGTH + 1).concat('.com'); - const result = await domainParser(req, domain, true); + const result = await domainParser(domain, true); expect(result).not.toEqual(domain); expect(result.length).toBeLessThanOrEqual(Constants.ENCODED_DOMAIN_LENGTH); }); @@ -180,20 +179,20 @@ describe('domainParser', () => { const encodedDomain = Buffer.from( originalDomain.replace(/\./g, actionDomainSeparator), ).toString('base64'); - const result = await domainParser(req, encodedDomain, false); + const result = await domainParser(encodedDomain, false); expect(result).toEqual(encodedDomain); }); it('decodes encoded value if cached and encoded value is provided, and inverse is false', async () => { const originalDomain = 'example.com'; - const encodedDomain = await domainParser(req, originalDomain, true); - const result = await domainParser(req, encodedDomain, false); + const encodedDomain = await domainParser(originalDomain, true); + const result = await domainParser(encodedDomain, false); expect(result).toEqual(originalDomain); }); it('handles invalid base64 encoded values gracefully', async () => { const invalidBase64Domain = 'not_base64_encoded'; - const result = await domainParser(req, invalidBase64Domain, false); + const result = await domainParser(invalidBase64Domain, false); expect(result).toEqual(invalidBase64Domain); }); }); diff --git a/api/server/services/AuthService.js b/api/server/services/AuthService.js index 6ad4a3acf7..0bb1e22cf8 100644 --- a/api/server/services/AuthService.js +++ b/api/server/services/AuthService.js @@ -56,7 +56,7 @@ const logoutUser = async (req, refreshToken) => { try { req.session.destroy(); } catch (destroyErr) { - logger.error('[logoutUser] Failed to destroy session.', destroyErr); + logger.debug('[logoutUser] Failed to destroy session.', destroyErr); } return { status: 200, message: 'Logout successful' }; diff --git a/api/server/services/Endpoints/agents/initialize.js 
b/api/server/services/Endpoints/agents/initialize.js index 0186541750..c9e363e815 100644 --- a/api/server/services/Endpoints/agents/initialize.js +++ b/api/server/services/Endpoints/agents/initialize.js @@ -3,8 +3,10 @@ const { Constants, ErrorTypes, EModelEndpoint, + EToolResources, getResponseSender, AgentCapabilities, + replaceSpecialVars, providerEndpointMap, } = require('librechat-data-provider'); const { @@ -41,12 +43,19 @@ const providerConfigMap = { }; /** - * @param {ServerRequest} req - * @param {Promise> | undefined} _attachments - * @param {AgentToolResources | undefined} _tool_resources + * @param {Object} params + * @param {ServerRequest} params.req + * @param {Promise> | undefined} [params.attachments] + * @param {Set} params.requestFileSet + * @param {AgentToolResources | undefined} [params.tool_resources] * @returns {Promise<{ attachments: Array | undefined, tool_resources: AgentToolResources | undefined }>} */ -const primeResources = async (req, _attachments, _tool_resources) => { +const primeResources = async ({ + req, + attachments: _attachments, + tool_resources: _tool_resources, + requestFileSet, +}) => { try { /** @type {Array | undefined} */ let attachments; @@ -54,7 +63,7 @@ const primeResources = async (req, _attachments, _tool_resources) => { const isOCREnabled = (req.app.locals?.[EModelEndpoint.agents]?.capabilities ?? []).includes( AgentCapabilities.ocr, ); - if (tool_resources.ocr?.file_ids && isOCREnabled) { + if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) { const context = await getFiles( { file_id: { $in: tool_resources.ocr.file_ids }, @@ -79,17 +88,28 @@ const primeResources = async (req, _attachments, _tool_resources) => { continue; } if (file.metadata?.fileIdentifier) { - const execute_code = tool_resources.execute_code ?? {}; + const execute_code = tool_resources[EToolResources.execute_code] ?? 
{}; if (!execute_code.files) { - tool_resources.execute_code = { ...execute_code, files: [] }; + tool_resources[EToolResources.execute_code] = { ...execute_code, files: [] }; } - tool_resources.execute_code.files.push(file); + tool_resources[EToolResources.execute_code].files.push(file); } else if (file.embedded === true) { - const file_search = tool_resources.file_search ?? {}; + const file_search = tool_resources[EToolResources.file_search] ?? {}; if (!file_search.files) { - tool_resources.file_search = { ...file_search, files: [] }; + tool_resources[EToolResources.file_search] = { ...file_search, files: [] }; } - tool_resources.file_search.files.push(file); + tool_resources[EToolResources.file_search].files.push(file); + } else if ( + requestFileSet.has(file.file_id) && + file.type.startsWith('image') && + file.height && + file.width + ) { + const image_edit = tool_resources[EToolResources.image_edit] ?? {}; + if (!image_edit.files) { + tool_resources[EToolResources.image_edit] = { ...image_edit, files: [] }; + } + tool_resources[EToolResources.image_edit].files.push(file); } attachments.push(file); @@ -146,7 +166,14 @@ const initializeAgentOptions = async ({ (agent.model_parameters?.resendFiles ?? true) === true ) { const fileIds = (await getConvoFiles(req.body.conversationId)) ?? 
[]; - const toolFiles = await getToolFilesByIds(fileIds); + /** @type {Set} */ + const toolResourceSet = new Set(); + for (const tool of agent.tools) { + if (EToolResources[tool]) { + toolResourceSet.add(EToolResources[tool]); + } + } + const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet); if (requestFiles.length || toolFiles.length) { currentFiles = await processFiles(requestFiles.concat(toolFiles)); } @@ -154,19 +181,26 @@ const initializeAgentOptions = async ({ currentFiles = await processFiles(requestFiles); } - const { attachments, tool_resources } = await primeResources( + const { attachments, tool_resources } = await primeResources({ req, - currentFiles, - agent.tool_resources, - ); - const { tools, toolContextMap } = await loadAgentTools({ - req, - res, - agent, - tool_resources, + attachments: currentFiles, + tool_resources: agent.tool_resources, + requestFileSet: new Set(requestFiles.map((file) => file.file_id)), }); const provider = agent.provider; + const { tools, toolContextMap } = await loadAgentTools({ + req, + res, + agent: { + id: agent.id, + tools: agent.tools, + provider, + model: agent.model, + }, + tool_resources, + }); + agent.endpoint = provider; let getOptions = providerConfigMap[provider]; if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) { @@ -199,6 +233,13 @@ const initializeAgentOptions = async ({ endpointOption: _endpointOption, }); + if ( + agent.endpoint === EModelEndpoint.azureOpenAI && + options.llmConfig?.azureOpenAIApiInstanceName == null + ) { + agent.provider = Providers.OPENAI; + } + if (options.provider != null) { agent.provider = options.provider; } @@ -213,6 +254,13 @@ const initializeAgentOptions = async ({ agent.model_parameters.model = agent.model; } + if (agent.instructions && agent.instructions !== '') { + agent.instructions = replaceSpecialVars({ + text: agent.instructions, + user: req.user, + }); + } + if (typeof agent.artifacts === 'string' && agent.artifacts !== '') { 
agent.additional_instructions = generateArtifactsPrompt({ endpoint: agent.provider, diff --git a/api/server/services/Endpoints/agents/title.js b/api/server/services/Endpoints/agents/title.js index f25746582e..ab171bc79d 100644 --- a/api/server/services/Endpoints/agents/title.js +++ b/api/server/services/Endpoints/agents/title.js @@ -2,7 +2,11 @@ const { CacheKeys } = require('librechat-data-provider'); const getLogStores = require('~/cache/getLogStores'); const { isEnabled } = require('~/server/utils'); const { saveConvo } = require('~/models'); +const { logger } = require('~/config'); +/** + * Add title to conversation in a way that avoids memory retention + */ const addTitle = async (req, { text, response, client }) => { const { TITLE_CONVO = true } = process.env ?? {}; if (!isEnabled(TITLE_CONVO)) { @@ -13,37 +17,55 @@ const addTitle = async (req, { text, response, client }) => { return; } - // If the request was aborted, don't generate the title. - if (client.abortController.signal.aborted) { - return; - } - const titleCache = getLogStores(CacheKeys.GEN_TITLE); const key = `${req.user.id}-${response.conversationId}`; - const responseText = - response?.content && Array.isArray(response?.content) - ? response.content.reduce((acc, block) => { - if (block?.type === 'text') { - return acc + block.text; - } - return acc; - }, '') - : (response?.content ?? response?.text ?? 
''); + /** @type {NodeJS.Timeout} */ + let timeoutId; + try { + const timeoutPromise = new Promise((_, reject) => { + timeoutId = setTimeout(() => reject(new Error('Title generation timeout')), 25000); + }).catch((error) => { + logger.error('Title error:', error); + }); - const title = await client.titleConvo({ - text, - responseText, - conversationId: response.conversationId, - }); - await titleCache.set(key, title, 120000); - await saveConvo( - req, - { - conversationId: response.conversationId, - title, - }, - { context: 'api/server/services/Endpoints/agents/title.js' }, - ); + let titlePromise; + let abortController = new AbortController(); + if (client && typeof client.titleConvo === 'function') { + titlePromise = Promise.race([ + client + .titleConvo({ + text, + abortController, + }) + .catch((error) => { + logger.error('Client title error:', error); + }), + timeoutPromise, + ]); + } else { + return; + } + + const title = await titlePromise; + if (!abortController.signal.aborted) { + abortController.abort(); + } + if (timeoutId) { + clearTimeout(timeoutId); + } + + await titleCache.set(key, title, 120000); + await saveConvo( + req, + { + conversationId: response.conversationId, + title, + }, + { context: 'api/server/services/Endpoints/agents/title.js' }, + ); + } catch (error) { + logger.error('Error generating title:', error); + } }; module.exports = addTitle; diff --git a/api/server/services/Endpoints/anthropic/initialize.js b/api/server/services/Endpoints/anthropic/initialize.js index 6c89eff463..d4c6dd1795 100644 --- a/api/server/services/Endpoints/anthropic/initialize.js +++ b/api/server/services/Endpoints/anthropic/initialize.js @@ -1,7 +1,7 @@ const { EModelEndpoint } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm'); -const { AnthropicClient } = require('~/app'); +const AnthropicClient = 
require('~/app/clients/AnthropicClient'); const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => { const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env; diff --git a/api/server/services/Endpoints/anthropic/title.js b/api/server/services/Endpoints/anthropic/title.js index 5c477632d2..0f9a5e97d0 100644 --- a/api/server/services/Endpoints/anthropic/title.js +++ b/api/server/services/Endpoints/anthropic/title.js @@ -13,11 +13,6 @@ const addTitle = async (req, { text, response, client }) => { return; } - // If the request was aborted, don't generate the title. - if (client.abortController.signal.aborted) { - return; - } - const titleCache = getLogStores(CacheKeys.GEN_TITLE); const key = `${req.user.id}-${response.conversationId}`; diff --git a/api/server/services/Endpoints/assistants/build.js b/api/server/services/Endpoints/assistants/build.js index 544567dd01..00a2abf606 100644 --- a/api/server/services/Endpoints/assistants/build.js +++ b/api/server/services/Endpoints/assistants/build.js @@ -3,7 +3,6 @@ const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); const { getAssistant } = require('~/models/Assistant'); const buildOptions = async (endpoint, parsedBody) => { - const { promptPrefix, assistant_id, iconURL, greeting, spec, artifacts, ...modelOptions } = parsedBody; const endpointOption = removeNullishValues({ diff --git a/api/server/services/Endpoints/bedrock/options.js b/api/server/services/Endpoints/bedrock/options.js index 6740ae882e..1936a8f483 100644 --- a/api/server/services/Endpoints/bedrock/options.js +++ b/api/server/services/Endpoints/bedrock/options.js @@ -8,7 +8,7 @@ const { removeNullishValues, } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); -const { sleep } = require('~/server/utils'); +const { createHandleLLMNewToken } = require('~/app/clients/generators'); const getOptions = async ({ req, 
overrideModel, endpointOption }) => { const { @@ -90,12 +90,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => { llmConfig.callbacks = [ { - handleLLMNewToken: async () => { - if (!streamRate) { - return; - } - await sleep(streamRate); - }, + handleLLMNewToken: createHandleLLMNewToken(streamRate), }, ]; diff --git a/api/server/services/Endpoints/custom/initialize.js b/api/server/services/Endpoints/custom/initialize.js index e98ec71980..592440db54 100644 --- a/api/server/services/Endpoints/custom/initialize.js +++ b/api/server/services/Endpoints/custom/initialize.js @@ -9,10 +9,11 @@ const { Providers } = require('@librechat/agents'); const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm'); const { getCustomEndpointConfig } = require('~/server/services/Config'); +const { createHandleLLMNewToken } = require('~/app/clients/generators'); const { fetchModels } = require('~/server/services/ModelService'); -const { isUserProvided, sleep } = require('~/server/utils'); +const OpenAIClient = require('~/app/clients/OpenAIClient'); +const { isUserProvided } = require('~/server/utils'); const getLogStores = require('~/cache/getLogStores'); -const { OpenAIClient } = require('~/app'); const { PROXY } = process.env; @@ -148,9 +149,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid } options.llmConfig.callbacks = [ { - handleLLMNewToken: async () => { - await sleep(customOptions.streamRate); - }, + handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate), }, ]; return options; diff --git a/api/server/services/Endpoints/openAI/initialize.js b/api/server/services/Endpoints/openAI/initialize.js index 4d358cef1a..714ed5a1e6 100644 --- a/api/server/services/Endpoints/openAI/initialize.js +++ b/api/server/services/Endpoints/openAI/initialize.js @@ -6,9 +6,10 @@ const { } = require('librechat-data-provider'); 
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm'); -const { isEnabled, isUserProvided, sleep } = require('~/server/utils'); +const { createHandleLLMNewToken } = require('~/app/clients/generators'); +const { isEnabled, isUserProvided } = require('~/server/utils'); +const OpenAIClient = require('~/app/clients/OpenAIClient'); const { getAzureCredentials } = require('~/utils'); -const { OpenAIClient } = require('~/app'); const initializeClient = async ({ req, @@ -140,14 +141,13 @@ const initializeClient = async ({ clientOptions = Object.assign({ modelOptions }, clientOptions); clientOptions.modelOptions.user = req.user.id; const options = getLLMConfig(apiKey, clientOptions); - if (!clientOptions.streamRate) { + const streamRate = clientOptions.streamRate; + if (!streamRate) { return options; } options.llmConfig.callbacks = [ { - handleLLMNewToken: async () => { - await sleep(clientOptions.streamRate); - }, + handleLLMNewToken: createHandleLLMNewToken(streamRate), }, ]; return options; diff --git a/api/server/services/Endpoints/openAI/llm.js b/api/server/services/Endpoints/openAI/llm.js index a8aeeb5b9d..c1fd090b28 100644 --- a/api/server/services/Endpoints/openAI/llm.js +++ b/api/server/services/Endpoints/openAI/llm.js @@ -136,7 +136,7 @@ function getLLMConfig(apiKey, options = {}, endpoint = null) { Object.assign(llmConfig, azure); llmConfig.model = llmConfig.azureOpenAIApiDeploymentName; } else { - llmConfig.openAIApiKey = apiKey; + llmConfig.apiKey = apiKey; // Object.assign(llmConfig, { // configuration: { apiKey }, // }); @@ -153,6 +153,12 @@ function getLLMConfig(apiKey, options = {}, endpoint = null) { delete llmConfig.reasoning_effort; } + if (llmConfig?.['max_tokens'] != null) { + /** @type {number} */ + llmConfig.maxTokens = llmConfig['max_tokens']; + delete llmConfig['max_tokens']; + } + return { /** @type {OpenAIClientOptions} */ llmConfig, 
diff --git a/api/server/services/Endpoints/openAI/title.js b/api/server/services/Endpoints/openAI/title.js index 35291c5e31..3b9e9c82b7 100644 --- a/api/server/services/Endpoints/openAI/title.js +++ b/api/server/services/Endpoints/openAI/title.js @@ -13,11 +13,6 @@ const addTitle = async (req, { text, response, client }) => { return; } - // If the request was aborted and is not azure, don't generate the title. - if (!client.azure && client.abortController.signal.aborted) { - return; - } - const titleCache = getLogStores(CacheKeys.GEN_TITLE); const key = `${req.user.id}-${response.conversationId}`; diff --git a/api/server/services/Files/Local/crud.js b/api/server/services/Files/Local/crud.js index c2bb75c125..783230f2f6 100644 --- a/api/server/services/Files/Local/crud.js +++ b/api/server/services/Files/Local/crud.js @@ -309,6 +309,24 @@ function getLocalFileStream(req, filepath) { throw new Error(`Invalid file path: ${filepath}`); } + return fs.createReadStream(fullPath); + } else if (filepath.includes('/images/')) { + const basePath = filepath.split('/images/')[1]; + + if (!basePath) { + logger.warn(`Invalid base path: ${filepath}`); + throw new Error(`Invalid file path: ${filepath}`); + } + + const fullPath = path.join(req.app.locals.paths.imageOutput, basePath); + const publicDir = req.app.locals.paths.imageOutput; + + const rel = path.relative(publicDir, fullPath); + if (rel.startsWith('..') || path.isAbsolute(rel) || rel.includes(`..${path.sep}`)) { + logger.warn(`Invalid relative file path: ${filepath}`); + throw new Error(`Invalid file path: ${filepath}`); + } + return fs.createReadStream(fullPath); } return fs.createReadStream(filepath); diff --git a/api/server/services/Files/MistralOCR/crud.js b/api/server/services/Files/MistralOCR/crud.js index 689e4152ba..0c544b9eb4 100644 --- a/api/server/services/Files/MistralOCR/crud.js +++ b/api/server/services/Files/MistralOCR/crud.js @@ -69,16 +69,20 @@ async function getSignedUrl({ /** * @param {Object} params * 
@param {string} params.apiKey - * @param {string} params.documentUrl + * @param {string} params.url - The document or image URL + * @param {string} [params.documentType='document_url'] - 'document_url' or 'image_url' + * @param {string} [params.model] * @param {string} [params.baseURL] * @returns {Promise} */ async function performOCR({ apiKey, - documentUrl, + url, + documentType = 'document_url', model = 'mistral-ocr-latest', baseURL = 'https://api.mistral.ai/v1', }) { + const documentKey = documentType === 'image_url' ? 'image_url' : 'document_url'; return axios .post( `${baseURL}/ocr`, @@ -86,8 +90,8 @@ async function performOCR({ model, include_image_base64: false, document: { - type: 'document_url', - document_url: documentUrl, + type: documentType, + [documentKey]: url, }, }, { @@ -109,6 +113,19 @@ function extractVariableName(str) { return match ? match[1] : null; } +/** + * Uploads a file to the Mistral OCR API and processes the OCR result. + * + * @param {Object} params - The params object. + * @param {ServerRequest} params.req - The request object from Express. It should have a `user` property with an `id` + * representing the user + * @param {Express.Multer.File} params.file - The file object, which is part of the request. The file object should + * have a `mimetype` property that tells us the file type + * @param {string} params.file_id - The file ID. + * @param {string} [params.entity_id] - The entity ID, not used here but passed for consistency. + * @returns {Promise<{ filepath: string, bytes: number }>} - The result object containing the processed `text` and `images` (not currently used), + * along with the `filename` and `bytes` properties. 
+ */ const uploadMistralOCR = async ({ req, file, file_id, entity_id }) => { try { /** @type {TCustomConfig['ocr']} */ @@ -160,11 +177,18 @@ const uploadMistralOCR = async ({ req, file, file_id, entity_id }) => { fileId: mistralFile.id, }); + const mimetype = (file.mimetype || '').toLowerCase(); + const originalname = file.originalname || ''; + const isImage = + mimetype.startsWith('image') || /\.(png|jpe?g|gif|bmp|webp|tiff?)$/i.test(originalname); + const documentType = isImage ? 'image_url' : 'document_url'; + const ocrResult = await performOCR({ apiKey, baseURL, model, - documentUrl: signedUrlResponse.url, + url: signedUrlResponse.url, + documentType, }); let aggregatedText = ''; diff --git a/api/server/services/Files/MistralOCR/crud.spec.js b/api/server/services/Files/MistralOCR/crud.spec.js index 6d0b321bbf..c3d2f46c40 100644 --- a/api/server/services/Files/MistralOCR/crud.spec.js +++ b/api/server/services/Files/MistralOCR/crud.spec.js @@ -172,7 +172,7 @@ describe('MistralOCR Service', () => { }); describe('performOCR', () => { - it('should perform OCR using Mistral API', async () => { + it('should perform OCR using Mistral API (document_url)', async () => { const mockResponse = { data: { pages: [{ markdown: 'Page 1 content' }, { markdown: 'Page 2 content' }], @@ -182,8 +182,9 @@ describe('MistralOCR Service', () => { const result = await performOCR({ apiKey: 'test-api-key', - documentUrl: 'https://document-url.com', + url: 'https://document-url.com', model: 'mistral-ocr-latest', + documentType: 'document_url', }); expect(mockAxios.post).toHaveBeenCalledWith( @@ -206,6 +207,41 @@ describe('MistralOCR Service', () => { expect(result).toEqual(mockResponse.data); }); + it('should perform OCR using Mistral API (image_url)', async () => { + const mockResponse = { + data: { + pages: [{ markdown: 'Image OCR content' }], + }, + }; + mockAxios.post.mockResolvedValueOnce(mockResponse); + + const result = await performOCR({ + apiKey: 'test-api-key', + url: 
'https://image-url.com/image.png', + model: 'mistral-ocr-latest', + documentType: 'image_url', + }); + + expect(mockAxios.post).toHaveBeenCalledWith( + 'https://api.mistral.ai/v1/ocr', + { + model: 'mistral-ocr-latest', + include_image_base64: false, + document: { + type: 'image_url', + image_url: 'https://image-url.com/image.png', + }, + }, + { + headers: { + 'Content-Type': 'application/json', + Authorization: 'Bearer test-api-key', + }, + }, + ); + expect(result).toEqual(mockResponse.data); + }); + it('should handle errors during OCR processing', async () => { const errorMessage = 'OCR processing error'; mockAxios.post.mockRejectedValueOnce(new Error(errorMessage)); @@ -213,7 +249,7 @@ describe('MistralOCR Service', () => { await expect( performOCR({ apiKey: 'test-api-key', - documentUrl: 'https://document-url.com', + url: 'https://document-url.com', }), ).rejects.toThrow(); @@ -295,6 +331,7 @@ describe('MistralOCR Service', () => { const file = { path: '/tmp/upload/file.pdf', originalname: 'document.pdf', + mimetype: 'application/pdf', }; const result = await uploadMistralOCR({ @@ -322,6 +359,90 @@ describe('MistralOCR Service', () => { }); }); + it('should process OCR for an image file and use image_url type', async () => { + const { loadAuthValues } = require('~/server/services/Tools/credentials'); + loadAuthValues.mockResolvedValue({ + OCR_API_KEY: 'test-api-key', + OCR_BASEURL: 'https://api.mistral.ai/v1', + }); + + // Mock file upload response + mockAxios.post.mockResolvedValueOnce({ + data: { id: 'file-456', purpose: 'ocr' }, + }); + + // Mock signed URL response + mockAxios.get.mockResolvedValueOnce({ + data: { url: 'https://signed-url.com/image.png' }, + }); + + // Mock OCR response for image + mockAxios.post.mockResolvedValueOnce({ + data: { + pages: [ + { + markdown: 'Image OCR result', + images: [{ image_base64: 'imgbase64' }], + }, + ], + }, + }); + + const req = { + user: { id: 'user456' }, + app: { + locals: { + ocr: { + apiKey: '${OCR_API_KEY}', 
+ baseURL: '${OCR_BASEURL}', + mistralModel: 'mistral-medium', + }, + }, + }, + }; + + const file = { + path: '/tmp/upload/image.png', + originalname: 'image.png', + mimetype: 'image/png', + }; + + const result = await uploadMistralOCR({ + req, + file, + file_id: 'file456', + entity_id: 'entity456', + }); + + expect(fs.createReadStream).toHaveBeenCalledWith('/tmp/upload/image.png'); + + expect(loadAuthValues).toHaveBeenCalledWith({ + userId: 'user456', + authFields: ['OCR_BASEURL', 'OCR_API_KEY'], + optional: expect.any(Set), + }); + + // Check that the OCR API was called with image_url type + expect(mockAxios.post).toHaveBeenCalledWith( + 'https://api.mistral.ai/v1/ocr', + expect.objectContaining({ + document: expect.objectContaining({ + type: 'image_url', + image_url: 'https://signed-url.com/image.png', + }), + }), + expect.any(Object), + ); + + expect(result).toEqual({ + filename: 'image.png', + bytes: expect.any(Number), + filepath: 'mistral_ocr', + text: expect.stringContaining('Image OCR result'), + images: ['imgbase64'], + }); + }); + it('should process variable references in configuration', async () => { // Setup mocks with environment variables const { loadAuthValues } = require('~/server/services/Tools/credentials'); diff --git a/api/server/services/Files/S3/crud.js b/api/server/services/Files/S3/crud.js index e685c8c8c2..10c04106d8 100644 --- a/api/server/services/Files/S3/crud.js +++ b/api/server/services/Files/S3/crud.js @@ -358,10 +358,10 @@ async function getNewS3URL(currentURL) { /** * Refreshes S3 URLs for an array of files if they're expired or close to expiring * - * @param {IMongoFile[]} files - Array of file documents + * @param {MongoFile[]} files - Array of file documents * @param {(files: MongoFile[]) => Promise} batchUpdateFiles - Function to update files in the database * @param {number} [bufferSeconds=3600] - Buffer time in seconds to check for expiration - * @returns {Promise} The files with refreshed URLs if needed + * @returns 
{Promise} The files with refreshed URLs if needed */ async function refreshS3FileUrls(files, batchUpdateFiles, bufferSeconds = 3600) { if (!files || !Array.isArray(files) || files.length === 0) { diff --git a/api/server/services/Files/images/encode.js b/api/server/services/Files/images/encode.js index f733a0d6d6..154941fd89 100644 --- a/api/server/services/Files/images/encode.js +++ b/api/server/services/Files/images/encode.js @@ -10,6 +10,44 @@ const { getStrategyFunctions } = require('~/server/services/Files/strategies'); const { logAxiosError } = require('~/utils'); const { logger } = require('~/config'); +/** + * Converts a readable stream to a base64 encoded string. + * + * @param {NodeJS.ReadableStream} stream - The readable stream to convert. + * @param {boolean} [destroyStream=true] - Whether to destroy the stream after processing. + * @returns {Promise} - Promise resolving to the base64 encoded content. + */ +async function streamToBase64(stream, destroyStream = true) { + return new Promise((resolve, reject) => { + const chunks = []; + + stream.on('data', (chunk) => { + chunks.push(chunk); + }); + + stream.on('end', () => { + try { + const buffer = Buffer.concat(chunks); + const base64Data = buffer.toString('base64'); + chunks.length = 0; // Clear the array + resolve(base64Data); + } catch (err) { + reject(err); + } + }); + + stream.on('error', (error) => { + chunks.length = 0; + reject(error); + }); + }).finally(() => { + // Clean up the stream if required + if (destroyStream && stream.destroy && typeof stream.destroy === 'function') { + stream.destroy(); + } + }); +} + /** * Fetches an image from a URL and returns its base64 representation. 
* @@ -23,7 +61,9 @@ async function fetchImageToBase64(url) { const response = await axios.get(url, { responseType: 'arraybuffer', }); - return Buffer.from(response.data).toString('base64'); + const base64Data = Buffer.from(response.data).toString('base64'); + response.data = null; + return base64Data; } catch (error) { const message = 'Error fetching image to convert to base64'; throw new Error(logAxiosError({ message, error })); @@ -89,38 +129,15 @@ async function encodeAndFormat(req, files, endpoint, mode) { if (blobStorageSources.has(source)) { try { const downloadStream = encodingMethods[source].getDownloadStream; - const stream = await downloadStream(req, file.filepath); - const streamPromise = new Promise((resolve, reject) => { - /** @type {Uint8Array[]} */ - const chunks = []; - stream.on('readable', () => { - let chunk; - while (null !== (chunk = stream.read())) { - chunks.push(chunk); - } - }); - - stream.on('end', () => { - const buffer = Buffer.concat(chunks); - const base64Data = buffer.toString('base64'); - resolve(base64Data); - }); - stream.on('error', (error) => { - reject(error); - }); - }); - const base64Data = await streamPromise; + let stream = await downloadStream(req, file.filepath); + let base64Data = await streamToBase64(stream); + stream = null; promises.push([file, base64Data]); + base64Data = null; continue; } catch (error) { - logger.error( - `Error processing blob storage file stream for ${file.name} base64 payload:`, - error, - ); - continue; + // Error handling code } - - /* Google & Anthropic don't support passing URLs to payload */ } else if (source !== FileSources.local && base64Only.has(endpoint)) { const [_file, imageURL] = await preparePayload(req, file); promises.push([_file, await fetchImageToBase64(imageURL)]); @@ -137,6 +154,7 @@ async function encodeAndFormat(req, files, endpoint, mode) { /** @type {Array<[MongoFile, string]>} */ const formattedImages = await Promise.all(promises); + promises.length = 0; for (const [file, 
imageContent] of formattedImages) { const fileMetadata = { @@ -169,8 +187,8 @@ async function encodeAndFormat(req, files, endpoint, mode) { }; if (mode === VisionModes.agents) { - result.image_urls.push(imagePart); - result.files.push(fileMetadata); + result.image_urls.push({ ...imagePart }); + result.files.push({ ...fileMetadata }); continue; } @@ -192,10 +210,11 @@ async function encodeAndFormat(req, files, endpoint, mode) { delete imagePart.image_url; } - result.image_urls.push(imagePart); - result.files.push(fileMetadata); + result.image_urls.push({ ...imagePart }); + result.files.push({ ...fileMetadata }); } - return result; + formattedImages.length = 0; + return { ...result }; } module.exports = { diff --git a/api/server/services/Files/process.js b/api/server/services/Files/process.js index 384955dabf..81a4f52855 100644 --- a/api/server/services/Files/process.js +++ b/api/server/services/Files/process.js @@ -520,7 +520,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => { throw new Error('OCR capability is not enabled for Agents'); } - const { handleFileUpload } = getStrategyFunctions( + const { handleFileUpload: uploadMistralOCR } = getStrategyFunctions( req.app.locals?.ocr?.strategy ?? FileSources.mistral_ocr, ); const { file_id, temp_file_id } = metadata; @@ -532,7 +532,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => { images, filename, filepath: ocrFileURL, - } = await handleFileUpload({ req, file, file_id, entity_id: agent_id, basePath }); + } = await uploadMistralOCR({ req, file, file_id, entity_id: agent_id, basePath }); const fileInfo = removeNullishValues({ text, @@ -540,7 +540,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => { file_id, temp_file_id, user: req.user.id, - type: file.mimetype, + type: 'text/plain', filepath: ocrFileURL, source: FileSources.text, filename: filename ?? 
file.originalname, diff --git a/api/server/services/MCP.js b/api/server/services/MCP.js index 0a2711c672..1d4fc5112c 100644 --- a/api/server/services/MCP.js +++ b/api/server/services/MCP.js @@ -19,7 +19,7 @@ const { logger, getMCPManager } = require('~/config'); * @param {string} params.model - The model for the tool. * @returns { Promise unknown}> } An object with `_call` method to execute the tool input. */ -async function createMCPTool({ req, toolKey, provider }) { +async function createMCPTool({ req, toolKey, provider: _provider }) { const toolDefinition = req.app.locals.availableTools[toolKey]?.function; if (!toolDefinition) { logger.error(`Tool ${toolKey} not found in available tools`); @@ -27,9 +27,10 @@ async function createMCPTool({ req, toolKey, provider }) { } /** @type {LCTool} */ const { description, parameters } = toolDefinition; - const isGoogle = provider === Providers.VERTEXAI || provider === Providers.GOOGLE; + const isGoogle = _provider === Providers.VERTEXAI || _provider === Providers.GOOGLE; let schema = convertJsonSchemaToZod(parameters, { allowEmptyObject: !isGoogle, + transformOneOfAnyOf: true, }); if (!schema) { @@ -37,9 +38,8 @@ async function createMCPTool({ req, toolKey, provider }) { } const [toolName, serverName] = toolKey.split(Constants.mcp_delimiter); - const userId = req.user?.id; - if (!userId) { + if (!req.user?.id) { logger.error( `[MCP][${serverName}][${toolName}] User ID not found on request. Cannot create tool.`, ); @@ -49,15 +49,17 @@ async function createMCPTool({ req, toolKey, provider }) { /** @type {(toolArguments: Object | string, config?: GraphRunnableConfig) => Promise} */ const _call = async (toolArguments, config) => { try { - const mcpManager = getMCPManager(); + const derivedSignal = config?.signal ? 
AbortSignal.any([config.signal]) : undefined; + const mcpManager = getMCPManager(config?.configurable?.user_id); + const provider = (config?.metadata?.provider || _provider)?.toLowerCase(); const result = await mcpManager.callTool({ serverName, toolName, provider, toolArguments, options: { - userId, - signal: config?.signal, + userId: config?.configurable?.user_id, + signal: derivedSignal, }, }); @@ -70,7 +72,7 @@ async function createMCPTool({ req, toolKey, provider }) { return result; } catch (error) { logger.error( - `[MCP][User: ${userId}][${serverName}] Error calling "${toolName}" MCP tool:`, + `[MCP][User: ${config?.configurable?.user_id}][${serverName}] Error calling "${toolName}" MCP tool:`, error, ); throw new Error( diff --git a/api/server/services/Threads/manage.js b/api/server/services/Threads/manage.js index f99dca7534..5eace214c3 100644 --- a/api/server/services/Threads/manage.js +++ b/api/server/services/Threads/manage.js @@ -132,6 +132,8 @@ async function saveUserMessage(req, params) { * @param {string} params.endpoint - The conversation endpoint * @param {string} params.parentMessageId - The latest user message that triggered this response. * @param {string} [params.instructions] - Optional: from preset for `instructions` field. + * @param {string} [params.spec] - Optional: Model spec identifier. + * @param {string} [params.iconURL] * Overrides the instructions of the assistant. * @param {string} [params.promptPrefix] - Optional: from preset for `additional_instructions` field. * @return {Promise} A promise that resolves to the created run object. 
@@ -154,6 +156,8 @@ async function saveAssistantMessage(req, params) { text: params.text, unfinished: false, // tokenCount, + iconURL: params.iconURL, + spec: params.spec, }); await saveConvo( @@ -165,6 +169,8 @@ async function saveAssistantMessage(req, params) { instructions: params.instructions, assistant_id: params.assistant_id, model: params.model, + iconURL: params.iconURL, + spec: params.spec, }, { context: 'api/server/services/Threads/manage.js #saveAssistantMessage' }, ); diff --git a/api/server/services/ToolService.js b/api/server/services/ToolService.js index fca26ffcfe..b71e97f742 100644 --- a/api/server/services/ToolService.js +++ b/api/server/services/ToolService.js @@ -8,6 +8,7 @@ const { ErrorTypes, ContentTypes, imageGenTools, + EToolResources, EModelEndpoint, actionDelimiter, ImageVisionTool, @@ -16,13 +17,18 @@ const { validateAndParseOpenAPISpec, } = require('librechat-data-provider'); const { - loadActionSets, createActionTool, decryptMetadata, + loadActionSets, domainParser, } = require('./ActionService'); +const { + createOpenAIImageTools, + createYouTubeTools, + manifestToolMap, + toolkits, +} = require('~/app/clients/tools'); const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process'); -const { createYouTubeTools, manifestToolMap, toolkits } = require('~/app/clients/tools'); const { isActionDomainAllowed } = require('~/server/services/domains'); const { getEndpointsConfig } = require('~/server/services/Config'); const { recordUsage } = require('~/server/services/Threads'); @@ -31,6 +37,30 @@ const { redactMessage } = require('~/config/parsers'); const { sleep } = require('~/server/utils'); const { logger } = require('~/config'); +/** + * @param {string} toolName + * @returns {string | undefined} toolKey + */ +function getToolkitKey(toolName) { + /** @type {string|undefined} */ + let toolkitKey; + for (const toolkit of toolkits) { + if (toolName.startsWith(EToolResources.image_edit)) { + const splitMatches = 
toolkit.pluginKey.split('_'); + const suffix = splitMatches[splitMatches.length - 1]; + if (toolName.endsWith(suffix)) { + toolkitKey = toolkit.pluginKey; + break; + } + } + if (toolName.startsWith(toolkit.pluginKey)) { + toolkitKey = toolkit.pluginKey; + break; + } + } + return toolkitKey; +} + /** * Loads and formats tools from the specified tool directory. * @@ -103,14 +133,16 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] }) tools.push(formattedTool); } - /** Basic Tools; schema: { input: string } */ - const basicToolInstances = [new Calculator(), ...createYouTubeTools({ override: true })]; + /** Basic Tools & Toolkits; schema: { input: string } */ + const basicToolInstances = [ + new Calculator(), + ...createOpenAIImageTools({ override: true }), + ...createYouTubeTools({ override: true }), + ]; for (const toolInstance of basicToolInstances) { const formattedTool = formatToOpenAIAssistantTool(toolInstance); let toolName = formattedTool[Tools.function].name; - toolName = toolkits.some((toolkit) => toolName.startsWith(toolkit.pluginKey)) - ? toolName.split('_')[0] - : toolName; + toolName = getToolkitKey(toolName) ?? 
toolName; if (filter.has(toolName) && included.size === 0) { continue; } @@ -334,7 +366,7 @@ async function processRequiredActions(client, requiredActions) { const domainMap = new Map(); for (const action of actionSets) { - const domain = await domainParser(client.req, action.metadata.domain, true); + const domain = await domainParser(action.metadata.domain, true); domainMap.set(domain, action); // Check if domain is allowed @@ -404,7 +436,7 @@ async function processRequiredActions(client, requiredActions) { // We've already decrypted the metadata, so we can pass it directly tool = await createActionTool({ - req: client.req, + userId: client.req.user.id, res: client.res, action, requestBuilder, @@ -458,7 +490,7 @@ async function processRequiredActions(client, requiredActions) { * @param {Object} params - Run params containing user and request information. * @param {ServerRequest} params.req - The request object. * @param {ServerResponse} params.res - The request object. - * @param {Agent} params.agent - The agent to load tools for. + * @param {Pick} The agent tools. 
*/ @@ -570,7 +602,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey }) const domainMap = new Map(); for (const action of actionSets) { - const domain = await domainParser(req, action.metadata.domain, true); + const domain = await domainParser(action.metadata.domain, true); domainMap.set(domain, action); // Check if domain is allowed (do this once per action set) @@ -639,7 +671,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey }) if (requestBuilder) { const tool = await createActionTool({ - req, + userId: req.user.id, res, action, requestBuilder, @@ -673,6 +705,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey }) } module.exports = { + getToolkitKey, loadAgentTools, loadAndFormatTools, processRequiredActions, diff --git a/api/server/socialLogins.js b/api/server/socialLogins.js index af80a3b880..0eb44514d3 100644 --- a/api/server/socialLogins.js +++ b/api/server/socialLogins.js @@ -1,4 +1,4 @@ -const Keyv = require('keyv'); +const { Keyv } = require('keyv'); const passport = require('passport'); const session = require('express-session'); const MemoryStore = require('memorystore')(session); @@ -53,7 +53,7 @@ const configureSocialLogins = (app) => { if (isEnabled(process.env.USE_REDIS)) { logger.debug('Using Redis for session storage in OpenID...'); const keyv = new Keyv({ store: keyvRedis }); - const client = keyv.opts.store.redis; + const client = keyv.opts.store.client; sessionOptions.store = new RedisStore({ client, prefix: 'openid_session' }); } else { sessionOptions.store = new MemoryStore({ diff --git a/api/server/utils/streamResponse.js b/api/server/utils/streamResponse.js index 0f042339a9..bb8d63b229 100644 --- a/api/server/utils/streamResponse.js +++ b/api/server/utils/streamResponse.js @@ -70,7 +70,13 @@ const sendError = async (req, res, options, callback) => { } if (shouldSaveMessage) { - await saveMessage(req, { ...errorMessage, user }); + await 
saveMessage( + req, + { ...errorMessage, user }, + { + context: 'api/server/utils/streamResponse.js - sendError', + }, + ); } if (!errorMessage.error) { diff --git a/api/test/__mocks__/KeyvMongo.js b/api/test/__mocks__/KeyvMongo.js deleted file mode 100644 index f88bc144be..0000000000 --- a/api/test/__mocks__/KeyvMongo.js +++ /dev/null @@ -1,30 +0,0 @@ -const mockGet = jest.fn(); -const mockSet = jest.fn(); - -jest.mock('@keyv/mongo', () => { - const EventEmitter = require('events'); - class KeyvMongo extends EventEmitter { - constructor(url = 'mongodb://127.0.0.1:27017', options) { - super(); - this.ttlSupport = false; - url = url ?? {}; - if (typeof url === 'string') { - url = { url }; - } - if (url.uri) { - url = { url: url.uri, ...url }; - } - this.opts = { - url, - collection: 'keyv', - ...url, - ...options, - }; - } - - get = mockGet; - set = mockSet; - } - - return KeyvMongo; -}); diff --git a/api/typedefs.js b/api/typedefs.js index 24dd29a932..d65d8c9191 100644 --- a/api/typedefs.js +++ b/api/typedefs.js @@ -7,6 +7,11 @@ * @typedef {import('openai').OpenAI} OpenAI * @memberof typedefs */ +/** + * @exports OpenAIImagesResponse + * @typedef {Promise} OpenAIImagesResponse + * @memberof typedefs + */ /** * @exports ServerRequest @@ -14,6 +19,18 @@ * @memberof typedefs */ +/** + * @template T + * @typedef {ReadableStream | NodeJS.ReadableStream} NodeStream + * @memberof typedefs + */ + +/** + * @template T + * @typedef {(req: ServerRequest, filepath: string) => Promise>} NodeStreamDownloader + * @memberof typedefs + */ + /** * @exports ServerResponse * @typedef {import('express').Response} ServerResponse @@ -26,6 +43,60 @@ * @memberof typedefs */ +/** + * @exports Graph + * @typedef {import('@librechat/agents').Graph} Graph + * @memberof typedefs + */ + +/** + * @exports StandardGraph + * @typedef {import('@librechat/agents').StandardGraph} StandardGraph + * @memberof typedefs + */ + +/** + * @exports EventHandler + * @typedef 
{import('@librechat/agents').EventHandler} EventHandler + * @memberof typedefs + */ + +/** + * @exports ModelEndData + * @typedef {import('@librechat/agents').ModelEndData} ModelEndData + * @memberof typedefs + */ + +/** + * @exports ToolEndData + * @typedef {import('@librechat/agents').ToolEndData} ToolEndData + * @memberof typedefs + */ + +/** + * @exports ToolEndCallback + * @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback + * @memberof typedefs + */ + +/** + * @exports ChatModelStreamHandler + * @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler + * @memberof typedefs + */ + +/** + * @exports ContentAggregator + * @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator + * @memberof typedefs + */ + +/** + * @exports GraphEvents + * @typedef {import('@librechat/agents').GraphEvents} GraphEvents + * @memberof typedefs + */ + /** * @exports AgentRun * @typedef {import('@librechat/agents').Run} AgentRun @@ -80,12 +151,6 @@ * @memberof typedefs */ -/** - * @exports ToolEndData - * @typedef {import('@librechat/agents').ToolEndData} ToolEndData - * @memberof typedefs - */ - /** * @exports BaseMessage * @typedef {import('@langchain/core/messages').BaseMessage} BaseMessage @@ -816,8 +881,9 @@ /** * @typedef {Partial & { * message?: string, - * signal?: AbortSignal - * memory?: ConversationSummaryBufferMemory + * signal?: AbortSignal, + * memory?: ConversationSummaryBufferMemory, + * tool_resources?: AgentToolResources, * }} LoadToolOptions * @memberof typedefs */ diff --git a/api/utils/tokens.js b/api/utils/tokens.js index 2982aedcb8..7ff59acfdd 100644 --- a/api/utils/tokens.js +++ b/api/utils/tokens.js @@ -2,7 +2,9 @@ const z = require('zod'); const { EModelEndpoint } = require('librechat-data-provider'); const openAIModels = { + 'o4-mini': 200000, 'o3-mini': 195000, // -5000 from max + o3: 200000, o1: 195000, // -5000 from max 'o1-mini': 127500, // -500 from max 
'o1-preview': 127500, // -500 from max @@ -14,6 +16,9 @@ const openAIModels = { 'gpt-4-1106': 127500, // -500 from max 'gpt-4-0125': 127500, // -500 from max 'gpt-4.5': 127500, // -500 from max + 'gpt-4.1': 1047576, + 'gpt-4.1-mini': 1047576, + 'gpt-4.1-nano': 1047576, 'gpt-4o': 127500, // -500 from max 'gpt-4o-mini': 127500, // -500 from max 'gpt-4o-2024-05-13': 127500, // -500 from max @@ -55,10 +60,16 @@ const cohereModels = { const googleModels = { /* Max I/O is combined so we subtract the amount from max response tokens for actual total */ + gemma: 8196, + 'gemma-2': 32768, + 'gemma-3': 32768, + 'gemma-3-27b': 131072, gemini: 30720, // -2048 from max 'gemini-pro-vision': 12288, 'gemini-exp': 2000000, 'gemini-2.5': 1000000, // 1M input tokens, 64k output tokens + 'gemini-2.5-pro': 1000000, + 'gemini-2.5-flash': 1000000, 'gemini-2.0': 2000000, 'gemini-2.0-flash': 1000000, 'gemini-2.0-flash-lite': 1000000, @@ -196,6 +207,7 @@ const bedrockModels = { }; const xAIModels = { + grok: 131072, 'grok-beta': 131072, 'grok-vision-beta': 8192, 'grok-2': 131072, @@ -204,6 +216,10 @@ const xAIModels = { 'grok-2-vision': 32768, 'grok-2-vision-latest': 32768, 'grok-2-vision-1212': 32768, + 'grok-3': 131072, + 'grok-3-fast': 131072, + 'grok-3-mini': 131072, + 'grok-3-mini-fast': 131072, }; const aggregateModels = { ...openAIModels, ...googleModels, ...bedrockModels, ...xAIModels }; @@ -225,12 +241,15 @@ const modelMaxOutputs = { system_default: 1024, }; +/** Outputs from https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-names */ const anthropicMaxOutputs = { 'claude-3-haiku': 4096, 'claude-3-sonnet': 4096, 'claude-3-opus': 4096, 'claude-3.5-sonnet': 8192, 'claude-3-5-sonnet': 8192, + 'claude-3.7-sonnet': 128000, + 'claude-3-7-sonnet': 128000, }; const maxOutputTokensMap = { diff --git a/api/utils/tokens.spec.js b/api/utils/tokens.spec.js index e5ae21b646..57a9f72e89 100644 --- a/api/utils/tokens.spec.js +++ b/api/utils/tokens.spec.js @@ -113,6 +113,43 @@ 
describe('getModelMaxTokens', () => { ); }); + test('should return correct tokens for gpt-4.1 matches', () => { + expect(getModelMaxTokens('gpt-4.1')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4.1']); + expect(getModelMaxTokens('gpt-4.1-preview')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'], + ); + expect(getModelMaxTokens('openai/gpt-4.1')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'], + ); + expect(getModelMaxTokens('gpt-4.1-2024-08-06')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'], + ); + }); + + test('should return correct tokens for gpt-4.1-mini matches', () => { + expect(getModelMaxTokens('gpt-4.1-mini')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'], + ); + expect(getModelMaxTokens('gpt-4.1-mini-preview')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'], + ); + expect(getModelMaxTokens('openai/gpt-4.1-mini')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'], + ); + }); + + test('should return correct tokens for gpt-4.1-nano matches', () => { + expect(getModelMaxTokens('gpt-4.1-nano')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'], + ); + expect(getModelMaxTokens('gpt-4.1-nano-preview')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'], + ); + expect(getModelMaxTokens('openai/gpt-4.1-nano')).toBe( + maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'], + ); + }); + test('should return correct tokens for Anthropic models', () => { const models = [ 'claude-2.1', @@ -303,6 +340,15 @@ describe('getModelMaxTokens', () => { expect(getModelMaxTokens('o1-preview-something')).toBe(o1PreviewTokens); expect(getModelMaxTokens('openai/o1-preview-something')).toBe(o1PreviewTokens); }); + + test('should return correct max context tokens for o4-mini and o3', () => { + const o4MiniTokens = maxTokensMap[EModelEndpoint.openAI]['o4-mini']; + const o3Tokens = maxTokensMap[EModelEndpoint.openAI]['o3']; + expect(getModelMaxTokens('o4-mini')).toBe(o4MiniTokens); + 
expect(getModelMaxTokens('openai/o4-mini')).toBe(o4MiniTokens); + expect(getModelMaxTokens('o3')).toBe(o3Tokens); + expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens); + }); }); describe('matchModelName', () => { @@ -355,6 +401,25 @@ describe('matchModelName', () => { expect(matchModelName('gpt-4-0125-vision-preview')).toBe('gpt-4-0125'); }); + it('should return the closest matching key for gpt-4.1 matches', () => { + expect(matchModelName('openai/gpt-4.1')).toBe('gpt-4.1'); + expect(matchModelName('gpt-4.1-preview')).toBe('gpt-4.1'); + expect(matchModelName('gpt-4.1-2024-08-06')).toBe('gpt-4.1'); + expect(matchModelName('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1'); + }); + + it('should return the closest matching key for gpt-4.1-mini matches', () => { + expect(matchModelName('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini'); + expect(matchModelName('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini'); + expect(matchModelName('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini'); + }); + + it('should return the closest matching key for gpt-4.1-nano matches', () => { + expect(matchModelName('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano'); + expect(matchModelName('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano'); + expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano'); + }); + // Tests for Google models it('should return the exact model name if it exists in maxTokensMap - Google models', () => { expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k'); @@ -517,18 +582,30 @@ describe('Grok Model Tests - Tokens', () => { expect(getModelMaxTokens('grok-2-latest')).toBe(131072); }); + test('should return correct tokens for Grok 3 series models', () => { + expect(getModelMaxTokens('grok-3')).toBe(131072); + expect(getModelMaxTokens('grok-3-fast')).toBe(131072); + expect(getModelMaxTokens('grok-3-mini')).toBe(131072); + expect(getModelMaxTokens('grok-3-mini-fast')).toBe(131072); + }); + test('should handle partial matches for Grok models with 
prefixes', () => { // Vision models should match before general models - expect(getModelMaxTokens('openai/grok-2-vision-1212')).toBe(32768); - expect(getModelMaxTokens('openai/grok-2-vision')).toBe(32768); - expect(getModelMaxTokens('openai/grok-2-vision-latest')).toBe(32768); + expect(getModelMaxTokens('xai/grok-2-vision-1212')).toBe(32768); + expect(getModelMaxTokens('xai/grok-2-vision')).toBe(32768); + expect(getModelMaxTokens('xai/grok-2-vision-latest')).toBe(32768); // Beta models - expect(getModelMaxTokens('openai/grok-vision-beta')).toBe(8192); - expect(getModelMaxTokens('openai/grok-beta')).toBe(131072); + expect(getModelMaxTokens('xai/grok-vision-beta')).toBe(8192); + expect(getModelMaxTokens('xai/grok-beta')).toBe(131072); // Text models - expect(getModelMaxTokens('openai/grok-2-1212')).toBe(131072); - expect(getModelMaxTokens('openai/grok-2')).toBe(131072); - expect(getModelMaxTokens('openai/grok-2-latest')).toBe(131072); + expect(getModelMaxTokens('xai/grok-2-1212')).toBe(131072); + expect(getModelMaxTokens('xai/grok-2')).toBe(131072); + expect(getModelMaxTokens('xai/grok-2-latest')).toBe(131072); + // Grok 3 models + expect(getModelMaxTokens('xai/grok-3')).toBe(131072); + expect(getModelMaxTokens('xai/grok-3-fast')).toBe(131072); + expect(getModelMaxTokens('xai/grok-3-mini')).toBe(131072); + expect(getModelMaxTokens('xai/grok-3-mini-fast')).toBe(131072); }); }); @@ -545,20 +622,30 @@ describe('Grok Model Tests - Tokens', () => { expect(matchModelName('grok-2-1212')).toBe('grok-2-1212'); expect(matchModelName('grok-2')).toBe('grok-2'); expect(matchModelName('grok-2-latest')).toBe('grok-2-latest'); + // Grok 3 models + expect(matchModelName('grok-3')).toBe('grok-3'); + expect(matchModelName('grok-3-fast')).toBe('grok-3-fast'); + expect(matchModelName('grok-3-mini')).toBe('grok-3-mini'); + expect(matchModelName('grok-3-mini-fast')).toBe('grok-3-mini-fast'); }); test('should match Grok model variations with prefixes', () => { // Vision models should match 
before general models - expect(matchModelName('openai/grok-2-vision-1212')).toBe('grok-2-vision-1212'); - expect(matchModelName('openai/grok-2-vision')).toBe('grok-2-vision'); - expect(matchModelName('openai/grok-2-vision-latest')).toBe('grok-2-vision-latest'); + expect(matchModelName('xai/grok-2-vision-1212')).toBe('grok-2-vision-1212'); + expect(matchModelName('xai/grok-2-vision')).toBe('grok-2-vision'); + expect(matchModelName('xai/grok-2-vision-latest')).toBe('grok-2-vision-latest'); // Beta models - expect(matchModelName('openai/grok-vision-beta')).toBe('grok-vision-beta'); - expect(matchModelName('openai/grok-beta')).toBe('grok-beta'); + expect(matchModelName('xai/grok-vision-beta')).toBe('grok-vision-beta'); + expect(matchModelName('xai/grok-beta')).toBe('grok-beta'); // Text models - expect(matchModelName('openai/grok-2-1212')).toBe('grok-2-1212'); - expect(matchModelName('openai/grok-2')).toBe('grok-2'); - expect(matchModelName('openai/grok-2-latest')).toBe('grok-2-latest'); + expect(matchModelName('xai/grok-2-1212')).toBe('grok-2-1212'); + expect(matchModelName('xai/grok-2')).toBe('grok-2'); + expect(matchModelName('xai/grok-2-latest')).toBe('grok-2-latest'); + // Grok 3 models + expect(matchModelName('xai/grok-3')).toBe('grok-3'); + expect(matchModelName('xai/grok-3-fast')).toBe('grok-3-fast'); + expect(matchModelName('xai/grok-3-mini')).toBe('grok-3-mini'); + expect(matchModelName('xai/grok-3-mini-fast')).toBe('grok-3-mini-fast'); }); }); }); diff --git a/client/package.json b/client/package.json index 184e768c5f..5fd9729a74 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "@librechat/frontend", - "version": "v0.7.7", + "version": "v0.7.8", "description": "", "type": "module", "scripts": { @@ -29,7 +29,7 @@ "homepage": "https://librechat.ai", "dependencies": { "@ariakit/react": "^0.4.15", - "@ariakit/react-core": "^0.4.15", + "@ariakit/react-core": "^0.4.17", "@codesandbox/sandpack-react": "^2.19.10", 
"@dicebear/collection": "^9.2.2", "@dicebear/core": "^9.2.2", @@ -73,7 +73,6 @@ "lodash": "^4.17.21", "lucide-react": "^0.394.0", "match-sorter": "^6.3.4", - "msedge-tts": "^2.0.0", "qrcode.react": "^4.2.0", "rc-input-number": "^7.4.2", "react": "^18.2.0", @@ -87,7 +86,7 @@ "react-i18next": "^15.4.0", "react-lazy-load-image-component": "^1.6.0", "react-markdown": "^9.0.1", - "react-resizable-panels": "^2.1.7", + "react-resizable-panels": "^2.1.8", "react-router-dom": "^6.11.2", "react-speech-recognition": "^3.10.0", "react-textarea-autosize": "^8.4.0", @@ -119,6 +118,7 @@ "@testing-library/user-event": "^14.4.3", "@types/jest": "^29.5.14", "@types/js-cookie": "^3.0.6", + "@types/lodash": "^4.17.15", "@types/node": "^20.3.0", "@types/react": "^18.2.11", "@types/react-dom": "^18.2.4", @@ -141,7 +141,7 @@ "tailwindcss": "^3.4.1", "ts-jest": "^29.2.5", "typescript": "^5.3.3", - "vite": "^6.2.5", + "vite": "^6.3.4", "vite-plugin-compression2": "^1.3.3", "vite-plugin-node-polyfills": "^0.23.0", "vite-plugin-pwa": "^0.21.2" diff --git a/client/public/assets/image_gen_oai.png b/client/public/assets/image_gen_oai.png new file mode 100644 index 0000000000..e1762e7091 Binary files /dev/null and b/client/public/assets/image_gen_oai.png differ diff --git a/client/public/assets/xai.svg b/client/public/assets/xai.svg deleted file mode 100644 index 2aca45ed4f..0000000000 --- a/client/public/assets/xai.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/client/src/Providers/SearchContext.tsx b/client/src/Providers/SearchContext.tsx deleted file mode 100644 index 678818aa18..0000000000 --- a/client/src/Providers/SearchContext.tsx +++ /dev/null @@ -1,6 +0,0 @@ -import { createContext, useContext } from 'react'; -import useSearch from '~/hooks/Conversations/useSearch'; -type SearchContextType = ReturnType; - -export const SearchContext = createContext({} as SearchContextType); -export const useSearchContext = () => useContext(SearchContext); diff --git 
a/client/src/Providers/index.ts b/client/src/Providers/index.ts index 7363c97d41..43da0d346b 100644 --- a/client/src/Providers/index.ts +++ b/client/src/Providers/index.ts @@ -4,7 +4,6 @@ export { default as AgentsProvider } from './AgentsContext'; export * from './ChatContext'; export * from './ShareContext'; export * from './ToastContext'; -export * from './SearchContext'; export * from './FileMapContext'; export * from './AddedChatContext'; export * from './EditorContext'; diff --git a/client/src/common/types.ts b/client/src/common/types.ts index ce47a4667b..cd8b45f6b7 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -29,7 +29,6 @@ export enum STTEndpoints { export enum TTSEndpoints { browser = 'browser', - edge = 'edge', external = 'external', } @@ -307,11 +306,14 @@ export type TAskProps = { export type TOptions = { editedMessageId?: string | null; editedText?: string | null; - resubmitFiles?: boolean; isRegenerate?: boolean; isContinued?: boolean; isEdited?: boolean; overrideMessages?: t.TMessage[]; + /** This value is only true when the user submits a message with "Save & Submit" for a user-created message */ + isResubmission?: boolean; + /** Currently only utilized when `isResubmission === true`, uses that message's currently attached files */ + overrideFiles?: t.TMessage['files']; }; export type TAskFunction = (props: TAskProps, options?: TOptions) => void; @@ -506,7 +508,10 @@ export interface ModelItemProps { className?: string; } -export type ContextType = { navVisible: boolean; setNavVisible: (visible: boolean) => void }; +export type ContextType = { + navVisible: boolean; + setNavVisible: React.Dispatch>; +}; export interface SwitcherProps { endpoint?: t.EModelEndpoint | null; @@ -549,7 +554,8 @@ export type TResData = TBaseResData & { responseMessage: t.TMessage; }; -export type TFinalResData = TBaseResData & { +export type TFinalResData = Omit & { + conversation: Partial & Pick; requestMessage?: t.TMessage; 
responseMessage?: t.TMessage; }; diff --git a/client/src/components/Artifacts/Artifact.tsx b/client/src/components/Artifacts/Artifact.tsx index 5081d9cc59..2b06a2ccc0 100644 --- a/client/src/components/Artifacts/Artifact.tsx +++ b/client/src/components/Artifacts/Artifact.tsx @@ -2,6 +2,7 @@ import React, { useEffect, useCallback, useRef, useState } from 'react'; import throttle from 'lodash/throttle'; import { visit } from 'unist-util-visit'; import { useSetRecoilState } from 'recoil'; +import { useLocation } from 'react-router-dom'; import type { Pluggable } from 'unified'; import type { Artifact } from '~/common'; import { useMessageContext, useArtifactContext } from '~/Providers'; @@ -11,7 +12,16 @@ import ArtifactButton from './ArtifactButton'; export const artifactPlugin: Pluggable = () => { return (tree) => { - visit(tree, ['textDirective', 'leafDirective', 'containerDirective'], (node) => { + visit(tree, ['textDirective', 'leafDirective', 'containerDirective'], (node, index, parent) => { + if (node.type === 'textDirective') { + const replacementText = `:${node.name}`; + if (parent && Array.isArray(parent.children) && typeof index === 'number') { + parent.children[index] = { + type: 'text', + value: replacementText, + }; + } + } if (node.name !== 'artifact') { return; } @@ -25,14 +35,18 @@ export const artifactPlugin: Pluggable = () => { }; }; +const defaultTitle = 'untitled'; +const defaultType = 'unknown'; +const defaultIdentifier = 'lc-no-identifier'; + export function Artifact({ - // eslint-disable-next-line @typescript-eslint/no-unused-vars node, ...props }: Artifact & { children: React.ReactNode | { props: { children: React.ReactNode } }; node: unknown; }) { + const location = useLocation(); const { messageId } = useMessageContext(); const { getNextIndex, resetCounter } = useArtifactContext(); const artifactIndex = useRef(getNextIndex(false)).current; @@ -50,15 +64,18 @@ export function Artifact({ const content = extractContent(props.children); 
logger.log('artifacts', 'updateArtifact: content.length', content.length); - const title = props.title ?? 'Untitled Artifact'; - const type = props.type ?? 'unknown'; - const identifier = props.identifier ?? 'no-identifier'; + const title = props.title ?? defaultTitle; + const type = props.type ?? defaultType; + const identifier = props.identifier ?? defaultIdentifier; const artifactKey = `${identifier}_${type}_${title}_${messageId}` .replace(/\s+/g, '_') .toLowerCase(); throttledUpdateRef.current(() => { const now = Date.now(); + if (artifactKey === `${defaultIdentifier}_${defaultType}_${defaultTitle}_${messageId}`) { + return; + } const currentArtifact: Artifact = { id: artifactKey, @@ -71,6 +88,10 @@ export function Artifact({ lastUpdateTime: now, }; + if (!location.pathname.includes('/c/')) { + return setArtifact(currentArtifact); + } + setArtifacts((prevArtifacts) => { if ( prevArtifacts?.[artifactKey] != null && @@ -95,6 +116,7 @@ export function Artifact({ props.identifier, messageId, artifactIndex, + location.pathname, ]); useEffect(() => { diff --git a/client/src/components/Artifacts/ArtifactButton.tsx b/client/src/components/Artifacts/ArtifactButton.tsx index d8fa557700..162e7d717c 100644 --- a/client/src/components/Artifacts/ArtifactButton.tsx +++ b/client/src/components/Artifacts/ArtifactButton.tsx @@ -1,14 +1,52 @@ -import { useSetRecoilState } from 'recoil'; +import { useEffect, useRef } from 'react'; +import debounce from 'lodash/debounce'; +import { useLocation } from 'react-router-dom'; +import { useRecoilState, useSetRecoilState, useResetRecoilState } from 'recoil'; import type { Artifact } from '~/common'; import FilePreview from '~/components/Chat/Input/Files/FilePreview'; +import { getFileType, logger } from '~/utils'; import { useLocalize } from '~/hooks'; -import { getFileType } from '~/utils'; import store from '~/store'; const ArtifactButton = ({ artifact }: { artifact: Artifact | null }) => { const localize = useLocalize(); - const 
setVisible = useSetRecoilState(store.artifactsVisible); - const setArtifactId = useSetRecoilState(store.currentArtifactId); + const location = useLocation(); + const setVisible = useSetRecoilState(store.artifactsVisibility); + const [artifacts, setArtifacts] = useRecoilState(store.artifactsState); + const setCurrentArtifactId = useSetRecoilState(store.currentArtifactId); + const resetCurrentArtifactId = useResetRecoilState(store.currentArtifactId); + const [visibleArtifacts, setVisibleArtifacts] = useRecoilState(store.visibleArtifacts); + + const debouncedSetVisibleRef = useRef( + debounce((artifactToSet: Artifact) => { + logger.log( + 'artifacts_visibility', + 'Setting artifact to visible state from Artifact button', + artifactToSet, + ); + setVisibleArtifacts((prev) => ({ + ...prev, + [artifactToSet.id]: artifactToSet, + })); + }, 750), + ); + + useEffect(() => { + if (artifact == null || artifact?.id == null || artifact.id === '') { + return; + } + + if (!location.pathname.includes('/c/')) { + return; + } + + const debouncedSetVisible = debouncedSetVisibleRef.current; + debouncedSetVisible(artifact); + return () => { + debouncedSetVisible.cancel(); + }; + }, [artifact, location.pathname]); + if (artifact === null || artifact === undefined) { return null; } @@ -19,12 +57,21 @@ const ArtifactButton = ({ artifact }: { artifact: Artifact | null }) => {

{currentArtifact.title}

@@ -118,22 +107,8 @@ export default function Artifacts() { {localize('com_ui_code')} - @@ -149,29 +124,13 @@ export default function Artifacts() {
{`${currentIndex + 1} / ${ orderedArtifactIds.length }`}
diff --git a/client/src/components/Artifacts/Code.tsx b/client/src/components/Artifacts/Code.tsx index de92c4c0da..21db2055d7 100644 --- a/client/src/components/Artifacts/Code.tsx +++ b/client/src/components/Artifacts/Code.tsx @@ -35,7 +35,7 @@ export const CodeMarkdown = memo( const [userScrolled, setUserScrolled] = useState(false); const currentContent = content; const rehypePlugins = [ - [rehypeKatex, { output: 'mathml' }], + [rehypeKatex], [ rehypeHighlight, { diff --git a/client/src/components/Audio/TTS.tsx b/client/src/components/Audio/TTS.tsx index 14c6346b0f..3ceacb7f8d 100644 --- a/client/src/components/Audio/TTS.tsx +++ b/client/src/components/Audio/TTS.tsx @@ -2,9 +2,8 @@ import { useEffect, useMemo } from 'react'; import { useRecoilValue } from 'recoil'; import type { TMessageAudio } from '~/common'; -import { useLocalize, useTTSBrowser, useTTSEdge, useTTSExternal } from '~/hooks'; -import { VolumeIcon, VolumeMuteIcon, Spinner } from '~/components/svg'; -import { useToastContext } from '~/Providers/ToastContext'; +import { useLocalize, useTTSBrowser, useTTSExternal } from '~/hooks'; +import { VolumeIcon, VolumeMuteIcon, Spinner } from '~/components'; import { logger } from '~/utils'; import store from '~/store'; @@ -85,97 +84,6 @@ export function BrowserTTS({ isLast, index, messageId, content, className }: TMe ); } -export function EdgeTTS({ isLast, index, messageId, content, className }: TMessageAudio) { - const localize = useLocalize(); - const playbackRate = useRecoilValue(store.playbackRate); - const isBrowserSupported = useMemo( - () => typeof MediaSource !== 'undefined' && MediaSource.isTypeSupported('audio/mpeg'), - [], - ); - - const { showToast } = useToastContext(); - const { toggleSpeech, isSpeaking, isLoading, audioRef } = useTTSEdge({ - isLast, - index, - messageId, - content, - }); - - const renderIcon = (size: string) => { - if (isLoading === true) { - return ; - } - - if (isSpeaking === true) { - return ; - } - - return ; - }; - - 
useEffect(() => { - const messageAudio = document.getElementById(`audio-${messageId}`) as HTMLAudioElement | null; - if (!messageAudio) { - return; - } - if (playbackRate != null && playbackRate > 0 && messageAudio.playbackRate !== playbackRate) { - messageAudio.playbackRate = playbackRate; - } - }, [audioRef, isSpeaking, playbackRate, messageId]); - - logger.log( - 'MessageAudio: audioRef.current?.src, audioRef.current', - audioRef.current?.src, - audioRef.current, - ); - - return ( - <> - - {isBrowserSupported ? ( -
); @@ -85,6 +58,7 @@ export function ExternalVoiceDropdown() { onChange={handleVoiceChange} sizeClasses="min-w-[200px] !max-w-[400px] [--anchor-max-width:400px]" testId="ExternalVoiceDropdown" + className="z-50" />
); diff --git a/client/src/components/Bookmarks/BookmarkItem.tsx b/client/src/components/Bookmarks/BookmarkItem.tsx index 92a6df0b54..60698a3165 100644 --- a/client/src/components/Bookmarks/BookmarkItem.tsx +++ b/client/src/components/Bookmarks/BookmarkItem.tsx @@ -34,19 +34,22 @@ const BookmarkItem: FC = ({ tag, selected, handleSubmit, icon, .. if (icon != null) { return icon; } + if (isLoading) { return ; } + if (selected) { return ; } + return ; }; return ( { - const { title: _t, ...convo } = conversation ?? ({} as TConversation); setAddedConvo({ ...convo, @@ -42,7 +41,7 @@ function AddMultiConvo() { role="button" onClick={clickHandler} data-testid="parameters-button" - className="inline-flex size-10 flex-shrink-0 items-center justify-center rounded-lg border border-border-light bg-transparent text-text-primary transition-all ease-in-out hover:bg-surface-tertiary disabled:pointer-events-none disabled:opacity-50 radix-state-open:bg-surface-tertiary" + className="inline-flex size-10 flex-shrink-0 items-center justify-center rounded-xl border border-border-light bg-transparent text-text-primary transition-all ease-in-out hover:bg-surface-tertiary disabled:pointer-events-none disabled:opacity-50 radix-state-open:bg-surface-tertiary" > diff --git a/client/src/components/Chat/ChatView.tsx b/client/src/components/Chat/ChatView.tsx index 9196b3f23c..a554c5f7d1 100644 --- a/client/src/components/Chat/ChatView.tsx +++ b/client/src/components/Chat/ChatView.tsx @@ -2,22 +2,33 @@ import { memo, useCallback } from 'react'; import { useRecoilValue } from 'recoil'; import { useForm } from 'react-hook-form'; import { useParams } from 'react-router-dom'; -import { useGetMessagesByConvoId } from 'librechat-data-provider/react-query'; +import { Constants } from 'librechat-data-provider'; import type { TMessage } from 'librechat-data-provider'; import type { ChatFormValues } from '~/common'; import { ChatContext, AddedChatContext, useFileMapContext, ChatFormProvider } from 
'~/Providers'; import { useChatHelpers, useAddedResponse, useSSE } from '~/hooks'; import ConversationStarters from './Input/ConversationStarters'; +import { useGetMessagesByConvoId } from '~/data-provider'; import MessagesView from './Messages/MessagesView'; import { Spinner } from '~/components/svg'; import Presentation from './Presentation'; +import { buildTree, cn } from '~/utils'; import ChatForm from './Input/ChatForm'; -import { buildTree } from '~/utils'; import Landing from './Landing'; import Header from './Header'; import Footer from './Footer'; import store from '~/store'; +function LoadingSpinner() { + return ( +
+
+ +
+
+ ); +} + function ChatView({ index = 0 }: { index?: number }) { const { conversationId } = useParams(); const rootSubmission = useRecoilValue(store.submissionByIndex(index)); @@ -48,16 +59,15 @@ function ChatView({ index = 0 }: { index?: number }) { }); let content: JSX.Element | null | undefined; - const isLandingPage = !messagesTree || messagesTree.length === 0; + const isLandingPage = + (!messagesTree || messagesTree.length === 0) && + (conversationId === Constants.NEW_CONVO || !conversationId); + const isNavigating = (!messagesTree || messagesTree.length === 0) && conversationId != null; - if (isLoading && conversationId !== 'new') { - content = ( -
-
- -
-
- ); + if (isLoading && conversationId !== Constants.NEW_CONVO) { + content = ; + } else if ((isLoading || isNavigating) && !isLandingPage) { + content = ; } else if (!isLandingPage) { content = ; } else { @@ -71,27 +81,28 @@ function ChatView({ index = 0 }: { index?: number }) {
{!isLoading &&
} - - {isLandingPage ? ( - <> -
- {content} -
- - -
-
-