diff --git a/.env.example b/.env.example
index d87021ea4b..096903299e 100644
--- a/.env.example
+++ b/.env.example
@@ -20,6 +20,11 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
+# Use the address that is at most n number of hops away from the Express application.
+# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
+# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
+# Defaults to 1.
+TRUST_PROXY=1
#===============#
# JSON Logging #
@@ -83,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
-# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
+# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -137,12 +142,12 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
-# GOOGLE_MODELS=gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
+# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
# Vertex AI
-# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
+# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
-# GOOGLE_TITLE_MODEL=gemini-pro
+# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
# GOOGLE_LOC=us-central1
@@ -170,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -204,12 +209,6 @@ ASSISTANTS_API_KEY=user_provided
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
-#============#
-# OpenRouter #
-#============#
-# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
-# OPENROUTER_API_KEY=
-
#============#
# Plugins #
#============#
@@ -232,6 +231,14 @@ AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
+# OpenAI Image Tools Customization
+#----------------
+# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
+# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
+# IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool
+# IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool
+# IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool
+
# DALL·E
#----------------
# DALLE_API_KEY=
@@ -249,6 +256,13 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
+# Flux
+#-----------------
+FLUX_API_BASE_URL=https://api.us1.bfl.ai
+# FLUX_API_BASE_URL = 'https://api.bfl.ml';
+
+# Get your API key at https://api.us1.bfl.ai/auth/profile
+# FLUX_API_KEY=
# Google
#-----------------
@@ -292,6 +306,10 @@ MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
+# Optional: Disable indexing, useful in a multi-node setup
+# where only one instance should perform an index sync.
+# MEILI_NO_SYNC=true
+
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
@@ -354,7 +372,7 @@ ILLEGAL_MODEL_REQ_SCORE=5
# Balance #
#========================#
-CHECK_BALANCE=false
+# CHECK_BALANCE=false
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
#========================#
@@ -389,7 +407,7 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GITHUB_CALLBACK_URL=/oauth/github/callback
-# GitHub Eenterprise
+# GitHub Enterprise
# GITHUB_ENTERPRISE_BASE_URL=
# GITHUB_ENTERPRISE_USER_AGENT=
@@ -422,15 +440,19 @@ OPENID_NAME_CLAIM=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
+# Set to true to automatically redirect to the OpenID provider when a user visits the login page
+# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
+OPENID_AUTO_REDIRECT=false
# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
-LDAP_SEARCH_FILTER=mail={{username}}
+#LDAP_SEARCH_FILTER="mail="
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
+# LDAP_STARTTLS=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
@@ -463,6 +485,24 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
+#========================#
+# S3 AWS Bucket #
+#========================#
+
+AWS_ENDPOINT_URL=
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+AWS_REGION=
+AWS_BUCKET_NAME=
+
+#========================#
+# Azure Blob Storage #
+#========================#
+
+AZURE_STORAGE_CONNECTION_STRING=
+AZURE_STORAGE_PUBLIC_ACCESS=false
+AZURE_CONTAINER_NAME=files
+
#========================#
# Shared Links #
#========================#
@@ -495,6 +535,16 @@ HELP_AND_FAQ_URL=https://librechat.ai
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
+#===============#
+# REDIS Options #
+#===============#
+
+# REDIS_URI=10.10.10.10:6379
+# USE_REDIS=true
+
+# USE_REDIS_CLUSTER=true
+# REDIS_CA=/path/to/ca.crt
+
#==================================================#
# Others #
#==================================================#
@@ -502,9 +552,6 @@ HELP_AND_FAQ_URL=https://librechat.ai
# NODE_ENV=
-# REDIS_URI=
-# USE_REDIS=
-
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
@@ -527,4 +574,4 @@ HELP_AND_FAQ_URL=https://librechat.ai
#=====================================================#
# OpenWeather #
#=====================================================#
-OPENWEATHER_API_KEY=
\ No newline at end of file
+OPENWEATHER_API_KEY=
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 5951ed694e..09444a1b44 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -24,22 +24,40 @@ Project maintainers have the right and responsibility to remove, edit, or reject
## To contribute to this project, please adhere to the following guidelines:
-## 1. Development notes
+## 1. Development Setup
-1. Before starting work, make sure your main branch has the latest commits with `npm run update`
-2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning.
+1. Use Node.js 20.x.
+2. Install typescript globally: `npm i -g typescript`.
+3. Run `npm ci` to install dependencies.
+4. Build the data provider: `npm run build:data-provider`.
+5. Build MCP: `npm run build:mcp`.
+6. Build data schemas: `npm run build:data-schemas`.
+7. Setup and run unit tests:
+ - Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
+ - Run backend unit tests: `npm run test:api`.
+ - Run frontend unit tests: `npm run test:client`.
+8. Setup and run integration tests:
+ - Build client: `cd client && npm run build`.
+ - Create `.env`: `cp .env.example .env`.
+ - Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/), ensure that `mongosh` connects to your local instance.
+   - Run: `npm install playwright`, then `npx playwright install`.
+ - Copy `config.local`: `cp e2e/config.local.example.ts e2e/config.local.ts`.
+ - Copy `librechat.yaml`: `cp librechat.example.yaml librechat.yaml`.
+ - Run: `npm run e2e`.
+
+## 2. Development Notes
+
+1. Before starting work, make sure your main branch has the latest commits with `npm run update`.
+2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning.
3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works.
- Restart the ESLint server ("ESLint: Restart ESLint Server" in VS Code command bar) and your IDE after reinstalling or updating.
4. Clear web app localStorage and cookies before and after changes.
-5. For frontend changes:
- - Install typescript globally: `npm i -g typescript`.
- - Compile typescript before and after changes to check for introduced errors: `cd client && tsc --noEmit`.
-6. Run tests locally:
- - Backend unit tests: `npm run test:api`
- - Frontend unit tests: `npm run test:client`
- - Integration tests: `npm run e2e` (requires playwright installed, `npx install playwright`)
+5. For frontend changes, compile typescript before and after changes to check for introduced errors: `cd client && npm run build`.
+6. Run backend unit tests: `npm run test:api`.
+7. Run frontend unit tests: `npm run test:client`.
+8. Run integration tests: `npm run e2e`.
-## 2. Git Workflow
+## 3. Git Workflow
We utilize a GitFlow workflow to manage changes to this project's codebase. Follow these general steps when contributing code:
@@ -49,7 +67,7 @@ We utilize a GitFlow workflow to manage changes to this project's codebase. Foll
4. Submit a pull request with a clear and concise description of your changes and the reasons behind them.
5. We will review your pull request, provide feedback as needed, and eventually merge the approved changes into the main branch.
-## 3. Commit Message Format
+## 4. Commit Message Format
We follow the [semantic format](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) for commit messages.
@@ -76,7 +94,7 @@ feat: add hat wobble
```
-## 4. Pull Request Process
+## 5. Pull Request Process
When submitting a pull request, please follow these guidelines:
@@ -91,7 +109,7 @@ Ensure that your changes meet the following criteria:
- The commit history is clean and easy to follow. You can use `git rebase` or `git merge --squash` to clean your commit history before submitting the pull request.
- The pull request description clearly outlines the changes and the reasons behind them. Be sure to include the steps to test the pull request.
-## 5. Naming Conventions
+## 6. Naming Conventions
Apply the following naming conventions to branches, labels, and other Git-related entities:
@@ -100,7 +118,7 @@ Apply the following naming conventions to branches, labels, and other Git-relate
- **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`).
- **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`).
-## 6. TypeScript Conversion
+## 7. TypeScript Conversion
1. **Original State**: The project was initially developed entirely in JavaScript (JS).
@@ -126,7 +144,7 @@ Apply the following naming conventions to branches, labels, and other Git-relate
- **Current Stance**: At present, this backend transition is of lower priority and might not be pursued.
-## 7. Module Import Conventions
+## 8. Module Import Conventions
- `npm` packages first,
- from shortest line (top) to longest (bottom)
diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
index 3a3b828ee1..610396959f 100644
--- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
@@ -79,6 +79,8 @@ body:
For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here.
render: shell
+ validations:
+ required: true
- type: textarea
id: screenshots
attributes:
diff --git a/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml b/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml
new file mode 100644
index 0000000000..49b01a814d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml
@@ -0,0 +1,42 @@
+name: Locize Translation Access Request
+description: Request access to an additional language in Locize for LibreChat translations.
+title: "Locize Access Request: "
+labels: ["🌍 i18n", "🔑 access request"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thank you for your interest in contributing to LibreChat translations!
+ Please fill out the form below to request access to an additional language in **Locize**.
+
+ **🔗 Available Languages:** [View the list here](https://www.librechat.ai/docs/translation)
+
+ **📌 Note:** Ensure that the requested language is supported before submitting your request.
+ - type: input
+ id: account_name
+ attributes:
+ label: Locize Account Name
+ description: Please provide your Locize account name (e.g., John Doe).
+ placeholder: e.g., John Doe
+ validations:
+ required: true
+ - type: input
+ id: language_requested
+ attributes:
+ label: Language Code (ISO 639-1)
+ description: |
+ Enter the **ISO 639-1** language code for the language you want to translate into.
+ Example: `es` for Spanish, `zh-Hant` for Traditional Chinese.
+
+ **🔗 Reference:** [Available Languages](https://www.librechat.ai/docs/translation)
+ placeholder: e.g., es
+ validations:
+ required: true
+ - type: checkboxes
+ id: agreement
+ attributes:
+ label: Agreement
+ description: By submitting this request, you confirm that you will contribute responsibly and adhere to the project guidelines.
+ options:
+ - label: I agree to use my access solely for contributing to LibreChat translations.
+ required: true
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/QUESTION.yml b/.github/ISSUE_TEMPLATE/QUESTION.yml
deleted file mode 100644
index c66e6baa3b..0000000000
--- a/.github/ISSUE_TEMPLATE/QUESTION.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Question
-description: Ask your question
-title: "[Question]: "
-labels: ["❓ question"]
-body:
- - type: markdown
- attributes:
- value: |
- Thanks for taking the time to fill this!
- - type: textarea
- id: what-is-your-question
- attributes:
- label: What is your question?
- description: Please give as many details as possible
- placeholder: Please give as many details as possible
- validations:
- required: true
- - type: textarea
- id: more-details
- attributes:
- label: More Details
- description: Please provide more details if needed.
- placeholder: Please provide more details if needed.
- validations:
- required: true
- - type: dropdown
- id: browsers
- attributes:
- label: What is the main subject of your question?
- multiple: true
- options:
- - Documentation
- - Installation
- - UI
- - Endpoints
- - User System/OAuth
- - Other
- - type: textarea
- id: screenshots
- attributes:
- label: Screenshots
- description: If applicable, add screenshots to help explain your problem. You can drag and drop, paste images directly here or link to them.
- - type: checkboxes
- id: terms
- attributes:
- label: Code of Conduct
- description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
- options:
- - label: I agree to follow this project's Code of Conduct
- required: true
diff --git a/.github/configuration-release.json b/.github/configuration-release.json
new file mode 100644
index 0000000000..68fe80ed8f
--- /dev/null
+++ b/.github/configuration-release.json
@@ -0,0 +1,60 @@
+{
+ "categories": [
+ {
+ "title": "### ✨ New Features",
+ "labels": ["feat"]
+ },
+ {
+ "title": "### 🌍 Internationalization",
+ "labels": ["i18n"]
+ },
+ {
+ "title": "### 👐 Accessibility",
+ "labels": ["a11y"]
+ },
+ {
+ "title": "### 🔧 Fixes",
+ "labels": ["Fix", "fix"]
+ },
+ {
+ "title": "### ⚙️ Other Changes",
+ "labels": ["ci", "style", "docs", "refactor", "chore"]
+ }
+ ],
+ "ignore_labels": [
+ "🔁 duplicate",
+ "📊 analytics",
+ "🌱 good first issue",
+ "🔍 investigation",
+ "🙏 help wanted",
+ "❌ invalid",
+ "❓ question",
+ "🚫 wontfix",
+ "🚀 release",
+ "version"
+ ],
+ "base_branches": ["main"],
+ "sort": {
+ "order": "ASC",
+ "on_property": "mergedAt"
+ },
+ "label_extractor": [
+ {
+ "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
+ "target": "$1",
+ "flags": "i",
+ "on_property": "title",
+ "method": "match"
+ },
+ {
+ "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
+ "target": "version",
+ "flags": "i",
+ "on_property": "title",
+ "method": "match"
+ }
+ ],
+ "template": "## [#{{TO_TAG}}] - #{{TO_TAG_DATE}}\n\nChanges from #{{FROM_TAG}} to #{{TO_TAG}}.\n\n#{{CHANGELOG}}\n\n[See full release details][release-#{{TO_TAG}}]\n\n[release-#{{TO_TAG}}]: https://github.com/#{{OWNER}}/#{{REPO}}/releases/tag/#{{TO_TAG}}\n\n---",
+ "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
+ "empty_template": "- no changes"
+}
\ No newline at end of file
diff --git a/.github/configuration-unreleased.json b/.github/configuration-unreleased.json
new file mode 100644
index 0000000000..29eaf5e13b
--- /dev/null
+++ b/.github/configuration-unreleased.json
@@ -0,0 +1,68 @@
+{
+ "categories": [
+ {
+ "title": "### ✨ New Features",
+ "labels": ["feat"]
+ },
+ {
+ "title": "### 🌍 Internationalization",
+ "labels": ["i18n"]
+ },
+ {
+ "title": "### 👐 Accessibility",
+ "labels": ["a11y"]
+ },
+ {
+ "title": "### 🔧 Fixes",
+ "labels": ["Fix", "fix"]
+ },
+ {
+ "title": "### ⚙️ Other Changes",
+ "labels": ["ci", "style", "docs", "refactor", "chore"]
+ }
+ ],
+ "ignore_labels": [
+ "🔁 duplicate",
+ "📊 analytics",
+ "🌱 good first issue",
+ "🔍 investigation",
+ "🙏 help wanted",
+ "❌ invalid",
+ "❓ question",
+ "🚫 wontfix",
+ "🚀 release",
+ "version",
+ "action"
+ ],
+ "base_branches": ["main"],
+ "sort": {
+ "order": "ASC",
+ "on_property": "mergedAt"
+ },
+ "label_extractor": [
+ {
+ "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
+ "target": "$1",
+ "flags": "i",
+ "on_property": "title",
+ "method": "match"
+ },
+ {
+ "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
+ "target": "version",
+ "flags": "i",
+ "on_property": "title",
+ "method": "match"
+ },
+ {
+ "pattern": "^(?:[^A-Za-z0-9]*)(action)\\b.*",
+ "target": "action",
+ "flags": "i",
+ "on_property": "title",
+ "method": "match"
+ }
+ ],
+ "template": "## [Unreleased]\n\n#{{CHANGELOG}}\n\n---",
+ "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
+ "empty_template": "- no changes"
+}
\ No newline at end of file
diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml
index 5bc3d3b2db..b7bccecae8 100644
--- a/.github/workflows/backend-review.yml
+++ b/.github/workflows/backend-review.yml
@@ -39,6 +39,9 @@ jobs:
- name: Install MCP Package
run: npm run build:mcp
+ - name: Install Data Schemas Package
+ run: npm run build:data-schemas
+
- name: Create empty auth.json file
run: |
mkdir -p api/data
@@ -61,4 +64,7 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
- run: cd packages/data-provider && npm run test:ci
\ No newline at end of file
+ run: cd packages/data-provider && npm run test:ci
+
+ - name: Run librechat-mcp unit tests
+ run: cd packages/mcp && npm run test:ci
\ No newline at end of file
diff --git a/.github/workflows/data-schemas.yml b/.github/workflows/data-schemas.yml
new file mode 100644
index 0000000000..fee72fbe02
--- /dev/null
+++ b/.github/workflows/data-schemas.yml
@@ -0,0 +1,58 @@
+name: Publish `@librechat/data-schemas` to NPM
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'packages/data-schemas/package.json'
+ workflow_dispatch:
+ inputs:
+ reason:
+ description: 'Reason for manual trigger'
+ required: false
+ default: 'Manual publish requested'
+
+jobs:
+ build-and-publish:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Use Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18.x'
+
+ - name: Install dependencies
+ run: cd packages/data-schemas && npm ci
+
+ - name: Build
+ run: cd packages/data-schemas && npm run build
+
+ - name: Set up npm authentication
+ run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
+
+ - name: Check version change
+ id: check
+ working-directory: packages/data-schemas
+ run: |
+ PACKAGE_VERSION=$(node -p "require('./package.json').version")
+ PUBLISHED_VERSION=$(npm view @librechat/data-schemas version 2>/dev/null || echo "0.0.0")
+ if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
+ echo "No version change, skipping publish"
+ echo "skip=true" >> $GITHUB_OUTPUT
+ else
+ echo "Version changed, proceeding with publish"
+ echo "skip=false" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Pack package
+ if: steps.check.outputs.skip != 'true'
+ working-directory: packages/data-schemas
+ run: npm pack
+
+ - name: Publish
+ if: steps.check.outputs.skip != 'true'
+ working-directory: packages/data-schemas
+ run: npm publish *.tgz --access public
\ No newline at end of file
diff --git a/.github/workflows/generate-release-changelog-pr.yml b/.github/workflows/generate-release-changelog-pr.yml
new file mode 100644
index 0000000000..405f0ca6dc
--- /dev/null
+++ b/.github/workflows/generate-release-changelog-pr.yml
@@ -0,0 +1,95 @@
+name: Generate Release Changelog PR
+
+on:
+ push:
+ tags:
+ - 'v*.*.*'
+ workflow_dispatch:
+
+jobs:
+ generate-release-changelog-pr:
+ permissions:
+ contents: write # Needed for pushing commits and creating branches.
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ # 1. Checkout the repository (with full history).
+ - name: Checkout Repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ # 2. Generate the release changelog using our custom configuration.
+ - name: Generate Release Changelog
+ id: generate_release
+ uses: mikepenz/release-changelog-builder-action@v5.1.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ configuration: ".github/configuration-release.json"
+ owner: ${{ github.repository_owner }}
+ repo: ${{ github.event.repository.name }}
+ outputFile: CHANGELOG-release.md
+
+ # 3. Update the main CHANGELOG.md:
+ # - If it doesn't exist, create it with a basic header.
+ # - Remove the "Unreleased" section (if present).
+ # - Prepend the new release changelog above previous releases.
+ # - Remove all temporary files before committing.
+ - name: Update CHANGELOG.md
+ run: |
+ # Determine the release tag, e.g. "v1.2.3"
+ TAG=${GITHUB_REF##*/}
+ echo "Using release tag: $TAG"
+
+ # Ensure CHANGELOG.md exists; if not, create a basic header.
+ if [ ! -f CHANGELOG.md ]; then
+ echo "# Changelog" > CHANGELOG.md
+ echo "" >> CHANGELOG.md
+ echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
+ echo "" >> CHANGELOG.md
+ fi
+
+ echo "Updating CHANGELOG.md…"
+
+ # Remove the "Unreleased" section (from "## [Unreleased]" until the first occurrence of '---') if it exists.
+ if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
+ awk '/^## \[Unreleased\]/{flag=1} flag && /^---/{flag=0; next} !flag' CHANGELOG.md > CHANGELOG.cleaned
+ else
+ cp CHANGELOG.md CHANGELOG.cleaned
+ fi
+
+ # Split the cleaned file into:
+ # - header.md: content before the first release header ("## [v...").
+ # - tail.md: content from the first release header onward.
+ awk '/^## \[v/{exit} {print}' CHANGELOG.cleaned > header.md
+ awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.cleaned > tail.md
+
+ # Combine header, the new release changelog, and the tail.
+ echo "Combining updated changelog parts..."
+ cat header.md CHANGELOG-release.md > CHANGELOG.md.new
+ echo "" >> CHANGELOG.md.new
+ cat tail.md >> CHANGELOG.md.new
+
+ mv CHANGELOG.md.new CHANGELOG.md
+
+ # Remove temporary files.
+ rm -f CHANGELOG.cleaned header.md tail.md CHANGELOG-release.md
+
+ echo "Final CHANGELOG.md content:"
+ cat CHANGELOG.md
+
+ # 4. Create (or update) the Pull Request with the updated CHANGELOG.md.
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v7
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ sign-commits: true
+ commit-message: "chore: update CHANGELOG for release ${{ github.ref_name }}"
+ base: main
+ branch: "changelog/${{ github.ref_name }}"
+ reviewers: danny-avila
+ title: "📜 docs: Changelog for release ${{ github.ref_name }}"
+ body: |
+ **Description**:
+ - This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.
diff --git a/.github/workflows/generate-unreleased-changelog-pr.yml b/.github/workflows/generate-unreleased-changelog-pr.yml
new file mode 100644
index 0000000000..133e19f1e2
--- /dev/null
+++ b/.github/workflows/generate-unreleased-changelog-pr.yml
@@ -0,0 +1,107 @@
+name: Generate Unreleased Changelog PR
+
+on:
+ schedule:
+ - cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
+ workflow_dispatch:
+
+jobs:
+ generate-unreleased-changelog-pr:
+ permissions:
+ contents: write # Needed for pushing commits and creating branches.
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ # 1. Checkout the repository on main.
+ - name: Checkout Repository on Main
+ uses: actions/checkout@v4
+ with:
+ ref: main
+ fetch-depth: 0
+
+ # 4. Get the latest version tag.
+ - name: Get Latest Tag
+ id: get_latest_tag
+ run: |
+ LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1) || echo "none")
+ echo "Latest tag: $LATEST_TAG"
+ echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
+
+ # 5. Generate the Unreleased changelog.
+ - name: Generate Unreleased Changelog
+ id: generate_unreleased
+ uses: mikepenz/release-changelog-builder-action@v5.1.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ configuration: ".github/configuration-unreleased.json"
+ owner: ${{ github.repository_owner }}
+ repo: ${{ github.event.repository.name }}
+ outputFile: CHANGELOG-unreleased.md
+ fromTag: ${{ steps.get_latest_tag.outputs.tag }}
+ toTag: main
+
+ # 7. Update CHANGELOG.md with the new Unreleased section.
+ - name: Update CHANGELOG.md
+ id: update_changelog
+ run: |
+ # Create CHANGELOG.md if it doesn't exist.
+ if [ ! -f CHANGELOG.md ]; then
+ echo "# Changelog" > CHANGELOG.md
+ echo "" >> CHANGELOG.md
+ echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
+ echo "" >> CHANGELOG.md
+ fi
+
+ echo "Updating CHANGELOG.md…"
+
+ # Extract content before the "## [Unreleased]" (or first version header if missing).
+ if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
+ awk '/^## \[Unreleased\]/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
+ else
+ awk '/^## \[v/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
+ fi
+
+ # Append the generated Unreleased changelog.
+ echo "" >> CHANGELOG_TMP.md
+ cat CHANGELOG-unreleased.md >> CHANGELOG_TMP.md
+ echo "" >> CHANGELOG_TMP.md
+
+ # Append the remainder of the original changelog (starting from the first version header).
+ awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.md >> CHANGELOG_TMP.md
+
+ # Replace the old file with the updated file.
+ mv CHANGELOG_TMP.md CHANGELOG.md
+
+ # Remove the temporary generated file.
+ rm -f CHANGELOG-unreleased.md
+
+ echo "Final CHANGELOG.md:"
+ cat CHANGELOG.md
+
+ # 8. Check if CHANGELOG.md has any updates.
+ - name: Check for CHANGELOG.md changes
+ id: changelog_changes
+ run: |
+ if git diff --quiet CHANGELOG.md; then
+ echo "has_changes=false" >> $GITHUB_OUTPUT
+ else
+ echo "has_changes=true" >> $GITHUB_OUTPUT
+ fi
+
+ # 9. Create (or update) the Pull Request only if there are changes.
+ - name: Create Pull Request
+ if: steps.changelog_changes.outputs.has_changes == 'true'
+ uses: peter-evans/create-pull-request@v7
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ base: main
+ branch: "changelog/unreleased-update"
+ sign-commits: true
+ commit-message: "action: update Unreleased changelog"
+ title: "📜 docs: Unreleased Changelog"
+ body: |
+ **Description**:
+ - This PR updates the Unreleased section in CHANGELOG.md.
+ - It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
+ regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.
diff --git a/.github/workflows/i18n-unused-keys.yml b/.github/workflows/i18n-unused-keys.yml
index 79f95d3b27..f720a61783 100644
--- a/.github/workflows/i18n-unused-keys.yml
+++ b/.github/workflows/i18n-unused-keys.yml
@@ -4,6 +4,7 @@ on:
pull_request:
paths:
- "client/src/**"
+ - "api/**"
jobs:
detect-unused-i18n-keys:
@@ -21,7 +22,7 @@ jobs:
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
- SOURCE_DIR="client/src"
+ SOURCE_DIRS=("client/src" "api")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then
@@ -37,7 +38,38 @@ jobs:
# Check if each key is used in the source code
for KEY in $KEYS; do
- if ! grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$SOURCE_DIR"; then
+ FOUND=false
+
+ # Special case for dynamically constructed special variable keys
+ if [[ "$KEY" == com_ui_special_var_* ]]; then
+ # Check if TSpecialVarLabel is used in the codebase
+ for DIR in "${SOURCE_DIRS[@]}"; do
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "TSpecialVarLabel" "$DIR"; then
+ FOUND=true
+ break
+ fi
+ done
+
+ # Also check if the key is directly used somewhere
+ if [[ "$FOUND" == false ]]; then
+ for DIR in "${SOURCE_DIRS[@]}"; do
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
+ FOUND=true
+ break
+ fi
+ done
+ fi
+ else
+ # Regular check for other keys
+ for DIR in "${SOURCE_DIRS[@]}"; do
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
+ FOUND=true
+ break
+ fi
+ done
+ fi
+
+ if [[ "$FOUND" == false ]]; then
UNUSED_KEYS+=("$KEY")
fi
done
@@ -59,8 +91,8 @@ jobs:
run: |
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
- # Format the unused keys list correctly, filtering out empty entries
- FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- `/;s/$/`/' )
+ # Format the unused keys list as checkboxes for easy manual checking.
+ FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- [ ] `/;s/$/`/' )
COMMENT_BODY=$(cat <
-
+
diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js
index 522b6beb4f..91939975c4 100644
--- a/api/app/clients/AnthropicClient.js
+++ b/api/app/clients/AnthropicClient.js
@@ -2,12 +2,14 @@ const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
Constants,
+ ErrorTypes,
EModelEndpoint,
+ parseTextParts,
anthropicSettings,
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
-const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { SplitStreamHandler: _Handler } = require('@librechat/agents');
const {
truncateText,
formatMessage,
@@ -16,8 +18,15 @@ const {
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
+const {
+ getClaudeHeaders,
+ configureReasoning,
+ checkPromptCacheSupport,
+} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { createFetch, createStreamEventHandlers } = require('./generators');
const Tokenizer = require('~/server/services/Tokenizer');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
@@ -26,6 +35,15 @@ const { logger } = require('~/config');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
+class SplitStreamHandler extends _Handler {
+ getDeltaContent(chunk) {
+ return (chunk?.delta?.text ?? chunk?.completion) || '';
+ }
+ getReasoningDelta(chunk) {
+ return chunk?.delta?.thinking || '';
+ }
+}
+
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
@@ -68,6 +86,8 @@ class AnthropicClient extends BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
+ /** @type {SplitStreamHandler | undefined} */
+ this.streamHandler;
}
setOptions(options) {
@@ -97,9 +117,10 @@ class AnthropicClient extends BaseClient {
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
- this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
- this.supportsCacheControl =
- this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
+ this.isLegacyOutput = !(
+ /claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
+ );
+ this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
@@ -125,16 +146,21 @@ class AnthropicClient extends BaseClient {
this.options.endpointType ?? this.options.endpoint,
this.options.endpointTokenConfig,
) ??
- 1500;
+ anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
- if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
- throw new Error(
- `maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
- this.maxPromptTokens + this.maxResponseTokens
- }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
- );
+ const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
+ if (reservedTokens > this.maxContextTokens) {
+ const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
+ const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
+ logger.warn(info);
+ throw new Error(errorMessage);
+ } else if (this.maxResponseTokens === this.maxContextTokens) {
+ const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
+ const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
+ logger.warn(info);
+ throw new Error(errorMessage);
}
this.sender =
@@ -159,7 +185,10 @@ class AnthropicClient extends BaseClient {
getClient(requestOptions) {
/** @type {Anthropic.ClientOptions} */
const options = {
- fetch: this.fetch,
+ fetch: createFetch({
+ directEndpoint: this.options.directEndpoint,
+ reverseProxyUrl: this.options.reverseProxyUrl,
+ }),
apiKey: this.apiKey,
};
@@ -171,18 +200,9 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
- if (
- this.supportsCacheControl &&
- requestOptions?.model &&
- requestOptions.model.includes('claude-3-5-sonnet')
- ) {
- options.defaultHeaders = {
- 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
- };
- } else if (this.supportsCacheControl) {
- options.defaultHeaders = {
- 'anthropic-beta': 'prompt-caching-2024-07-31',
- };
+ const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
+ if (headers) {
+ options.defaultHeaders = headers;
}
return new Anthropic(options);
@@ -376,13 +396,13 @@ class AnthropicClient extends BaseClient {
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = this.useMessages
? formatMessage({
- message,
- endpoint: EModelEndpoint.anthropic,
- })
+ message,
+ endpoint: EModelEndpoint.anthropic,
+ })
: {
- author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
- content: message?.content ?? message.text,
- };
+ author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
+ content: message?.content ?? message.text,
+ };
const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
/* If tokens were never counted, or, is a Vision request and the message has files, count again */
@@ -398,6 +418,9 @@ class AnthropicClient extends BaseClient {
this.contextHandlers?.processFile(file);
continue;
}
+ if (file.metadata?.fileIdentifier) {
+ continue;
+ }
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
@@ -657,7 +680,7 @@ class AnthropicClient extends BaseClient {
}
getCompletion() {
- logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
+ logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)");
}
/**
@@ -668,29 +691,41 @@ class AnthropicClient extends BaseClient {
* @returns {Promise} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
- return useMessages ?? this.useMessages
+ return (useMessages ?? this.useMessages)
? await client.messages.create(options)
: await client.completions.create(options);
}
+ getMessageMapMethod() {
+ /**
+ * @param {TMessage} msg
+ */
+ return (msg) => {
+ if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
+ msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
+ } else if (msg.content != null) {
+ msg.text = parseTextParts(msg.content, true);
+ delete msg.content;
+ }
+
+ return msg;
+ };
+ }
+
/**
- * @param {string} modelName
- * @returns {boolean}
+ * @param {string[]} [intermediateReply]
+ * @returns {string}
*/
- checkPromptCacheSupport(modelName) {
- const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
- if (modelMatch.includes('claude-3-5-sonnet-latest')) {
- return false;
+ getStreamText(intermediateReply) {
+ if (!this.streamHandler) {
+ return intermediateReply?.join('') ?? '';
}
- if (
- modelMatch === 'claude-3-5-sonnet' ||
- modelMatch === 'claude-3-5-haiku' ||
- modelMatch === 'claude-3-haiku' ||
- modelMatch === 'claude-3-opus'
- ) {
- return true;
- }
- return false;
+
+ const reasoningText = this.streamHandler.reasoningTokens.join('');
+
+ const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
+
+ return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
}
async sendCompletion(payload, { onProgress, abortController }) {
@@ -710,7 +745,6 @@ class AnthropicClient extends BaseClient {
user_id: this.user,
};
- let text = '';
const {
stream,
model,
@@ -721,22 +755,34 @@ class AnthropicClient extends BaseClient {
topK: top_k,
} = this.modelOptions;
- const requestOptions = {
+ let requestOptions = {
model,
stream: stream || true,
stop_sequences,
temperature,
metadata,
- top_p,
- top_k,
};
if (this.useMessages) {
requestOptions.messages = payload;
- requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
+ requestOptions.max_tokens =
+ maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
} else {
requestOptions.prompt = payload;
- requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
+ requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
+ }
+
+ requestOptions = configureReasoning(requestOptions, {
+ thinking: this.options.thinking,
+ thinkingBudget: this.options.thinkingBudget,
+ });
+
+ if (!/claude-3[-.]7/.test(model)) {
+ requestOptions.top_p = top_p;
+ requestOptions.top_k = top_k;
+ } else if (requestOptions.thinking == null) {
+ requestOptions.topP = top_p;
+ requestOptions.topK = top_k;
}
if (this.systemMessage && this.supportsCacheControl === true) {
@@ -756,13 +802,14 @@ class AnthropicClient extends BaseClient {
}
logger.debug('[AnthropicClient]', { ...requestOptions });
+ const handlers = createStreamEventHandlers(this.options.res);
+ this.streamHandler = new SplitStreamHandler({
+ accumulate: true,
+ runId: this.responseMessageId,
+ handlers,
+ });
- const handleChunk = (currentChunk) => {
- if (currentChunk) {
- text += currentChunk;
- onProgress(currentChunk);
- }
- };
+ let intermediateReply = this.streamHandler.tokens;
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
@@ -783,22 +830,15 @@ class AnthropicClient extends BaseClient {
});
for await (const completion of response) {
- // Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
- if (completion?.delta?.text) {
- handleChunk(completion.delta.text);
- } else if (completion.completion) {
- handleChunk(completion.completion);
- }
-
+ this.streamHandler.handle(completion);
await sleep(streamRate);
}
- // Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
@@ -808,6 +848,10 @@ class AnthropicClient extends BaseClient {
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
+ } else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
+ return this.getStreamText();
+ } else if (intermediateReply.length > 0) {
+ return this.getStreamText(intermediateReply);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
@@ -823,8 +867,7 @@ class AnthropicClient extends BaseClient {
}
await processResponse.bind(this)();
-
- return text.trim();
+ return this.getStreamText(intermediateReply);
}
getSaveOptions() {
@@ -834,6 +877,8 @@ class AnthropicClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
+ thinking: this.options.thinking,
+ thinkingBudget: this.options.thinkingBudget,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -843,7 +888,7 @@ class AnthropicClient extends BaseClient {
}
getBuildMessagesOptions() {
- logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
+ logger.debug("AnthropicClient doesn't use getBuildMessagesOptions");
}
getEncoding() {
diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js
index ebf3ca12d9..55b8780180 100644
--- a/api/app/clients/BaseClient.js
+++ b/api/app/clients/BaseClient.js
@@ -5,13 +5,15 @@ const {
isAgentsEndpoint,
isParamEndpoint,
EModelEndpoint,
+ ContentTypes,
+ excludedKeys,
ErrorTypes,
Constants,
} = require('librechat-data-provider');
-const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
-const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
+const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
+const { checkBalance } = require('~/models/balanceMethods');
const { truncateToolCallOutputs } = require('./prompts');
-const checkBalance = require('~/models/checkBalance');
+const { addSpaceIfNeeded } = require('~/server/utils');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -26,15 +28,10 @@ class BaseClient {
month: 'long',
day: 'numeric',
});
- this.fetch = this.fetch.bind(this);
/** @type {boolean} */
this.skipSaveConvo = false;
/** @type {boolean} */
this.skipSaveUserMessage = false;
- /** @type {ClientDatabaseSavePromise} */
- this.userMessagePromise;
- /** @type {ClientDatabaseSavePromise} */
- this.responsePromise;
/** @type {string} */
this.user;
/** @type {string} */
@@ -55,6 +52,10 @@ class BaseClient {
* Flag to determine if the client re-submitted the latest assistant message.
* @type {boolean | undefined} */
this.continued;
+ /**
+ * Flag to determine if the client has already fetched the conversation while saving new messages.
+ * @type {boolean | undefined} */
+ this.fetchedConvo;
/** @type {TMessage[]} */
this.currentMessages = [];
/** @type {import('librechat-data-provider').VisionModes | undefined} */
@@ -62,15 +63,15 @@ class BaseClient {
}
setOptions() {
- throw new Error('Method \'setOptions\' must be implemented.');
+ throw new Error("Method 'setOptions' must be implemented.");
}
async getCompletion() {
- throw new Error('Method \'getCompletion\' must be implemented.');
+ throw new Error("Method 'getCompletion' must be implemented.");
}
async sendCompletion() {
- throw new Error('Method \'sendCompletion\' must be implemented.');
+ throw new Error("Method 'sendCompletion' must be implemented.");
}
getSaveOptions() {
@@ -236,11 +237,11 @@ class BaseClient {
const userMessage = opts.isEdited
? this.currentMessages[this.currentMessages.length - 2]
: this.createUserMessage({
- messageId: userMessageId,
- parentMessageId,
- conversationId,
- text: message,
- });
+ messageId: userMessageId,
+ parentMessageId,
+ conversationId,
+ text: message,
+ });
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
@@ -360,17 +361,14 @@ class BaseClient {
* context: TMessage[],
* remainingContextTokens: number,
* messagesToRefine: TMessage[],
- * summaryIndex: number,
- * }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
+ * }>} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`.
* `context` is an array of messages that fit within the token limit.
- * `summaryIndex` is the index of the first message in the `messagesToRefine` array.
* `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context.
* `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
*/
async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) {
// Every reply is primed with <|start|>assistant<|message|>, so we
// start with 3 tokens for the label after all messages have been counted.
- let summaryIndex = -1;
let currentTokenCount = 3;
const instructionsTokenCount = instructions?.tokenCount ?? 0;
let remainingContextTokens =
@@ -403,14 +401,12 @@ class BaseClient {
}
const prunedMemory = messages;
- summaryIndex = prunedMemory.length - 1;
remainingContextTokens -= currentTokenCount;
return {
context: context.reverse(),
remainingContextTokens,
messagesToRefine: prunedMemory,
- summaryIndex,
};
}
@@ -453,7 +449,7 @@ class BaseClient {
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
- let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
+ let { context, remainingContextTokens, messagesToRefine } =
await this.getMessagesWithinTokenLimit({
messages: orderedWithInstructions,
instructions,
@@ -523,7 +519,7 @@ class BaseClient {
}
// Make sure to only continue summarization logic if the summary message was generated
- shouldSummarize = summaryMessage && shouldSummarize;
+ shouldSummarize = summaryMessage != null && shouldSummarize === true;
logger.debug('[BaseClient] Context Count (2/2)', {
remainingContextTokens,
@@ -533,17 +529,18 @@ class BaseClient {
/** @type {Record | undefined} */
let tokenCountMap;
if (buildTokenMap) {
- tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
+ const currentPayload = shouldSummarize ? orderedWithInstructions : context;
+ tokenCountMap = currentPayload.reduce((map, message, index) => {
const { messageId } = message;
if (!messageId) {
return map;
}
- if (shouldSummarize && index === summaryIndex && !usePrevSummary) {
+ if (shouldSummarize && index === messagesToRefine.length - 1 && !usePrevSummary) {
map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount };
}
- map[messageId] = orderedWithInstructions[index].tokenCount;
+ map[messageId] = currentPayload[index].tokenCount;
return map;
}, {});
}
@@ -562,6 +559,8 @@ class BaseClient {
}
async sendMessage(message, opts = {}) {
+ /** @type {Promise} */
+ let userMessagePromise;
const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } =
await this.handleStartMethods(message, opts);
@@ -623,17 +622,18 @@ class BaseClient {
}
if (!isEdited && !this.skipSaveUserMessage) {
- this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
+ userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
this.savedMessageIds.add(userMessage.messageId);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
- userMessagePromise: this.userMessagePromise,
+ userMessagePromise,
});
}
}
+ const balance = this.options.req?.app?.locals?.balance;
if (
- isEnabled(process.env.CHECK_BALANCE) &&
+ balance?.enabled &&
supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint]
) {
await checkBalance({
@@ -652,7 +652,9 @@ class BaseClient {
/** @type {string|string[]|undefined} */
const completion = await this.sendCompletion(payload, opts);
- this.abortController.requestCompleted = true;
+ if (this.abortController) {
+ this.abortController.requestCompleted = true;
+ }
/** @type {TMessage} */
const responseMessage = {
@@ -673,7 +675,8 @@ class BaseClient {
responseMessage.text = addSpaceIfNeeded(generation) + completion;
} else if (
Array.isArray(completion) &&
- isParamEndpoint(this.options.endpoint, this.options.endpointType)
+ (this.clientName === EModelEndpoint.agents ||
+ isParamEndpoint(this.options.endpoint, this.options.endpointType))
) {
responseMessage.text = '';
responseMessage.content = completion;
@@ -699,7 +702,13 @@ class BaseClient {
if (usage != null && Number(usage[this.outputTokensKey]) > 0) {
responseMessage.tokenCount = usage[this.outputTokensKey];
completionTokens = responseMessage.tokenCount;
- await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts });
+ await this.updateUserMessageTokenCount({
+ usage,
+ tokenCountMap,
+ userMessage,
+ userMessagePromise,
+ opts,
+ });
} else {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
completionTokens = responseMessage.tokenCount;
@@ -708,8 +717,8 @@ class BaseClient {
await this.recordTokenUsage({ promptTokens, completionTokens, usage });
}
- if (this.userMessagePromise) {
- await this.userMessagePromise;
+ if (userMessagePromise) {
+ await userMessagePromise;
}
if (this.artifactPromises) {
@@ -724,7 +733,11 @@ class BaseClient {
}
}
- this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
+ responseMessage.databasePromise = this.saveMessageToDatabase(
+ responseMessage,
+ saveOptions,
+ user,
+ );
this.savedMessageIds.add(responseMessage.messageId);
delete responseMessage.tokenCount;
return responseMessage;
@@ -745,9 +758,16 @@ class BaseClient {
* @param {StreamUsage} params.usage
* @param {Record} params.tokenCountMap
* @param {TMessage} params.userMessage
+ * @param {Promise} params.userMessagePromise
* @param {object} params.opts
*/
- async updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }) {
+ async updateUserMessageTokenCount({
+ usage,
+ tokenCountMap,
+ userMessage,
+ userMessagePromise,
+ opts,
+ }) {
/** @type {boolean} */
const shouldUpdateCount =
this.calculateCurrentTokenCount != null &&
@@ -783,7 +803,7 @@ class BaseClient {
Note: we update the user message to be sure it gets the calculated token count;
though `AskController` saves the user message, EditController does not
*/
- await this.userMessagePromise;
+ await userMessagePromise;
await this.updateMessageInDatabase({
messageId: userMessage.messageId,
tokenCount: userMessageTokenCount,
@@ -849,7 +869,7 @@ class BaseClient {
}
const savedMessage = await saveMessage(
- this.options.req,
+ this.options?.req,
{
...message,
endpoint: this.options.endpoint,
@@ -863,16 +883,40 @@ class BaseClient {
return { message: savedMessage };
}
- const conversation = await saveConvo(
- this.options.req,
- {
- conversationId: message.conversationId,
- endpoint: this.options.endpoint,
- endpointType: this.options.endpointType,
- ...endpointOptions,
- },
- { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
- );
+ const fieldsToKeep = {
+ conversationId: message.conversationId,
+ endpoint: this.options.endpoint,
+ endpointType: this.options.endpointType,
+ ...endpointOptions,
+ };
+
+ const existingConvo =
+ this.fetchedConvo === true
+ ? null
+ : await getConvo(this.options?.req?.user?.id, message.conversationId);
+
+ const unsetFields = {};
+ const exceptions = new Set(['spec', 'iconURL']);
+ if (existingConvo != null) {
+ this.fetchedConvo = true;
+ for (const key in existingConvo) {
+ if (!key) {
+ continue;
+ }
+ if (excludedKeys.has(key) && !exceptions.has(key)) {
+ continue;
+ }
+
+ if (endpointOptions?.[key] === undefined) {
+ unsetFields[key] = 1;
+ }
+ }
+ }
+
+ const conversation = await saveConvo(this.options?.req, fieldsToKeep, {
+ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
+ unsetFields,
+ });
return { message: savedMessage, conversation };
}
@@ -993,11 +1037,17 @@ class BaseClient {
const processValue = (value) => {
if (Array.isArray(value)) {
for (let item of value) {
- if (!item || !item.type || item.type === 'image_url') {
+ if (
+ !item ||
+ !item.type ||
+ item.type === ContentTypes.THINK ||
+ item.type === ContentTypes.ERROR ||
+ item.type === ContentTypes.IMAGE_URL
+ ) {
continue;
}
- if (item.type === 'tool_call' && item.tool_call != null) {
+ if (item.type === ContentTypes.TOOL_CALL && item.tool_call != null) {
const toolName = item.tool_call?.name || '';
if (toolName != null && toolName && typeof toolName === 'string') {
numTokens += this.getTokenCount(toolName);
@@ -1093,9 +1143,13 @@ class BaseClient {
return message;
}
- const files = await getFiles({
- file_id: { $in: fileIds },
- });
+ const files = await getFiles(
+ {
+ file_id: { $in: fileIds },
+ },
+ {},
+ {},
+ );
await this.addImageURLs(message, files, this.visionMode);
diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js
index 5450300a17..07b2fa97bb 100644
--- a/api/app/clients/ChatGPTClient.js
+++ b/api/app/clients/ChatGPTClient.js
@@ -1,4 +1,4 @@
-const Keyv = require('keyv');
+const { Keyv } = require('keyv');
const crypto = require('crypto');
const { CohereClient } = require('cohere-ai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
@@ -339,7 +339,7 @@ class ChatGPTClient extends BaseClient {
opts.body = JSON.stringify(modelOptions);
if (modelOptions.stream) {
- // eslint-disable-next-line no-async-promise-executor
+ // eslint-disable-next-line no-async-promise-executor
return new Promise(async (resolve, reject) => {
try {
let done = false;
diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js
index 03461a6796..c9102e9ae2 100644
--- a/api/app/clients/GoogleClient.js
+++ b/api/app/clients/GoogleClient.js
@@ -9,6 +9,7 @@ const {
validateVisionModel,
getResponseSender,
endpointSettings,
+ parseTextParts,
EModelEndpoint,
ContentTypes,
VisionModes,
@@ -51,7 +52,7 @@ class GoogleClient extends BaseClient {
const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
this.serviceKey =
- serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : serviceKey ?? {};
+ serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? {});
/** @type {string | null | undefined} */
this.project_id = this.serviceKey.project_id;
this.client_email = this.serviceKey.client_email;
@@ -73,6 +74,8 @@ class GoogleClient extends BaseClient {
* @type {string} */
this.outputTokensKey = 'output_tokens';
this.visionMode = VisionModes.generative;
+ /** @type {string} */
+ this.systemMessage;
if (options.skipSetOptions) {
return;
}
@@ -137,8 +140,7 @@ class GoogleClient extends BaseClient {
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
/** @type {boolean} Whether using a "GenerativeAI" Model */
- this.isGenerativeModel =
- this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm');
+ this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model);
this.maxContextTokens =
this.options.maxContextTokens ??
@@ -184,7 +186,7 @@ class GoogleClient extends BaseClient {
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
- this.options.promptPrefix = promptPrefix;
+ this.systemMessage = promptPrefix;
this.initializeClient();
return this;
}
@@ -196,7 +198,11 @@ class GoogleClient extends BaseClient {
*/
checkVisionRequest(attachments) {
/* Validation vision request */
- this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
+ this.defaultVisionModel =
+ this.options.visionModel ??
+ (!EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)
+ ? this.modelOptions.model
+ : 'gemini-pro-vision');
const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
@@ -311,10 +317,13 @@ class GoogleClient extends BaseClient {
this.contextHandlers?.processFile(file);
continue;
}
+ if (file.metadata?.fileIdentifier) {
+ continue;
+ }
}
this.augmentedPrompt = await this.contextHandlers.createContext();
- this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
+ this.systemMessage = this.augmentedPrompt + this.systemMessage;
}
}
@@ -361,8 +370,8 @@ class GoogleClient extends BaseClient {
throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.');
}
- if (this.options.promptPrefix) {
- const instructionsTokenCount = this.getTokenCount(this.options.promptPrefix);
+ if (this.systemMessage) {
+ const instructionsTokenCount = this.getTokenCount(this.systemMessage);
this.maxContextTokens = this.maxContextTokens - instructionsTokenCount;
if (this.maxContextTokens < 0) {
@@ -417,8 +426,8 @@ class GoogleClient extends BaseClient {
],
};
- if (this.options.promptPrefix) {
- payload.instances[0].context = this.options.promptPrefix;
+ if (this.systemMessage) {
+ payload.instances[0].context = this.systemMessage;
}
logger.debug('[GoogleClient] buildMessages', payload);
@@ -464,7 +473,7 @@ class GoogleClient extends BaseClient {
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
}
- let promptPrefix = (this.options.promptPrefix ?? '').trim();
+ let promptPrefix = (this.systemMessage ?? '').trim();
if (identityPrefix) {
promptPrefix = `${identityPrefix}${promptPrefix}`;
@@ -639,7 +648,7 @@ class GoogleClient extends BaseClient {
let error;
try {
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
- /** @type {GenAI} */
+ /** @type {GenerativeModel} */
const client = this.client;
/** @type {GenerateContentRequest} */
const requestOptions = {
@@ -648,7 +657,7 @@ class GoogleClient extends BaseClient {
generationConfig: googleGenConfigSchema.parse(this.modelOptions),
};
- const promptPrefix = (this.options.promptPrefix ?? '').trim();
+ const promptPrefix = (this.systemMessage ?? '').trim();
if (promptPrefix.length) {
requestOptions.systemInstruction = {
parts: [
@@ -663,7 +672,17 @@ class GoogleClient extends BaseClient {
/** @type {GenAIUsageMetadata} */
let usageMetadata;
- const result = await client.generateContentStream(requestOptions);
+ abortController.signal.addEventListener(
+ 'abort',
+ () => {
+ logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason);
+ },
+ { once: true },
+ );
+
+ const result = await client.generateContentStream(requestOptions, {
+ signal: abortController.signal,
+ });
for await (const chunk of result.stream) {
usageMetadata = !usageMetadata
? chunk?.usageMetadata
@@ -758,6 +777,22 @@ class GoogleClient extends BaseClient {
return this.usage;
}
+ getMessageMapMethod() {
+ /**
+ * @param {TMessage} msg
+ */
+ return (msg) => {
+ if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
+ msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
+ } else if (msg.content != null) {
+ msg.text = parseTextParts(msg.content, true);
+ delete msg.content;
+ }
+
+ return msg;
+ };
+ }
+
/**
* Calculates the correct token count for the current user message based on the token count map and API usage.
* Edge case: If the calculation results in a negative value, it returns the original estimate.
@@ -815,7 +850,8 @@ class GoogleClient extends BaseClient {
let reply = '';
const { abortController } = options;
- const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
+ const model =
+ this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');
diff --git a/api/app/clients/OllamaClient.js b/api/app/clients/OllamaClient.js
index d86e120f43..77d007580c 100644
--- a/api/app/clients/OllamaClient.js
+++ b/api/app/clients/OllamaClient.js
@@ -2,7 +2,7 @@ const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
-const { deriveBaseURL } = require('~/utils');
+const { deriveBaseURL, logAxiosError } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -68,7 +68,7 @@ class OllamaClient {
} catch (error) {
const logMessage =
'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
- logger.error(logMessage, error);
+ logAxiosError({ message: logMessage, error });
return [];
}
}
diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index 368e7d6e84..280db89284 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -1,12 +1,14 @@
-const OpenAI = require('openai');
const { OllamaClient } = require('./OllamaClient');
const { HttpsProxyAgent } = require('https-proxy-agent');
-const { SplitStreamHandler, GraphEvents } = require('@librechat/agents');
+const { SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents');
const {
Constants,
ImageDetail,
+ ContentTypes,
+ parseTextParts,
EModelEndpoint,
resolveHeaders,
+ KnownEndpoints,
openAISettings,
ImageDetailCost,
CohereConstants,
@@ -29,17 +31,18 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { createFetch, createStreamEventHandlers } = require('./generators');
const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { handleOpenAIErrors } = require('./tools/util');
const { createLLM, RunManager } = require('./llm');
-const { logger, sendEvent } = require('~/config');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
const BaseClient = require('./BaseClient');
+const { logger } = require('~/config');
class OpenAIClient extends BaseClient {
constructor(apiKey, options = {}) {
@@ -105,21 +108,17 @@ class OpenAIClient extends BaseClient {
this.checkVisionRequest(this.options.attachments);
}
- const omniPattern = /\b(o1|o3)\b/i;
+ const omniPattern = /\b(o\d)\b/i;
this.isOmni = omniPattern.test(this.modelOptions.model);
- const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
- if (OPENROUTER_API_KEY && !this.azure) {
- this.apiKey = OPENROUTER_API_KEY;
- this.useOpenRouter = true;
- }
-
+ const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { reverseProxyUrl: reverseProxy } = this.options;
if (
!this.useOpenRouter &&
- reverseProxy &&
- reverseProxy.includes('https://openrouter.ai/api/v1')
+ ((reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) ||
+ (this.options.endpoint &&
+ this.options.endpoint.toLowerCase().includes(KnownEndpoints.openrouter)))
) {
this.useOpenRouter = true;
}
@@ -228,10 +227,6 @@ class OpenAIClient extends BaseClient {
logger.debug('Using Azure endpoint');
}
- if (this.useOpenRouter) {
- this.completionsUrl = 'https://openrouter.ai/api/v1/chat/completions';
- }
-
return this;
}
@@ -306,7 +301,9 @@ class OpenAIClient extends BaseClient {
}
getEncoding() {
- return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
+ return this.modelOptions?.model && /gpt-4[^-\s]/.test(this.modelOptions.model)
+ ? 'o200k_base'
+ : 'cl100k_base';
}
/**
@@ -458,6 +455,9 @@ class OpenAIClient extends BaseClient {
this.contextHandlers?.processFile(file);
continue;
}
+ if (file.metadata?.fileIdentifier) {
+ continue;
+ }
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
@@ -475,7 +475,9 @@ class OpenAIClient extends BaseClient {
promptPrefix = this.augmentedPrompt + promptPrefix;
}
- if (promptPrefix && this.isOmni !== true) {
+ const noSystemModelRegex = /\b(o1-preview|o1-mini)\b/i.test(this.modelOptions.model);
+
+ if (promptPrefix && !noSystemModelRegex) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',
@@ -503,11 +505,27 @@ class OpenAIClient extends BaseClient {
};
/** EXPERIMENTAL */
- if (promptPrefix && this.isOmni === true) {
+ if (promptPrefix && noSystemModelRegex) {
const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
if (lastUserMessageIndex !== -1) {
- payload[lastUserMessageIndex].content =
- `${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
+ if (Array.isArray(payload[lastUserMessageIndex].content)) {
+ const firstTextPartIndex = payload[lastUserMessageIndex].content.findIndex(
+ (part) => part.type === ContentTypes.TEXT,
+ );
+ if (firstTextPartIndex !== -1) {
+ const firstTextPart = payload[lastUserMessageIndex].content[firstTextPartIndex];
+ payload[lastUserMessageIndex].content[firstTextPartIndex].text =
+ `${promptPrefix}\n${firstTextPart.text}`;
+ } else {
+ payload[lastUserMessageIndex].content.unshift({
+ type: ContentTypes.TEXT,
+ text: promptPrefix,
+ });
+ }
+ } else {
+ payload[lastUserMessageIndex].content =
+ `${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
+ }
}
}
@@ -596,7 +614,7 @@ class OpenAIClient extends BaseClient {
return result.trim();
}
- logger.debug('[OpenAIClient] sendCompletion: result', result);
+ logger.debug('[OpenAIClient] sendCompletion: result', { ...result });
if (this.isChatCompletion) {
reply = result.choices[0].message.content;
@@ -613,7 +631,7 @@ class OpenAIClient extends BaseClient {
}
initializeLLM({
- model = 'gpt-4o-mini',
+ model = openAISettings.model.default,
modelName,
temperature = 0.2,
max_tokens,
@@ -714,7 +732,7 @@ class OpenAIClient extends BaseClient {
const { OPENAI_TITLE_MODEL } = process.env ?? {};
- let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-4o-mini';
+ let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? openAISettings.model.default;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
}
@@ -805,7 +823,7 @@ ${convo}
const completionTokens = this.getTokenCount(title);
- this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
+ await this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
} catch (e) {
logger.error(
'[OpenAIClient] There was an issue generating the title with the completion method',
@@ -907,7 +925,7 @@ ${convo}
let prompt;
// TODO: remove the gpt fallback and make it specific to endpoint
- const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {};
+ const { OPENAI_SUMMARY_MODEL = openAISettings.model.default } = process.env ?? {};
let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
@@ -1108,6 +1126,9 @@ ${convo}
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
+ } else if (msg.content != null) {
+ msg.text = parseTextParts(msg.content, true);
+ delete msg.content;
}
return msg;
@@ -1159,10 +1180,6 @@ ${convo}
opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
}
- if (this.isVisionModel) {
- modelOptions.max_tokens = 4000;
- }
-
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
@@ -1212,9 +1229,9 @@ ${convo}
opts.baseURL = this.langchainProxy
? constructAzureURL({
- baseURL: this.langchainProxy,
- azureOptions: this.azure,
- })
+ baseURL: this.langchainProxy,
+ azureOptions: this.azure,
+ })
: this.azureEndpoint.split(/(? {
+ const dropParams = [...this.options.dropParams];
+ dropParams.forEach((param) => {
delete modelOptions[param];
});
logger.debug('[OpenAIClient] chatCompletion: dropped params', {
- dropParams: this.options.dropParams,
+ dropParams: dropParams,
modelOptions,
});
}
@@ -1301,15 +1357,11 @@ ${convo}
let streamResolve;
if (
- this.isOmni === true &&
- (this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) &&
- !/o3-.*$/.test(this.modelOptions.model) &&
- modelOptions.stream
+ (!this.isOmni || /^o1-(mini|preview)/i.test(modelOptions.model)) &&
+ modelOptions.reasoning_effort != null
) {
- delete modelOptions.stream;
- delete modelOptions.stop;
- } else if (!this.isOmni && modelOptions.reasoning_effort != null) {
delete modelOptions.reasoning_effort;
+ delete modelOptions.temperature;
}
let reasoningKey = 'reasoning_content';
@@ -1317,16 +1369,19 @@ ${convo}
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
+ if (this.useOpenRouter && modelOptions.reasoning_effort != null) {
+ modelOptions.reasoning = {
+ effort: modelOptions.reasoning_effort,
+ };
+ delete modelOptions.reasoning_effort;
+ }
+ const handlers = createStreamEventHandlers(this.options.res);
this.streamHandler = new SplitStreamHandler({
reasoningKey,
accumulate: true,
runId: this.responseMessageId,
- handlers: {
- [GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
- [GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
- [GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
- },
+ handlers,
});
intermediateReply = this.streamHandler.tokens;
@@ -1340,12 +1395,6 @@ ${convo}
...modelOptions,
stream: true,
};
- if (
- this.options.endpoint === EModelEndpoint.openAI ||
- this.options.endpoint === EModelEndpoint.azureOpenAI
- ) {
- params.stream_options = { include_usage: true };
- }
const stream = await openai.beta.chat.completions
.stream(params)
.on('abort', () => {
@@ -1430,6 +1479,11 @@ ${convo}
});
}
+ if (openai.abortHandler && abortController.signal) {
+ abortController.signal.removeEventListener('abort', openai.abortHandler);
+ openai.abortHandler = undefined;
+ }
+
if (!chatCompletion && UnexpectedRoleError) {
throw new Error(
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js
index bfe222e248..d0ffe2ef75 100644
--- a/api/app/clients/PluginsClient.js
+++ b/api/app/clients/PluginsClient.js
@@ -5,9 +5,8 @@ const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_pars
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
+const { checkBalance } = require('~/models/balanceMethods');
const { formatLangChainMessages } = require('./prompts');
-const checkBalance = require('~/models/checkBalance');
-const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { logger } = require('~/config');
@@ -253,12 +252,14 @@ class PluginsClient extends OpenAIClient {
await this.recordTokenUsage(responseMessage);
}
- this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
+ const databasePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
delete responseMessage.tokenCount;
- return { ...responseMessage, ...result };
+ return { ...responseMessage, ...result, databasePromise };
}
async sendMessage(message, opts = {}) {
+ /** @type {Promise} */
+ let userMessagePromise;
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;
@@ -328,15 +329,16 @@ class PluginsClient extends OpenAIClient {
}
if (!this.skipSaveUserMessage) {
- this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
+ userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
- userMessagePromise: this.userMessagePromise,
+ userMessagePromise,
});
}
}
- if (isEnabled(process.env.CHECK_BALANCE)) {
+ const balance = this.options.req?.app?.locals?.balance;
+ if (balance?.enabled) {
await checkBalance({
req: this.options.req,
res: this.options.res,
diff --git a/api/app/clients/callbacks/createStartHandler.js b/api/app/clients/callbacks/createStartHandler.js
index 4bc32bc0c2..b7292aaf17 100644
--- a/api/app/clients/callbacks/createStartHandler.js
+++ b/api/app/clients/callbacks/createStartHandler.js
@@ -1,8 +1,8 @@
const { promptTokensEstimate } = require('openai-chat-tokens');
const { EModelEndpoint, supportsBalanceCheck } = require('librechat-data-provider');
const { formatFromLangChain } = require('~/app/clients/prompts');
-const checkBalance = require('~/models/checkBalance');
-const { isEnabled } = require('~/server/utils');
+const { getBalanceConfig } = require('~/server/services/Config');
+const { checkBalance } = require('~/models/balanceMethods');
const { logger } = require('~/config');
const createStartHandler = ({
@@ -49,8 +49,8 @@ const createStartHandler = ({
prelimPromptTokens += tokenBuffer;
try {
- // TODO: if plugins extends to non-OpenAI models, this will need to be updated
- if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[EModelEndpoint.openAI]) {
+ const balance = await getBalanceConfig();
+ if (balance?.enabled && supportsBalanceCheck[EModelEndpoint.openAI]) {
const generations =
initialMessageCount && messages.length > initialMessageCount
? messages.slice(initialMessageCount)
diff --git a/api/app/clients/generators.js b/api/app/clients/generators.js
new file mode 100644
index 0000000000..9814cac7a5
--- /dev/null
+++ b/api/app/clients/generators.js
@@ -0,0 +1,71 @@
+const fetch = require('node-fetch');
+const { GraphEvents } = require('@librechat/agents');
+const { logger, sendEvent } = require('~/config');
+const { sleep } = require('~/server/utils');
+
+/**
+ * Makes a function to make HTTP request and logs the process.
+ * @param {Object} params
+ * @param {boolean} [params.directEndpoint] - Whether to use a direct endpoint.
+ * @param {string} [params.reverseProxyUrl] - The reverse proxy URL to use for the request.
+ * @returns {Promise} - A promise that resolves to the response of the fetch request.
+ */
+function createFetch({ directEndpoint = false, reverseProxyUrl = '' }) {
+ /**
+ * Makes an HTTP request and logs the process.
+ * @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object.
+ * @param {RequestInit} [init] - Optional init options for the request.
+ * @returns {Promise} - A promise that resolves to the response of the fetch request.
+ */
+ return async (_url, init) => {
+ let url = _url;
+ if (directEndpoint) {
+ url = reverseProxyUrl;
+ }
+ logger.debug(`Making request to ${url}`);
+ if (typeof Bun !== 'undefined') {
+ return await fetch(url, init);
+ }
+ return await fetch(url, init);
+ };
+}
+
+// Add this at the module level outside the class
+/**
+ * Creates event handlers for stream events that don't capture client references
+ * @param {Object} res - The response object to send events to
+ * @returns {Object} Object containing handler functions
+ */
+function createStreamEventHandlers(res) {
+ return {
+ [GraphEvents.ON_RUN_STEP]: (event) => {
+ if (res) {
+ sendEvent(res, event);
+ }
+ },
+ [GraphEvents.ON_MESSAGE_DELTA]: (event) => {
+ if (res) {
+ sendEvent(res, event);
+ }
+ },
+ [GraphEvents.ON_REASONING_DELTA]: (event) => {
+ if (res) {
+ sendEvent(res, event);
+ }
+ },
+ };
+}
+
+function createHandleLLMNewToken(streamRate) {
+ return async () => {
+ if (streamRate) {
+ await sleep(streamRate);
+ }
+ };
+}
+
+module.exports = {
+ createFetch,
+ createHandleLLMNewToken,
+ createStreamEventHandlers,
+};
diff --git a/api/app/clients/llm/createLLM.js b/api/app/clients/llm/createLLM.js
index 7dc0d40ceb..c8d6666bce 100644
--- a/api/app/clients/llm/createLLM.js
+++ b/api/app/clients/llm/createLLM.js
@@ -34,6 +34,7 @@ function createLLM({
let credentials = { openAIApiKey };
let configuration = {
apiKey: openAIApiKey,
+ ...(configOptions.basePath && { baseURL: configOptions.basePath }),
};
/** @type {AzureOptions} */
diff --git a/api/app/clients/prompts/addCacheControl.js b/api/app/clients/prompts/addCacheControl.js
index eed5910dc9..6bfd901a65 100644
--- a/api/app/clients/prompts/addCacheControl.js
+++ b/api/app/clients/prompts/addCacheControl.js
@@ -1,7 +1,7 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
- * @param {Array} messages - The array of message objects.
- * @returns {Array} - The updated array of message objects with cache control added.
+ * @param {Array} messages - The array of message objects.
+ * @returns {Array} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
@@ -13,7 +13,9 @@ function addCacheControl(messages) {
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
- if (message.role !== 'user') {
+ if (message.getType != null && message.getType() !== 'human') {
+ continue;
+ } else if (message.getType == null && message.role !== 'user') {
continue;
}
diff --git a/api/app/clients/prompts/formatAgentMessages.spec.js b/api/app/clients/prompts/formatAgentMessages.spec.js
index 20731f6984..360fa00a34 100644
--- a/api/app/clients/prompts/formatAgentMessages.spec.js
+++ b/api/app/clients/prompts/formatAgentMessages.spec.js
@@ -282,4 +282,80 @@ describe('formatAgentMessages', () => {
// Additional check to ensure the consecutive assistant messages were combined
expect(result[1].content).toHaveLength(2);
});
+
+ it('should skip THINK type content parts', () => {
+ const payload = [
+ {
+ role: 'assistant',
+ content: [
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' },
+ { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+ ],
+ },
+ ];
+
+ const result = formatAgentMessages(payload);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]).toBeInstanceOf(AIMessage);
+ expect(result[0].content).toEqual('Initial response\nFinal answer');
+ });
+
+ it('should join TEXT content as string when THINK content type is present', () => {
+ const payload = [
+ {
+ role: 'assistant',
+ content: [
+ { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' },
+ ],
+ },
+ ];
+
+ const result = formatAgentMessages(payload);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]).toBeInstanceOf(AIMessage);
+ expect(typeof result[0].content).toBe('string');
+ expect(result[0].content).toBe(
+ 'First part of response\nSecond part of response\nFinal part of response',
+ );
+ expect(result[0].content).not.toContain('Analyzing the problem...');
+ });
+
+ it('should exclude ERROR type content parts', () => {
+ const payload = [
+ {
+ role: 'assistant',
+ content: [
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
+ {
+ type: ContentTypes.ERROR,
+ [ContentTypes.ERROR]:
+ 'An error occurred while processing the request: Something went wrong',
+ },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+ ],
+ },
+ ];
+
+ const result = formatAgentMessages(payload);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]).toBeInstanceOf(AIMessage);
+ expect(result[0].content).toEqual([
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' },
+ { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
+ ]);
+
+ // Make sure no error content exists in the result
+ const hasErrorContent = result[0].content.some(
+ (item) =>
+ item.type === ContentTypes.ERROR || JSON.stringify(item).includes('An error occurred'),
+ );
+ expect(hasErrorContent).toBe(false);
+ });
});
diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js
index d84e62cca8..9fa0d40497 100644
--- a/api/app/clients/prompts/formatMessages.js
+++ b/api/app/clients/prompts/formatMessages.js
@@ -153,6 +153,7 @@ const formatAgentMessages = (payload) => {
let currentContent = [];
let lastAIMessage = null;
+ let hasReasoning = false;
for (const part of message.content) {
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
/*
@@ -207,11 +208,27 @@ const formatAgentMessages = (payload) => {
content: output || '',
}),
);
+ } else if (part.type === ContentTypes.THINK) {
+ hasReasoning = true;
+ continue;
+ } else if (part.type === ContentTypes.ERROR || part.type === ContentTypes.AGENT_UPDATE) {
+ continue;
} else {
currentContent.push(part);
}
}
+ if (hasReasoning) {
+ currentContent = currentContent
+ .reduce((acc, curr) => {
+ if (curr.type === ContentTypes.TEXT) {
+ return `${acc}${curr[ContentTypes.TEXT]}\n`;
+ }
+ return acc;
+ }, '')
+ .trim();
+ }
+
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
}
diff --git a/api/app/clients/specs/AnthropicClient.test.js b/api/app/clients/specs/AnthropicClient.test.js
index eef6bb6748..223f3038c0 100644
--- a/api/app/clients/specs/AnthropicClient.test.js
+++ b/api/app/clients/specs/AnthropicClient.test.js
@@ -1,3 +1,4 @@
+const { SplitStreamHandler } = require('@librechat/agents');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
@@ -405,4 +406,327 @@ describe('AnthropicClient', () => {
expect(Number.isNaN(result)).toBe(false);
});
});
+
+ describe('maxOutputTokens handling for different models', () => {
+ it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.5-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+ });
+
+ it('should not cap maxOutputTokens for Claude 3.7 models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
+ });
+
+ it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+
+ // Test with decimal notation
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.5-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+ });
+
+ it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
+ const client = new AnthropicClient('test-api-key');
+ const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
+
+ // Test haiku
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-haiku',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+
+ // Test opus
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-opus',
+ maxOutputTokens: highTokenValue,
+ },
+ });
+
+ expect(client.modelOptions.maxOutputTokens).toBe(
+ anthropicSettings.legacy.maxOutputTokens.default,
+ );
+ });
+ });
+
+ describe('topK/topP parameters for different models', () => {
+ beforeEach(() => {
+ // Mock the SplitStreamHandler
+ jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
+ });
+
+ afterEach(() => {
+ jest.restoreAllMocks();
+ });
+
+ it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-opus',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).toHaveProperty('top_k', 10);
+ expect(capturedOptions).toHaveProperty('top_p', 0.9);
+ });
+
+ it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-5-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).toHaveProperty('top_k', 10);
+ expect(capturedOptions).toHaveProperty('top_p', 0.9);
+ });
+
+ it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).not.toHaveProperty('top_k');
+ expect(capturedOptions).not.toHaveProperty('top_p');
+ });
+
+ it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
+ const client = new AnthropicClient('test-api-key');
+
+ // Create a mock async generator function
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ // Mock createResponse to return the async generator
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ });
+
+ // Mock getClient to capture the request options
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ // Check the options passed to getClient
+ expect(capturedOptions).not.toHaveProperty('top_k');
+ expect(capturedOptions).not.toHaveProperty('top_p');
+ });
+ });
+
+ it('should include top_k and top_p parameters for Claude-3.7 models when thinking is explicitly disabled', async () => {
+ const client = new AnthropicClient('test-api-key', {
+ modelOptions: {
+ model: 'claude-3-7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ thinking: false,
+ });
+
+ async function* mockAsyncGenerator() {
+ yield { type: 'message_start', message: { usage: {} } };
+ yield { delta: { text: 'Test response' } };
+ yield { type: 'message_delta', usage: {} };
+ }
+
+ jest.spyOn(client, 'createResponse').mockImplementation(() => {
+ return mockAsyncGenerator();
+ });
+
+ let capturedOptions = null;
+ jest.spyOn(client, 'getClient').mockImplementation((options) => {
+ capturedOptions = options;
+ return {};
+ });
+
+ const payload = [{ role: 'user', content: 'Test message' }];
+ await client.sendCompletion(payload, {});
+
+ expect(capturedOptions).toHaveProperty('topK', 10);
+ expect(capturedOptions).toHaveProperty('topP', 0.9);
+
+ client.setOptions({
+ modelOptions: {
+ model: 'claude-3.7-sonnet',
+ temperature: 0.7,
+ topK: 10,
+ topP: 0.9,
+ },
+ thinking: false,
+ });
+
+ await client.sendCompletion(payload, {});
+
+ expect(capturedOptions).toHaveProperty('topK', 10);
+ expect(capturedOptions).toHaveProperty('topP', 0.9);
+ });
});
diff --git a/api/app/clients/specs/BaseClient.test.js b/api/app/clients/specs/BaseClient.test.js
index e899449fb9..d620d5f647 100644
--- a/api/app/clients/specs/BaseClient.test.js
+++ b/api/app/clients/specs/BaseClient.test.js
@@ -30,7 +30,9 @@ jest.mock('~/models', () => ({
updateFileUsage: jest.fn(),
}));
-jest.mock('@langchain/openai', () => {
+const { getConvo, saveConvo } = require('~/models');
+
+jest.mock('@librechat/agents', () => {
return {
ChatOpenAI: jest.fn().mockImplementation(() => {
return {};
@@ -162,7 +164,7 @@ describe('BaseClient', () => {
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
- expect(result.summaryIndex).toEqual(expectedIndex);
+ expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
@@ -198,7 +200,7 @@ describe('BaseClient', () => {
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
- expect(result.summaryIndex).toEqual(expectedIndex);
+ expect(result.messagesToRefine.length - 1).toEqual(expectedIndex);
expect(result.remainingContextTokens).toBe(expectedRemainingContextTokens);
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
@@ -540,10 +542,11 @@ describe('BaseClient', () => {
test('saveMessageToDatabase is called with the correct arguments', async () => {
const saveOptions = TestClient.getSaveOptions();
- const user = {}; // Mock user
+ const user = {};
const opts = { user };
+ const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
await TestClient.sendMessage('Hello, world!', opts);
- expect(TestClient.saveMessageToDatabase).toHaveBeenCalledWith(
+ expect(saveSpy).toHaveBeenCalledWith(
expect.objectContaining({
sender: expect.any(String),
text: expect.any(String),
@@ -557,6 +560,157 @@ describe('BaseClient', () => {
);
});
+ test('should handle existing conversation when getConvo retrieves one', async () => {
+ const existingConvo = {
+ conversationId: 'existing-convo-id',
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'user', content: 'Existing message 1' },
+ { role: 'assistant', content: 'Existing response 1' },
+ ],
+ temperature: 1,
+ };
+
+ const { temperature: _temp, ...newConvo } = existingConvo;
+
+ const user = {
+ id: 'user-id',
+ };
+
+ getConvo.mockResolvedValue(existingConvo);
+ saveConvo.mockResolvedValue(newConvo);
+
+ TestClient = initializeFakeClient(
+ apiKey,
+ {
+ ...options,
+ req: {
+ user,
+ },
+ },
+ [],
+ );
+
+ const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
+
+ const newMessage = 'New message in existing conversation';
+ const response = await TestClient.sendMessage(newMessage, {
+ user,
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(getConvo).toHaveBeenCalledWith(user.id, existingConvo.conversationId);
+ expect(TestClient.conversationId).toBe(existingConvo.conversationId);
+ expect(response.conversationId).toBe(existingConvo.conversationId);
+ expect(TestClient.fetchedConvo).toBe(true);
+
+ expect(saveSpy).toHaveBeenCalledWith(
+ expect.objectContaining({
+ conversationId: existingConvo.conversationId,
+ text: newMessage,
+ }),
+ expect.any(Object),
+ expect.any(Object),
+ );
+
+ expect(saveConvo).toHaveBeenCalledTimes(2);
+ expect(saveConvo).toHaveBeenCalledWith(
+ expect.any(Object),
+ expect.objectContaining({
+ conversationId: existingConvo.conversationId,
+ }),
+ expect.objectContaining({
+ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
+ unsetFields: {
+ temperature: 1,
+ },
+ }),
+ );
+
+ await TestClient.sendMessage('Another message', {
+ conversationId: existingConvo.conversationId,
+ });
+ expect(getConvo).toHaveBeenCalledTimes(1);
+ });
+
+ test('should correctly handle existing conversation and unset fields appropriately', async () => {
+ const existingConvo = {
+ conversationId: 'existing-convo-id',
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'user', content: 'Existing message 1' },
+ { role: 'assistant', content: 'Existing response 1' },
+ ],
+ title: 'Existing Conversation',
+ someExistingField: 'existingValue',
+ anotherExistingField: 'anotherValue',
+ temperature: 0.7,
+ modelLabel: 'GPT-3.5',
+ };
+
+ getConvo.mockResolvedValue(existingConvo);
+ saveConvo.mockResolvedValue(existingConvo);
+
+ TestClient = initializeFakeClient(
+ apiKey,
+ {
+ ...options,
+ modelOptions: {
+ model: 'gpt-4',
+ temperature: 0.5,
+ },
+ },
+ [],
+ );
+
+ const newMessage = 'New message in existing conversation';
+ await TestClient.sendMessage(newMessage, {
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(saveConvo).toHaveBeenCalledTimes(2);
+
+ const saveConvoCall = saveConvo.mock.calls[0];
+ const [, savedFields, saveOptions] = saveConvoCall;
+
+ // Instead of checking all excludedKeys, we'll just check specific fields
+ // that we know should be excluded
+ expect(savedFields).not.toHaveProperty('messages');
+ expect(savedFields).not.toHaveProperty('title');
+
+ // Only check that someExistingField is in unsetFields
+ expect(saveOptions.unsetFields).toHaveProperty('someExistingField', 1);
+
+ // Mock saveConvo to return the expected fields
+ saveConvo.mockImplementation((req, fields) => {
+ return Promise.resolve({
+ ...fields,
+ endpoint: 'openai',
+ endpointType: 'openai',
+ model: 'gpt-4',
+ temperature: 0.5,
+ });
+ });
+
+ // Only check the conversationId since that's the only field we can be sure about
+ expect(savedFields).toHaveProperty('conversationId', 'existing-convo-id');
+
+ expect(TestClient.fetchedConvo).toBe(true);
+
+ await TestClient.sendMessage('Another message', {
+ conversationId: existingConvo.conversationId,
+ });
+
+ expect(getConvo).toHaveBeenCalledTimes(1);
+
+ const secondSaveConvoCall = saveConvo.mock.calls[1];
+ expect(secondSaveConvoCall[2]).toHaveProperty('unsetFields', {});
+ });
+
test('sendCompletion is called with the correct arguments', async () => {
const payload = {}; // Mock payload
TestClient.buildMessages.mockReturnValue({ prompt: payload, tokenCountMap: null });
diff --git a/api/app/clients/specs/FakeClient.js b/api/app/clients/specs/FakeClient.js
index 7f4b75e1db..a466bb97f9 100644
--- a/api/app/clients/specs/FakeClient.js
+++ b/api/app/clients/specs/FakeClient.js
@@ -56,7 +56,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
let TestClient = new FakeClient(apiKey);
TestClient.options = options;
TestClient.abortController = { abort: jest.fn() };
- TestClient.saveMessageToDatabase = jest.fn();
TestClient.loadHistory = jest
.fn()
.mockImplementation((conversationId, parentMessageId = null) => {
@@ -86,7 +85,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
return 'Mock response text';
});
- // eslint-disable-next-line no-unused-vars
TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
return {
choices: [
diff --git a/api/app/clients/specs/OpenAIClient.test.js b/api/app/clients/specs/OpenAIClient.test.js
index 2aaec518eb..579f636eef 100644
--- a/api/app/clients/specs/OpenAIClient.test.js
+++ b/api/app/clients/specs/OpenAIClient.test.js
@@ -1,9 +1,7 @@
jest.mock('~/cache/getLogStores');
require('dotenv').config();
-const OpenAI = require('openai');
-const getLogStores = require('~/cache/getLogStores');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
-const { genAzureChatCompletion } = require('~/utils/azureUtils');
+const getLogStores = require('~/cache/getLogStores');
const OpenAIClient = require('../OpenAIClient');
jest.mock('meilisearch');
@@ -36,19 +34,21 @@ jest.mock('~/models', () => ({
updateFileUsage: jest.fn(),
}));
-jest.mock('@langchain/openai', () => {
- return {
- ChatOpenAI: jest.fn().mockImplementation(() => {
- return {};
- }),
- };
+// Import the actual module but mock specific parts
+const agents = jest.requireActual('@librechat/agents');
+const { CustomOpenAIClient } = agents;
+
+// Also mock ChatOpenAI to prevent real API calls
+agents.ChatOpenAI = jest.fn().mockImplementation(() => {
+ return {};
+});
+agents.AzureChatOpenAI = jest.fn().mockImplementation(() => {
+ return {};
});
-jest.mock('openai');
-
-jest.spyOn(OpenAI, 'constructor').mockImplementation(function (...options) {
- // We can add additional logic here if needed
- return new OpenAI(...options);
+// Mock only the CustomOpenAIClient constructor
+jest.spyOn(CustomOpenAIClient, 'constructor').mockImplementation(function (...options) {
+ return new CustomOpenAIClient(...options);
});
const finalChatCompletion = jest.fn().mockResolvedValue({
@@ -120,7 +120,13 @@ const create = jest.fn().mockResolvedValue({
],
});
-OpenAI.mockImplementation(() => ({
+// Mock the implementation of CustomOpenAIClient instances
+jest.spyOn(CustomOpenAIClient.prototype, 'constructor').mockImplementation(function () {
+ return this;
+});
+
+// Create a mock for the CustomOpenAIClient class
+const mockCustomOpenAIClient = jest.fn().mockImplementation(() => ({
beta: {
chat: {
completions: {
@@ -135,11 +141,14 @@ OpenAI.mockImplementation(() => ({
},
}));
-describe('OpenAIClient', () => {
- const mockSet = jest.fn();
- const mockCache = { set: mockSet };
+CustomOpenAIClient.mockImplementation = mockCustomOpenAIClient;
+describe('OpenAIClient', () => {
beforeEach(() => {
+ const mockCache = {
+ get: jest.fn().mockResolvedValue({}),
+ set: jest.fn(),
+ };
getLogStores.mockReturnValue(mockCache);
});
let client;
@@ -202,14 +211,6 @@ describe('OpenAIClient', () => {
expect(client.modelOptions.temperature).toBe(0.7);
});
- it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
- process.env.OPENROUTER_API_KEY = 'openrouter-key';
- client.setOptions({});
- expect(client.apiKey).toBe('openrouter-key');
- expect(client.useOpenRouter).toBe(true);
- delete process.env.OPENROUTER_API_KEY; // Cleanup
- });
-
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
process.env.OPENAI_FORCE_PROMPT = 'true';
client.setOptions({});
@@ -534,7 +535,6 @@ describe('OpenAIClient', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
- delete process.env.OPENROUTER_API_KEY;
});
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
@@ -567,41 +567,6 @@ describe('OpenAIClient', () => {
expect(requestBody).toHaveProperty('model');
expect(requestBody.model).toBe(model);
});
-
- it('[Azure OpenAI] should call chatCompletion and OpenAI.stream with correct args', async () => {
- // Set a default model
- process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt4-turbo';
-
- const onProgress = jest.fn().mockImplementation(() => ({}));
- client.azure = defaultAzureOptions;
- const chatCompletion = jest.spyOn(client, 'chatCompletion');
- await client.sendMessage('Hi mom!', {
- replaceOptions: true,
- ...defaultOptions,
- modelOptions: { model: 'gpt4-turbo', stream: true },
- onProgress,
- azure: defaultAzureOptions,
- });
-
- expect(chatCompletion).toHaveBeenCalled();
- expect(chatCompletion.mock.calls.length).toBe(1);
-
- const chatCompletionArgs = chatCompletion.mock.calls[0][0];
- const { payload } = chatCompletionArgs;
-
- expect(payload[0].role).toBe('user');
- expect(payload[0].content).toBe('Hi mom!');
-
- // Azure OpenAI does not use the model property, and will error if it's passed
- // This check ensures the model property is not present
- const streamArgs = stream.mock.calls[0][0];
- expect(streamArgs).not.toHaveProperty('model');
-
- // Check if the baseURL is correct
- const constructorArgs = OpenAI.mock.calls[0][0];
- const expectedURL = genAzureChatCompletion(defaultAzureOptions).split('/chat')[0];
- expect(constructorArgs.baseURL).toBe(expectedURL);
- });
});
describe('checkVisionRequest functionality', () => {
diff --git a/api/app/clients/tools/index.js b/api/app/clients/tools/index.js
index b8df50c77d..87b1884e88 100644
--- a/api/app/clients/tools/index.js
+++ b/api/app/clients/tools/index.js
@@ -2,13 +2,15 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
+const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
-const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
+const createYouTubeTools = require('./structured/YouTube');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
const TraversaalSearch = require('./structured/TraversaalSearch');
+const createOpenAIImageTools = require('./structured/OpenAIImageTools');
const TavilySearchResults = require('./structured/TavilySearchResults');
/** @type {Record<string, TPlugin>} */
@@ -30,6 +32,7 @@ module.exports = {
manifestToolMap,
// Structured Tools
DALLE3,
+ FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
@@ -38,4 +41,5 @@ module.exports = {
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
+ createOpenAIImageTools,
};
diff --git a/api/app/clients/tools/manifest.json b/api/app/clients/tools/manifest.json
index 7cb92b8d87..55c1b1c51e 100644
--- a/api/app/clients/tools/manifest.json
+++ b/api/app/clients/tools/manifest.json
@@ -44,6 +44,20 @@
}
]
},
+ {
+ "name": "OpenAI Image Tools",
+ "pluginKey": "image_gen_oai",
+ "toolkit": true,
+ "description": "Image Generation and Editing using OpenAI's latest state-of-the-art models",
+ "icon": "/assets/image_gen_oai.png",
+ "authConfig": [
+ {
+ "authField": "IMAGE_GEN_OAI_API_KEY",
+ "label": "OpenAI Image Tools API Key",
+ "description": "Your OpenAI API Key for Image Generation and Editing"
+ }
+ ]
+ },
{
"name": "Wolfram",
"pluginKey": "wolfram",
@@ -164,5 +178,19 @@
"description": "Sign up at OpenWeather, then get your key at API keys."
}
]
+ },
+ {
+ "name": "Flux",
+ "pluginKey": "flux",
+ "description": "Generate images using text with the Flux API.",
+ "icon": "https://blackforestlabs.ai/wp-content/uploads/2024/07/bfl_logo_retraced_blk.png",
+ "isAuthRequired": "true",
+ "authConfig": [
+ {
+ "authField": "FLUX_API_KEY",
+ "label": "Your Flux API Key",
+ "description": "Provide your Flux API key from your user profile."
+ }
+ ]
}
]
diff --git a/api/app/clients/tools/structured/DALLE3.js b/api/app/clients/tools/structured/DALLE3.js
index b604ad4ea4..fc0f1851f6 100644
--- a/api/app/clients/tools/structured/DALLE3.js
+++ b/api/app/clients/tools/structured/DALLE3.js
@@ -1,14 +1,17 @@
const { z } = require('zod');
const path = require('path');
const OpenAI = require('openai');
+const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
-const { FileContext } = require('librechat-data-provider');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
+const displayMessage =
+ 'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
@@ -114,10 +117,7 @@ class DALLE3 extends Tool {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
- return [
- 'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.',
- value,
- ];
+ return [displayMessage, value];
}
return value;
@@ -160,6 +160,32 @@ Error Message: ${error.message}`);
);
}
+ if (this.isAgent) {
+ let fetchOptions = {};
+ if (process.env.PROXY) {
+ fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ const imageResponse = await fetch(theImageUrl, fetchOptions);
+ const arrayBuffer = await imageResponse.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/png;base64,${base64}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ }
+
const imageBasename = getImageBasename(theImageUrl);
const imageExt = path.extname(imageBasename);
diff --git a/api/app/clients/tools/structured/FluxAPI.js b/api/app/clients/tools/structured/FluxAPI.js
new file mode 100644
index 0000000000..80f9772200
--- /dev/null
+++ b/api/app/clients/tools/structured/FluxAPI.js
@@ -0,0 +1,554 @@
+const { z } = require('zod');
+const axios = require('axios');
+const fetch = require('node-fetch');
+const { v4: uuidv4 } = require('uuid');
+const { Tool } = require('@langchain/core/tools');
+const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
+const { logger } = require('~/config');
+
+const displayMessage =
+ 'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
+
+/**
+ * FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.
+ * Each call generates one image. If multiple images are needed, make multiple consecutive calls with the same or varied prompts.
+ */
+class FluxAPI extends Tool {
+ // Pricing constants in USD per image
+ static PRICING = {
+ FLUX_PRO_1_1_ULTRA: -0.06, // /v1/flux-pro-1.1-ultra
+ FLUX_PRO_1_1: -0.04, // /v1/flux-pro-1.1
+ FLUX_PRO: -0.05, // /v1/flux-pro
+ FLUX_DEV: -0.025, // /v1/flux-dev
+ FLUX_PRO_FINETUNED: -0.06, // /v1/flux-pro-finetuned
+ FLUX_PRO_1_1_ULTRA_FINETUNED: -0.07, // /v1/flux-pro-1.1-ultra-finetuned
+ };
+
+ constructor(fields = {}) {
+ super();
+
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
+ this.userId = fields.userId;
+ this.fileStrategy = fields.fileStrategy;
+
+ /** @type {boolean} **/
+ this.isAgent = fields.isAgent;
+ this.returnMetadata = fields.returnMetadata ?? false;
+
+ if (fields.processFileURL) {
+ /** @type {processFileURL} Necessary for output to contain all image metadata. */
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
+
+ this.apiKey = fields.FLUX_API_KEY || this.getApiKey();
+
+ this.name = 'flux';
+ this.description =
+ 'Use Flux to generate images from text descriptions. This tool can generate images and list available finetunes. Each generate call creates one image. For multiple images, make multiple consecutive calls.';
+
+ this.description_for_model = `// Transform any image description into a detailed, high-quality prompt. Never submit a prompt under 3 sentences. Follow these core rules:
+ // 1. ALWAYS enhance basic prompts into 5-10 detailed sentences (e.g., "a cat" becomes: "A close-up photo of a sleek Siamese cat with piercing blue eyes. The cat sits elegantly on a vintage leather armchair, its tail curled gracefully around its paws. Warm afternoon sunlight streams through a nearby window, casting gentle shadows across its face and highlighting the subtle variations in its cream and chocolate-point fur. The background is softly blurred, creating a shallow depth of field that draws attention to the cat's expressive features. The overall composition has a peaceful, contemplative mood with a professional photography style.")
+ // 2. Each prompt MUST be 3-6 descriptive sentences minimum, focusing on visual elements: lighting, composition, mood, and style
+ // Use action: 'list_finetunes' to see available custom models. When using finetunes, use endpoint: '/v1/flux-pro-finetuned' (default) or '/v1/flux-pro-1.1-ultra-finetuned' for higher quality and aspect ratio.`;
+
+ // Add base URL from environment variable with fallback
+ this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
+
+ // Define the schema for structured input
+ this.schema = z.object({
+ action: z
+ .enum(['generate', 'list_finetunes', 'generate_finetuned'])
+ .default('generate')
+ .describe(
+ 'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
+ ),
+ prompt: z
+ .string()
+ .optional()
+ .describe(
+ 'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
+ ),
+ width: z
+ .number()
+ .optional()
+ .describe(
+ 'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
+ ),
+ height: z
+ .number()
+ .optional()
+ .describe(
+ 'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
+ ),
+ prompt_upsampling: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe('Whether to perform upsampling on the prompt.'),
+ steps: z
+ .number()
+ .int()
+ .optional()
+ .describe('Number of steps to run the model for, a number from 1 to 50. Default is 40.'),
+ seed: z.number().optional().describe('Optional seed for reproducibility.'),
+ safety_tolerance: z
+ .number()
+ .optional()
+ .default(6)
+ .describe(
+ 'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
+ ),
+ endpoint: z
+ .enum([
+ '/v1/flux-pro-1.1',
+ '/v1/flux-pro',
+ '/v1/flux-dev',
+ '/v1/flux-pro-1.1-ultra',
+ '/v1/flux-pro-finetuned',
+ '/v1/flux-pro-1.1-ultra-finetuned',
+ ])
+ .optional()
+ .default('/v1/flux-pro-1.1')
+ .describe('Endpoint to use for image generation.'),
+ raw: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe(
+ 'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
+ ),
+ finetune_id: z.string().optional().describe('ID of the finetuned model to use'),
+ finetune_strength: z
+ .number()
+ .optional()
+ .default(1.1)
+ .describe('Strength of the finetuning effect (typically between 0.1 and 1.2)'),
+ guidance: z.number().optional().default(2.5).describe('Guidance scale for finetuned models'),
+ aspect_ratio: z
+ .string()
+ .optional()
+ .default('16:9')
+ .describe('Aspect ratio for ultra models (e.g., "16:9")'),
+ });
+ }
+
+ getAxiosConfig() {
+ const config = {};
+ if (process.env.PROXY) {
+ config.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ return config;
+ }
+
+ /** @param {Object|string} value */
+ getDetails(value) {
+ if (typeof value === 'string') {
+ return value;
+ }
+ return JSON.stringify(value, null, 2);
+ }
+
+ getApiKey() {
+ const apiKey = process.env.FLUX_API_KEY || '';
+ if (!apiKey && !this.override) {
+ throw new Error('Missing FLUX_API_KEY environment variable.');
+ }
+ return apiKey;
+ }
+
+ wrapInMarkdown(imageUrl) {
+ const serverDomain = process.env.DOMAIN_SERVER || 'http://localhost:3080';
+ return `![generated image](${serverDomain}${imageUrl})`;
+ }
+
+ returnValue(value) {
+ if (this.isAgent === true && typeof value === 'string') {
+ return [value, {}];
+ } else if (this.isAgent === true && typeof value === 'object') {
+ if (Array.isArray(value)) {
+ return value;
+ }
+ return [displayMessage, value];
+ }
+ return value;
+ }
+
+ async _call(data) {
+ const { action = 'generate', ...imageData } = data;
+
+ // Use provided API key for this request if available, otherwise use default
+ const requestApiKey = this.apiKey || this.getApiKey();
+
+ // Handle list_finetunes action
+ if (action === 'list_finetunes') {
+ return this.getMyFinetunes(requestApiKey);
+ }
+
+ // Handle finetuned generation
+ if (action === 'generate_finetuned') {
+ return this.generateFinetunedImage(imageData, requestApiKey);
+ }
+
+ // For generate action, ensure prompt is provided
+ if (!imageData.prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+
+ let payload = {
+ prompt: imageData.prompt,
+ prompt_upsampling: imageData.prompt_upsampling || false,
+ safety_tolerance: imageData.safety_tolerance || 6,
+ output_format: imageData.output_format || 'png',
+ };
+
+ // Add optional parameters if provided
+ if (imageData.width) {
+ payload.width = imageData.width;
+ }
+ if (imageData.height) {
+ payload.height = imageData.height;
+ }
+ if (imageData.steps) {
+ payload.steps = imageData.steps;
+ }
+ if (imageData.seed !== undefined) {
+ payload.seed = imageData.seed;
+ }
+ if (imageData.raw) {
+ payload.raw = imageData.raw;
+ }
+
+ const generateUrl = `${this.baseUrl}${imageData.endpoint || '/v1/flux-pro'}`;
+ const resultUrl = `${this.baseUrl}/v1/get_result`;
+
+ logger.debug('[FluxAPI] Generating image with payload:', payload);
+ logger.debug('[FluxAPI] Using endpoint:', generateUrl);
+
+ let taskResponse;
+ try {
+ taskResponse = await axios.post(generateUrl, payload, {
+ headers: {
+ 'x-key': requestApiKey,
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ ...this.getAxiosConfig(),
+ });
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while submitting task:', details);
+
+ return this.returnValue(
+ `Something went wrong when trying to generate the image. The Flux API may be unavailable:
+ Error Message: ${details}`,
+ );
+ }
+
+ const taskId = taskResponse.data.id;
+
+ // Polling for the result
+ let status = 'Pending';
+ let resultData = null;
+ while (status !== 'Ready' && status !== 'Error') {
+ try {
+ // Wait 2 seconds between polls
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ const resultResponse = await axios.get(resultUrl, {
+ headers: {
+ 'x-key': requestApiKey,
+ Accept: 'application/json',
+ },
+ params: { id: taskId },
+ ...this.getAxiosConfig(),
+ });
+ status = resultResponse.data.status;
+
+ if (status === 'Ready') {
+ resultData = resultResponse.data.result;
+ break;
+ } else if (status === 'Error') {
+ logger.error('[FluxAPI] Error in task:', resultResponse.data);
+ return this.returnValue('An error occurred during image generation.');
+ }
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting result:', details);
+ return this.returnValue('An error occurred while retrieving the image.');
+ }
+ }
+
+ // If no result data
+ if (!resultData || !resultData.sample) {
+ logger.error('[FluxAPI] No image data received from API. Response:', resultData);
+ return this.returnValue('No image data received from Flux API.');
+ }
+
+ // Try saving the image locally
+ const imageUrl = resultData.sample;
+ const imageName = `img-${uuidv4()}.png`;
+
+ if (this.isAgent) {
+ try {
+ // Fetch the image and convert to base64
+ const fetchOptions = {};
+ if (process.env.PROXY) {
+ fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
+ }
+ const imageResponse = await fetch(imageUrl, fetchOptions);
+ const arrayBuffer = await imageResponse.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/png;base64,${base64}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ } catch (error) {
+ logger.error('Error processing image for agent:', error);
+ return this.returnValue(`Failed to process the image. ${error.message}`);
+ }
+ }
+
+ try {
+ logger.debug('[FluxAPI] Saving image:', imageUrl);
+ const result = await this.processFileURL({
+ fileStrategy: this.fileStrategy,
+ userId: this.userId,
+ URL: imageUrl,
+ fileName: imageName,
+ basePath: 'images',
+ context: FileContext.image_generation,
+ });
+
+ logger.debug('[FluxAPI] Image saved to path:', result.filepath);
+
+ // Calculate cost based on endpoint
+ /**
+ * TODO: Cost handling
+ const endpoint = imageData.endpoint || '/v1/flux-pro';
+ const endpointKey = Object.entries(FluxAPI.PRICING).find(([key, _]) =>
+ endpoint.includes(key.toLowerCase().replace(/_/g, '-')),
+ )?.[0];
+ const cost = FluxAPI.PRICING[endpointKey] || 0;
+ */
+ this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
+ return this.returnValue(this.result);
+ } catch (error) {
+ const details = this.getDetails(error?.message ?? 'No additional error details.');
+ logger.error('Error while saving the image:', details);
+ return this.returnValue(`Failed to save the image locally. ${details}`);
+ }
+ }
+
+ async getMyFinetunes(apiKey = null) {
+ const finetunesUrl = `${this.baseUrl}/v1/my_finetunes`;
+ const detailsUrl = `${this.baseUrl}/v1/finetune_details`;
+
+ try {
+ const headers = {
+ 'x-key': apiKey || this.getApiKey(),
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ };
+
+ // Get list of finetunes
+ const response = await axios.get(finetunesUrl, {
+ headers,
+ ...this.getAxiosConfig(),
+ });
+ const finetunes = response.data.finetunes;
+
+ // Fetch details for each finetune
+ const finetuneDetails = await Promise.all(
+ finetunes.map(async (finetuneId) => {
+ try {
+ const detailResponse = await axios.get(`${detailsUrl}?finetune_id=${finetuneId}`, {
+ headers,
+ ...this.getAxiosConfig(),
+ });
+ return {
+ id: finetuneId,
+ ...detailResponse.data,
+ };
+ } catch (error) {
+ logger.error(`[FluxAPI] Error fetching details for finetune ${finetuneId}:`, error);
+ return {
+ id: finetuneId,
+ error: 'Failed to fetch details',
+ };
+ }
+ }),
+ );
+
+ if (this.isAgent) {
+ const formattedDetails = JSON.stringify(finetuneDetails, null, 2);
+ return [`Here are the available finetunes:\n${formattedDetails}`, null];
+ }
+ return JSON.stringify(finetuneDetails);
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting finetunes:', details);
+ const errorMsg = `Failed to get finetunes: ${details}`;
+ return this.isAgent ? this.returnValue([errorMsg, {}]) : new Error(errorMsg);
+ }
+ }
+
+ async generateFinetunedImage(imageData, requestApiKey) {
+ if (!imageData.prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+
+ if (!imageData.finetune_id) {
+ throw new Error(
+ 'Missing required field: finetune_id for finetuned generation. Please supply a finetune_id!',
+ );
+ }
+
+ // Validate endpoint is appropriate for finetuned generation
+ const validFinetunedEndpoints = ['/v1/flux-pro-finetuned', '/v1/flux-pro-1.1-ultra-finetuned'];
+ const endpoint = imageData.endpoint || '/v1/flux-pro-finetuned';
+
+ if (!validFinetunedEndpoints.includes(endpoint)) {
+ throw new Error(
+ `Invalid endpoint for finetuned generation. Must be one of: ${validFinetunedEndpoints.join(', ')}`,
+ );
+ }
+
+ let payload = {
+ prompt: imageData.prompt,
+ prompt_upsampling: imageData.prompt_upsampling || false,
+ safety_tolerance: imageData.safety_tolerance || 6,
+ output_format: imageData.output_format || 'png',
+ finetune_id: imageData.finetune_id,
+ finetune_strength: imageData.finetune_strength || 1.0,
+ guidance: imageData.guidance || 2.5,
+ };
+
+ // Add optional parameters if provided
+ if (imageData.width) {
+ payload.width = imageData.width;
+ }
+ if (imageData.height) {
+ payload.height = imageData.height;
+ }
+ if (imageData.steps) {
+ payload.steps = imageData.steps;
+ }
+ if (imageData.seed !== undefined) {
+ payload.seed = imageData.seed;
+ }
+ if (imageData.raw) {
+ payload.raw = imageData.raw;
+ }
+
+ const generateUrl = `${this.baseUrl}${endpoint}`;
+ const resultUrl = `${this.baseUrl}/v1/get_result`;
+
+ logger.debug('[FluxAPI] Generating finetuned image with payload:', payload);
+ logger.debug('[FluxAPI] Using endpoint:', generateUrl);
+
+ let taskResponse;
+ try {
+ taskResponse = await axios.post(generateUrl, payload, {
+ headers: {
+ 'x-key': requestApiKey,
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ ...this.getAxiosConfig(),
+ });
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while submitting finetuned task:', details);
+ return this.returnValue(
+ `Something went wrong when trying to generate the finetuned image. The Flux API may be unavailable:
+ Error Message: ${details}`,
+ );
+ }
+
+ const taskId = taskResponse.data.id;
+
+ // Polling for the result
+ let status = 'Pending';
+ let resultData = null;
+ while (status !== 'Ready' && status !== 'Error') {
+ try {
+ // Wait 2 seconds between polls
+ await new Promise((resolve) => setTimeout(resolve, 2000));
+ const resultResponse = await axios.get(resultUrl, {
+ headers: {
+ 'x-key': requestApiKey,
+ Accept: 'application/json',
+ },
+ params: { id: taskId },
+ ...this.getAxiosConfig(),
+ });
+ status = resultResponse.data.status;
+
+ if (status === 'Ready') {
+ resultData = resultResponse.data.result;
+ break;
+ } else if (status === 'Error') {
+ logger.error('[FluxAPI] Error in finetuned task:', resultResponse.data);
+ return this.returnValue('An error occurred during finetuned image generation.');
+ }
+ } catch (error) {
+ const details = this.getDetails(error?.response?.data || error.message);
+ logger.error('[FluxAPI] Error while getting finetuned result:', details);
+ return this.returnValue('An error occurred while retrieving the finetuned image.');
+ }
+ }
+
+ // If no result data
+ if (!resultData || !resultData.sample) {
+ logger.error('[FluxAPI] No image data received from API. Response:', resultData);
+ return this.returnValue('No image data received from Flux API.');
+ }
+
+ // Try saving the image locally
+ const imageUrl = resultData.sample;
+ const imageName = `img-${uuidv4()}.png`;
+
+ try {
+ logger.debug('[FluxAPI] Saving finetuned image:', imageUrl);
+ const result = await this.processFileURL({
+ fileStrategy: this.fileStrategy,
+ userId: this.userId,
+ URL: imageUrl,
+ fileName: imageName,
+ basePath: 'images',
+ context: FileContext.image_generation,
+ });
+
+ logger.debug('[FluxAPI] Finetuned image saved to path:', result.filepath);
+
+ // Calculate cost based on endpoint
+ const endpointKey = endpoint.includes('ultra')
+ ? 'FLUX_PRO_1_1_ULTRA_FINETUNED'
+ : 'FLUX_PRO_FINETUNED';
+ const cost = FluxAPI.PRICING[endpointKey] || 0;
+ // Return the result based on returnMetadata flag
+ this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
+ return this.returnValue(this.result);
+ } catch (error) {
+ const details = this.getDetails(error?.message ?? 'No additional error details.');
+ logger.error('Error while saving the finetuned image:', details);
+ return this.returnValue(`Failed to save the finetuned image locally. ${details}`);
+ }
+ }
+}
+
+module.exports = FluxAPI;
diff --git a/api/app/clients/tools/structured/OpenAIImageTools.js b/api/app/clients/tools/structured/OpenAIImageTools.js
new file mode 100644
index 0000000000..85941a779a
--- /dev/null
+++ b/api/app/clients/tools/structured/OpenAIImageTools.js
@@ -0,0 +1,518 @@
+const { z } = require('zod');
+const axios = require('axios');
+const { v4 } = require('uuid');
+const OpenAI = require('openai');
+const FormData = require('form-data');
+const { tool } = require('@langchain/core/tools');
+const { HttpsProxyAgent } = require('https-proxy-agent');
+const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
+const { getStrategyFunctions } = require('~/server/services/Files/strategies');
+const { logAxiosError, extractBaseURL } = require('~/utils');
+const { getFiles } = require('~/models/File');
+const { logger } = require('~/config');
+
+/** Default descriptions for image generation tool */
+const DEFAULT_IMAGE_GEN_DESCRIPTION = `
+Generates high-quality, original images based solely on text, not using any uploaded reference images.
+
+When to use \`image_gen_oai\`:
+- To create entirely new images from detailed text descriptions that do NOT reference any image files.
+
+When NOT to use \`image_gen_oai\`:
+- If the user has uploaded any images and requests modifications, enhancements, or remixing based on those uploads → use \`image_edit_oai\` instead.
+
+Generated image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`.
+`.trim();
+
+/** Default description for image editing tool */
+const DEFAULT_IMAGE_EDIT_DESCRIPTION =
+ `Generates high-quality, original images based on text and one or more uploaded/referenced images.
+
+When to use \`image_edit_oai\`:
+- The user wants to modify, extend, or remix one **or more** uploaded images, either:
+ - Previously generated, or in the current request (both to be included in the \`image_ids\` array).
+- Always when the user refers to uploaded images for editing, enhancement, remixing, style transfer, or combining elements.
+- Any current or existing images are to be used as visual guides.
+- If there are any files in the current request, they are more likely than not expected as references for image edit requests.
+
+When NOT to use \`image_edit_oai\`:
+- Brand-new generations that do not rely on an existing image → use \`image_gen_oai\` instead.
+
+Both generated and referenced image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`.
+`.trim();
+
+/** Default prompt descriptions */
+const DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION = `Describe the image you want in detail.
+ Be highly specific—break your idea into layers:
+ (1) main concept and subject,
+ (2) composition and position,
+ (3) lighting and mood,
+ (4) style, medium, or camera details,
+ (5) important features (age, expression, clothing, etc.),
+ (6) background.
+ Use positive, descriptive language and specify what should be included, not what to avoid.
+ List number and characteristics of people/objects, and mention style/technical requirements (e.g., "DSLR photo, 85mm lens, golden hour").
+ Do not reference any uploaded images—use for new image creation from text only.`;
+
+const DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION = `Describe the changes, enhancements, or new ideas to apply to the uploaded image(s).
+ Be highly specific—break your request into layers:
+ (1) main concept or transformation,
+ (2) specific edits/replacements or composition guidance,
+ (3) desired style, mood, or technique,
+ (4) features/items to keep, change, or add (such as objects, people, clothing, lighting, etc.).
+ Use positive, descriptive language and clarify what should be included or changed, not what to avoid.
+ Always base this prompt on the most recently uploaded reference images.`;
+
+const displayMessage =
+ 'The tool displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
+
+/**
+ * Replaces unwanted characters from the input string
+ * @param {string} inputString - The input string to process
+ * @returns {string} - The processed string
+ */
+function replaceUnwantedChars(inputString) {
+ return inputString
+ .replace(/\r\n|\r|\n/g, ' ')
+ .replace(/"/g, '')
+ .trim();
+}
+
+function returnValue(value) {
+ if (typeof value === 'string') {
+ return [value, {}];
+ } else if (typeof value === 'object') {
+ if (Array.isArray(value)) {
+ return value;
+ }
+ return [displayMessage, value];
+ }
+ return value;
+}
+
+const getImageGenDescription = () => {
+ return process.env.IMAGE_GEN_OAI_DESCRIPTION || DEFAULT_IMAGE_GEN_DESCRIPTION;
+};
+
+const getImageEditDescription = () => {
+ return process.env.IMAGE_EDIT_OAI_DESCRIPTION || DEFAULT_IMAGE_EDIT_DESCRIPTION;
+};
+
+const getImageGenPromptDescription = () => {
+ return process.env.IMAGE_GEN_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION;
+};
+
+const getImageEditPromptDescription = () => {
+ return process.env.IMAGE_EDIT_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION;
+};
+
+/**
+ * Creates OpenAI Image tools (generation and editing)
+ * @param {Object} fields - Configuration fields
+ * @param {ServerRequest} fields.req - The Express request object, used for app locals and file access
+ * @param {boolean} fields.isAgent - Whether the tool is being used in an agent context
+ * @param {string} fields.IMAGE_GEN_OAI_API_KEY - The OpenAI API key
+ * @param {boolean} [fields.override] - Whether to override the API key check, necessary for app initialization
+ * @param {MongoFile[]} [fields.imageFiles] - The images to be used for editing
+ * @returns {Array} - Array of image tools
+ */
+function createOpenAIImageTools(fields = {}) {
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ const override = fields.override ?? false;
+ /** @type {boolean} */
+ if (!override && !fields.isAgent) {
+ throw new Error('This tool is only available for agents.');
+ }
+ const { req } = fields;
+ const imageOutputType = req?.app.locals.imageOutputType || EImageOutputType.PNG;
+ const appFileStrategy = req?.app.locals.fileStrategy;
+
+ const getApiKey = () => {
+ const apiKey = process.env.IMAGE_GEN_OAI_API_KEY ?? '';
+ if (!apiKey && !override) {
+ throw new Error('Missing IMAGE_GEN_OAI_API_KEY environment variable.');
+ }
+ return apiKey;
+ };
+
+ let apiKey = fields.IMAGE_GEN_OAI_API_KEY ?? getApiKey();
+ const closureConfig = { apiKey };
+
+ let baseURL = 'https://api.openai.com/v1/';
+ if (!override && process.env.IMAGE_GEN_OAI_BASEURL) {
+ baseURL = extractBaseURL(process.env.IMAGE_GEN_OAI_BASEURL);
+ closureConfig.baseURL = baseURL;
+ }
+
+ // Note: Azure may not yet support the latest image generation models
+ if (
+ !override &&
+ process.env.IMAGE_GEN_OAI_AZURE_API_VERSION &&
+ process.env.IMAGE_GEN_OAI_BASEURL
+ ) {
+ baseURL = process.env.IMAGE_GEN_OAI_BASEURL;
+ closureConfig.baseURL = baseURL;
+ closureConfig.defaultQuery = { 'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION };
+ closureConfig.defaultHeaders = {
+ 'api-key': process.env.IMAGE_GEN_OAI_API_KEY,
+ 'Content-Type': 'application/json',
+ };
+ closureConfig.apiKey = process.env.IMAGE_GEN_OAI_API_KEY;
+ }
+
+ const imageFiles = fields.imageFiles ?? [];
+
+ /**
+ * Image Generation Tool
+ */
+ const imageGenTool = tool(
+ async (
+ {
+ prompt,
+ background = 'auto',
+ n = 1,
+ output_compression = 100,
+ quality = 'auto',
+ size = 'auto',
+ },
+ runnableConfig,
+ ) => {
+ if (!prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+ const clientConfig = { ...closureConfig };
+ if (process.env.PROXY) {
+ clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
+ }
+
+ /** @type {OpenAI} */
+ const openai = new OpenAI(clientConfig);
+ let output_format = imageOutputType;
+ if (
+ background === 'transparent' &&
+ output_format !== EImageOutputType.PNG &&
+ output_format !== EImageOutputType.WEBP
+ ) {
+ logger.warn(
+ '[ImageGenOAI] Transparent background requires PNG or WebP format, defaulting to PNG',
+ );
+ output_format = EImageOutputType.PNG;
+ }
+
+ let resp;
+ try {
+ const derivedSignal = runnableConfig?.signal
+ ? AbortSignal.any([runnableConfig.signal])
+ : undefined;
+ resp = await openai.images.generate(
+ {
+ model: 'gpt-image-1',
+ prompt: replaceUnwantedChars(prompt),
+ n: Math.min(Math.max(1, n), 10),
+ background,
+ output_format,
+ output_compression:
+ output_format === EImageOutputType.WEBP || output_format === EImageOutputType.JPEG
+ ? output_compression
+ : undefined,
+ quality,
+ size,
+ },
+ {
+ signal: derivedSignal,
+ },
+ );
+ } catch (error) {
+ const message = '[image_gen_oai] Problem generating the image:';
+ logAxiosError({ error, message });
+ return returnValue(`Something went wrong when trying to generate the image. The OpenAI API may be unavailable:
+Error Message: ${error.message}`);
+ }
+
+ if (!resp) {
+ return returnValue(
+ 'Something went wrong when trying to generate the image. The OpenAI API may be unavailable',
+ );
+ }
+
+ // For gpt-image-1, the response contains base64-encoded images
+ // TODO: handle cost in `resp.usage`
+ const base64Image = resp.data[0].b64_json;
+
+ if (!base64Image) {
+ return returnValue(
+ 'No image data returned from OpenAI API. There may be a problem with the API or your configuration.',
+ );
+ }
+
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/${output_format};base64,${base64Image}`,
+ },
+ },
+ ];
+
+ const file_ids = [v4()];
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage + `\n\ngenerated_image_id: "${file_ids[0]}"`,
+ },
+ ];
+ return [response, { content, file_ids }];
+ },
+ {
+ name: 'image_gen_oai',
+ description: getImageGenDescription(),
+ schema: z.object({
+ prompt: z.string().max(32000).describe(getImageGenPromptDescription()),
+ background: z
+ .enum(['transparent', 'opaque', 'auto'])
+ .optional()
+ .describe(
+ 'Sets transparency for the background. Must be one of transparent, opaque or auto (default). When transparent, the output format should be png or webp.',
+ ),
+ /*
+ n: z
+ .number()
+ .int()
+ .min(1)
+ .max(10)
+ .optional()
+ .describe('The number of images to generate. Must be between 1 and 10.'),
+ output_compression: z
+ .number()
+ .int()
+ .min(0)
+ .max(100)
+ .optional()
+ .describe('The compression level (0-100%) for webp or jpeg formats. Defaults to 100.'),
+ */
+ quality: z
+ .enum(['auto', 'high', 'medium', 'low'])
+ .optional()
+ .describe('The quality of the image. One of auto (default), high, medium, or low.'),
+ size: z
+ .enum(['auto', '1024x1024', '1536x1024', '1024x1536'])
+ .optional()
+ .describe(
+ 'The size of the generated image. One of 1024x1024, 1536x1024 (landscape), 1024x1536 (portrait), or auto (default).',
+ ),
+ }),
+ responseFormat: 'content_and_artifact',
+ },
+ );
+
+ /**
+ * Image Editing Tool
+ */
+ const imageEditTool = tool(
+ async ({ prompt, image_ids, quality = 'auto', size = 'auto' }, runnableConfig) => {
+ if (!prompt) {
+ throw new Error('Missing required field: prompt');
+ }
+
+ const clientConfig = { ...closureConfig };
+ if (process.env.PROXY) {
+ clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
+ }
+
+ const formData = new FormData();
+ formData.append('model', 'gpt-image-1');
+ formData.append('prompt', replaceUnwantedChars(prompt));
+ // TODO: `mask` support
+ // TODO: more than 1 image support
+ // formData.append('n', n.toString());
+ formData.append('quality', quality);
+ formData.append('size', size);
+
+ /** @type {Record<string, NodeStreamDownloader>} */
+ const streamMethods = {};
+
+ const requestFilesMap = Object.fromEntries(imageFiles.map((f) => [f.file_id, { ...f }]));
+
+ const orderedFiles = new Array(image_ids.length);
+ const idsToFetch = [];
+ const indexOfMissing = Object.create(null);
+
+ for (let i = 0; i < image_ids.length; i++) {
+ const id = image_ids[i];
+ const file = requestFilesMap[id];
+
+ if (file) {
+ orderedFiles[i] = file;
+ } else {
+ idsToFetch.push(id);
+ indexOfMissing[id] = i;
+ }
+ }
+
+ if (idsToFetch.length) {
+ const fetchedFiles = await getFiles(
+ {
+ user: req.user.id,
+ file_id: { $in: idsToFetch },
+ height: { $exists: true },
+ width: { $exists: true },
+ },
+ {},
+ {},
+ );
+
+ for (const file of fetchedFiles) {
+ requestFilesMap[file.file_id] = file;
+ orderedFiles[indexOfMissing[file.file_id]] = file;
+ }
+ }
+ for (const imageFile of orderedFiles) {
+ if (!imageFile) {
+ continue;
+ }
+ /** @type {NodeStream} */
+ let stream;
+ /** @type {NodeStreamDownloader} */
+ let getDownloadStream;
+ const source = imageFile.source || appFileStrategy;
+ if (!source) {
+ throw new Error('No source found for image file');
+ }
+ if (streamMethods[source]) {
+ getDownloadStream = streamMethods[source];
+ } else {
+ ({ getDownloadStream } = getStrategyFunctions(source));
+ streamMethods[source] = getDownloadStream;
+ }
+ if (!getDownloadStream) {
+ throw new Error(`No download stream method found for source: ${source}`);
+ }
+ stream = await getDownloadStream(req, imageFile.filepath);
+ if (!stream) {
+ throw new Error('Failed to get download stream for image file');
+ }
+ formData.append('image[]', stream, {
+ filename: imageFile.filename,
+ contentType: imageFile.type,
+ });
+ }
+
+ /** @type {import('axios').RawAxiosHeaders} */
+ let headers = {
+ ...formData.getHeaders(),
+ };
+
+ if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
+ headers['api-key'] = apiKey;
+ } else {
+ headers['Authorization'] = `Bearer ${apiKey}`;
+ }
+
+ try {
+ const derivedSignal = runnableConfig?.signal
+ ? AbortSignal.any([runnableConfig.signal])
+ : undefined;
+
+ /** @type {import('axios').AxiosRequestConfig} */
+ const axiosConfig = {
+ headers,
+ ...clientConfig,
+ signal: derivedSignal,
+ baseURL,
+ };
+
+ if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
+ axiosConfig.params = {
+ 'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION,
+ ...axiosConfig.params,
+ };
+ }
+ const response = await axios.post('/images/edits', formData, axiosConfig);
+
+ if (!response.data || !response.data.data || !response.data.data.length) {
+ return returnValue(
+ 'No image data returned from OpenAI API. There may be a problem with the API or your configuration.',
+ );
+ }
+
+ const base64Image = response.data.data[0].b64_json;
+ if (!base64Image) {
+ return returnValue(
+ 'No image data returned from OpenAI API. There may be a problem with the API or your configuration.',
+ );
+ }
+
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/${imageOutputType};base64,${base64Image}`,
+ },
+ },
+ ];
+
+ const file_ids = [v4()];
+ const textResponse = [
+ {
+ type: ContentTypes.TEXT,
+ text:
+ displayMessage +
+ `\n\ngenerated_image_id: "${file_ids[0]}"\nreferenced_image_ids: ["${image_ids.join('", "')}"]`,
+ },
+ ];
+ return [textResponse, { content, file_ids }];
+ } catch (error) {
+ const message = '[image_edit_oai] Problem editing the image:';
+ logAxiosError({ error, message });
+ return returnValue(`Something went wrong when trying to edit the image. The OpenAI API may be unavailable:
+Error Message: ${error.message || 'Unknown error'}`);
+ }
+ },
+ {
+ name: 'image_edit_oai',
+ description: getImageEditDescription(),
+ schema: z.object({
+ image_ids: z
+ .array(z.string())
+ .min(1)
+ .describe(
+ `
+IDs (image ID strings) of previously generated or uploaded images that should guide the edit.
+
+Guidelines:
+- If the user's request depends on any prior image(s), copy their image IDs into the \`image_ids\` array (in the same order the user refers to them).
+- Never invent or hallucinate IDs; only use IDs that are still visible in the conversation context.
+- If no earlier image is relevant, omit the field entirely.
+`.trim(),
+ ),
+ prompt: z.string().max(32000).describe(getImageEditPromptDescription()),
+ /*
+ n: z
+ .number()
+ .int()
+ .min(1)
+ .max(10)
+ .optional()
+ .describe('The number of images to generate. Must be between 1 and 10. Defaults to 1.'),
+ */
+ quality: z
+ .enum(['auto', 'high', 'medium', 'low'])
+ .optional()
+ .describe(
+ 'The quality of the image. One of auto (default), high, medium, or low. High/medium/low only supported for gpt-image-1.',
+ ),
+ size: z
+ .enum(['auto', '1024x1024', '1536x1024', '1024x1536', '256x256', '512x512'])
+ .optional()
+ .describe(
+ 'The size of the generated images. For gpt-image-1: auto (default), 1024x1024, 1536x1024, 1024x1536. For dall-e-2: 256x256, 512x512, 1024x1024.',
+ ),
+ }),
+ responseFormat: 'content_and_artifact',
+ },
+ );
+
+ return [imageGenTool, imageEditTool];
+}
+
+module.exports = createOpenAIImageTools;
diff --git a/api/app/clients/tools/structured/StableDiffusion.js b/api/app/clients/tools/structured/StableDiffusion.js
index 6309da35d8..25a9e0abd3 100644
--- a/api/app/clients/tools/structured/StableDiffusion.js
+++ b/api/app/clients/tools/structured/StableDiffusion.js
@@ -6,10 +6,13 @@ const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
-const { FileContext } = require('librechat-data-provider');
+const { FileContext, ContentTypes } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');
+const displayMessage =
+ 'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
+
class StableDiffusionAPI extends Tool {
constructor(fields) {
super();
@@ -21,6 +24,8 @@ class StableDiffusionAPI extends Tool {
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
+ /** @type {boolean} */
+ this.isAgent = fields.isAgent;
if (fields.uploadImageBuffer) {
/** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
@@ -66,6 +71,16 @@ class StableDiffusionAPI extends Tool {
return ``;
}
+ returnValue(value) {
+ if (this.isAgent === true && typeof value === 'string') {
+ return [value, {}];
+ } else if (this.isAgent === true && typeof value === 'object') {
+ return [displayMessage, value];
+ }
+
+ return value;
+ }
+
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
if (!url && !this.override) {
@@ -113,6 +128,25 @@ class StableDiffusionAPI extends Tool {
}
try {
+ if (this.isAgent) {
+ const content = [
+ {
+ type: ContentTypes.IMAGE_URL,
+ image_url: {
+ url: `data:image/png;base64,${image}`,
+ },
+ },
+ ];
+
+ const response = [
+ {
+ type: ContentTypes.TEXT,
+ text: displayMessage,
+ },
+ ];
+ return [response, { content }];
+ }
+
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
if (this.returnMetadata && this.uploadImageBuffer && this.req) {
const file = await this.uploadImageBuffer({
@@ -154,7 +188,7 @@ class StableDiffusionAPI extends Tool {
logger.error('[StableDiffusion] Error while saving the image:', error);
}
- return this.result;
+ return this.returnValue(this.result);
}
}
diff --git a/api/app/clients/tools/structured/TavilySearchResults.js b/api/app/clients/tools/structured/TavilySearchResults.js
index 9a62053ff0..9461293371 100644
--- a/api/app/clients/tools/structured/TavilySearchResults.js
+++ b/api/app/clients/tools/structured/TavilySearchResults.js
@@ -43,9 +43,39 @@ class TavilySearchResults extends Tool {
.boolean()
.optional()
.describe('Whether to include answers in the search results. Default is False.'),
- // include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'),
- // include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'),
- // exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'),
+ include_raw_content: z
+ .boolean()
+ .optional()
+ .describe('Whether to include raw content in the search results. Default is False.'),
+ include_domains: z
+ .array(z.string())
+ .optional()
+ .describe('A list of domains to specifically include in the search results.'),
+ exclude_domains: z
+ .array(z.string())
+ .optional()
+ .describe('A list of domains to specifically exclude from the search results.'),
+ topic: z
+ .enum(['general', 'news', 'finance'])
+ .optional()
+ .describe(
+ 'The category of the search. Use news ONLY if query SPECIFICALLY mentions the word "news".',
+ ),
+ time_range: z
+ .enum(['day', 'week', 'month', 'year', 'd', 'w', 'm', 'y'])
+ .optional()
+ .describe('The time range back from the current date to filter results.'),
+ days: z
+ .number()
+ .min(1)
+ .optional()
+ .describe('Number of days back from the current date to include. Only if topic is news.'),
+ include_image_descriptions: z
+ .boolean()
+ .optional()
+ .describe(
+ 'When include_images is true, also add a descriptive text for each image. Default is false.',
+ ),
});
}
diff --git a/api/app/clients/tools/util/addOpenAPISpecs.js b/api/app/clients/tools/util/addOpenAPISpecs.js
deleted file mode 100644
index 8b87be9941..0000000000
--- a/api/app/clients/tools/util/addOpenAPISpecs.js
+++ /dev/null
@@ -1,30 +0,0 @@
-const { loadSpecs } = require('./loadSpecs');
-
-function transformSpec(input) {
- return {
- name: input.name_for_human,
- pluginKey: input.name_for_model,
- description: input.description_for_human,
- icon: input?.logo_url ?? 'https://placehold.co/70x70.png',
- // TODO: add support for authentication
- isAuthRequired: 'false',
- authConfig: [],
- };
-}
-
-async function addOpenAPISpecs(availableTools) {
- try {
- const specs = (await loadSpecs({})).map(transformSpec);
- if (specs.length > 0) {
- return [...specs, ...availableTools];
- }
- return availableTools;
- } catch (error) {
- return availableTools;
- }
-}
-
-module.exports = {
- transformSpec,
- addOpenAPISpecs,
-};
diff --git a/api/app/clients/tools/util/addOpenAPISpecs.spec.js b/api/app/clients/tools/util/addOpenAPISpecs.spec.js
deleted file mode 100644
index 21ff4eb8cc..0000000000
--- a/api/app/clients/tools/util/addOpenAPISpecs.spec.js
+++ /dev/null
@@ -1,76 +0,0 @@
-const { addOpenAPISpecs, transformSpec } = require('./addOpenAPISpecs');
-const { loadSpecs } = require('./loadSpecs');
-const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
-
-jest.mock('./loadSpecs');
-jest.mock('../dynamic/OpenAPIPlugin');
-
-describe('transformSpec', () => {
- it('should transform input spec to a desired format', () => {
- const input = {
- name_for_human: 'Human Name',
- name_for_model: 'Model Name',
- description_for_human: 'Human Description',
- logo_url: 'https://example.com/logo.png',
- };
-
- const expectedOutput = {
- name: 'Human Name',
- pluginKey: 'Model Name',
- description: 'Human Description',
- icon: 'https://example.com/logo.png',
- isAuthRequired: 'false',
- authConfig: [],
- };
-
- expect(transformSpec(input)).toEqual(expectedOutput);
- });
-
- it('should use default icon if logo_url is not provided', () => {
- const input = {
- name_for_human: 'Human Name',
- name_for_model: 'Model Name',
- description_for_human: 'Human Description',
- };
-
- const expectedOutput = {
- name: 'Human Name',
- pluginKey: 'Model Name',
- description: 'Human Description',
- icon: 'https://placehold.co/70x70.png',
- isAuthRequired: 'false',
- authConfig: [],
- };
-
- expect(transformSpec(input)).toEqual(expectedOutput);
- });
-});
-
-describe('addOpenAPISpecs', () => {
- it('should add specs to available tools', async () => {
- const availableTools = ['Tool1', 'Tool2'];
- const specs = [
- {
- name_for_human: 'Human Name',
- name_for_model: 'Model Name',
- description_for_human: 'Human Description',
- logo_url: 'https://example.com/logo.png',
- },
- ];
-
- loadSpecs.mockResolvedValue(specs);
- createOpenAPIPlugin.mockReturnValue('Plugin');
-
- const result = await addOpenAPISpecs(availableTools);
- expect(result).toEqual([...specs.map(transformSpec), ...availableTools]);
- });
-
- it('should return available tools if specs loading fails', async () => {
- const availableTools = ['Tool1', 'Tool2'];
-
- loadSpecs.mockRejectedValue(new Error('Failed to load specs'));
-
- const result = await addOpenAPISpecs(availableTools);
- expect(result).toEqual(availableTools);
- });
-});
diff --git a/api/app/clients/tools/util/fileSearch.js b/api/app/clients/tools/util/fileSearch.js
index 23ba58bb5a..54da483362 100644
--- a/api/app/clients/tools/util/fileSearch.js
+++ b/api/app/clients/tools/util/fileSearch.js
@@ -106,18 +106,21 @@ const createFileSearchTool = async ({ req, files, entity_id }) => {
const formattedResults = validResults
.flatMap((result) =>
- result.data.map(([docInfo, relevanceScore]) => ({
+ result.data.map(([docInfo, distance]) => ({
filename: docInfo.metadata.source.split('/').pop(),
content: docInfo.page_content,
- relevanceScore,
+ distance,
})),
)
- .sort((a, b) => b.relevanceScore - a.relevanceScore);
+ // TODO: results should be sorted by relevance, not distance
+ .sort((a, b) => a.distance - b.distance)
+ // TODO: make this configurable
+ .slice(0, 10);
const formattedString = formattedResults
.map(
(result) =>
- `File: ${result.filename}\nRelevance: ${result.relevanceScore.toFixed(4)}\nContent: ${
+ `File: ${result.filename}\nRelevance: ${(1.0 - result.distance).toFixed(4)}\nContent: ${
result.content
}\n`,
)
diff --git a/api/app/clients/tools/util/handleTools.js b/api/app/clients/tools/util/handleTools.js
index f1dfa24a49..e480dd4928 100644
--- a/api/app/clients/tools/util/handleTools.js
+++ b/api/app/clients/tools/util/handleTools.js
@@ -1,7 +1,7 @@
-const { Tools, Constants } = require('librechat-data-provider');
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
+const { Tools, Constants, EToolResources } = require('librechat-data-provider');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
@@ -10,6 +10,7 @@ const {
GoogleSearchAPI,
// Structured Tools
DALLE3,
+ FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
@@ -17,11 +18,12 @@ const {
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
+ createOpenAIImageTools,
} = require('../');
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch');
+const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { createMCPTool } = require('~/server/services/MCP');
-const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');
const mcpToolPattern = new RegExp(`^.+${Constants.mcp_delimiter}.+$`);
@@ -89,45 +91,6 @@ const validateTools = async (user, tools = []) => {
}
};
-const loadAuthValues = async ({ userId, authFields, throwError = true }) => {
- let authValues = {};
-
- /**
- * Finds the first non-empty value for the given authentication field, supporting alternate fields.
- * @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
- * @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
- */
- const findAuthValue = async (fields) => {
- for (const field of fields) {
- let value = process.env[field];
- if (value) {
- return { authField: field, authValue: value };
- }
- try {
- value = await getUserPluginAuthValue(userId, field, throwError);
- } catch (err) {
- if (field === fields[fields.length - 1] && !value) {
- throw err;
- }
- }
- if (value) {
- return { authField: field, authValue: value };
- }
- }
- return null;
- };
-
- for (let authField of authFields) {
- const fields = authField.split('||');
- const result = await findAuthValue(fields);
- if (result) {
- authValues[result.authField] = result.authValue;
- }
- }
-
- return authValues;
-};
-
/** @typedef {typeof import('@langchain/core/tools').Tool} ToolConstructor */
/** @typedef {import('@langchain/core/tools').Tool} Tool */
@@ -160,7 +123,7 @@ const getAuthFields = (toolKey) => {
*
* @param {object} object
* @param {string} object.user
- * @param {Agent} [object.agent]
+ * @param {Pick} [object.agent]
* @param {string} [object.model]
* @param {EModelEndpoint} [object.endpoint]
* @param {LoadToolOptions} [object.options]
@@ -182,6 +145,7 @@ const loadTools = async ({
returnMap = false,
}) => {
const toolConstructors = {
+ flux: FluxAPI,
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
@@ -193,7 +157,7 @@ const loadTools = async ({
};
const customConstructors = {
- serpapi: async () => {
+ serpapi: async (_toolContextMap) => {
const authFields = getAuthFields('serpapi');
let envVar = authFields[0] ?? '';
let apiKey = process.env[envVar];
@@ -206,11 +170,40 @@ const loadTools = async ({
gl: 'us',
});
},
- youtube: async () => {
+ youtube: async (_toolContextMap) => {
const authFields = getAuthFields('youtube');
const authValues = await loadAuthValues({ userId: user, authFields });
return createYouTubeTools(authValues);
},
+ image_gen_oai: async (toolContextMap) => {
+ const authFields = getAuthFields('image_gen_oai');
+ const authValues = await loadAuthValues({ userId: user, authFields });
+ const imageFiles = options.tool_resources?.[EToolResources.image_edit]?.files ?? [];
+ let toolContext = '';
+ for (let i = 0; i < imageFiles.length; i++) {
+ const file = imageFiles[i];
+ if (!file) {
+ continue;
+ }
+ if (i === 0) {
+ toolContext =
+ 'Image files provided in this request (their image IDs listed in order of appearance) available for image editing:';
+ }
+ toolContext += `\n\t- ${file.file_id}`;
+ if (i === imageFiles.length - 1) {
+ toolContext += `\n\nInclude any you need in the \`image_ids\` array when calling \`${EToolResources.image_edit}_oai\`. You may also include previously referenced or generated image IDs.`;
+ }
+ }
+ if (toolContext) {
+ toolContextMap.image_edit_oai = toolContext;
+ }
+ return createOpenAIImageTools({
+ ...authValues,
+ isAgent: !!agent,
+ req: options.req,
+ imageFiles,
+ });
+ },
};
const requestedTools = {};
@@ -230,13 +223,14 @@ const loadTools = async ({
};
const toolOptions = {
- serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
+ flux: imageGenOptions,
dalle: imageGenOptions,
'stable-diffusion': imageGenOptions,
+ serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
};
+ /** @type {Record} */
const toolContextMap = {};
- const remainingTools = [];
const appTools = options.req?.app?.locals?.availableTools ?? {};
for (const tool of tools) {
@@ -281,7 +275,7 @@ const loadTools = async ({
}
if (customConstructors[tool]) {
- requestedTools[tool] = customConstructors[tool];
+ requestedTools[tool] = async () => customConstructors[tool](toolContextMap);
continue;
}
@@ -296,30 +290,6 @@ const loadTools = async ({
requestedTools[tool] = toolInstance;
continue;
}
-
- if (functions === true) {
- remainingTools.push(tool);
- }
- }
-
- let specs = null;
- if (useSpecs === true && functions === true && remainingTools.length > 0) {
- specs = await loadSpecs({
- llm: model,
- user,
- message: options.message,
- memory: options.memory,
- signal: options.signal,
- tools: remainingTools,
- map: true,
- verbose: false,
- });
- }
-
- for (const tool of remainingTools) {
- if (specs && specs[tool]) {
- requestedTools[tool] = specs[tool];
- }
}
if (returnMap) {
@@ -345,7 +315,6 @@ const loadTools = async ({
module.exports = {
loadToolWithAuth,
- loadAuthValues,
validateTools,
loadTools,
};
diff --git a/api/app/clients/tools/util/index.js b/api/app/clients/tools/util/index.js
index 73d10270b6..ea67bb4ced 100644
--- a/api/app/clients/tools/util/index.js
+++ b/api/app/clients/tools/util/index.js
@@ -1,9 +1,8 @@
-const { validateTools, loadTools, loadAuthValues } = require('./handleTools');
+const { validateTools, loadTools } = require('./handleTools');
const handleOpenAIErrors = require('./handleOpenAIErrors');
module.exports = {
handleOpenAIErrors,
- loadAuthValues,
validateTools,
loadTools,
};
diff --git a/api/app/clients/tools/util/loadSpecs.js b/api/app/clients/tools/util/loadSpecs.js
deleted file mode 100644
index e5b543132a..0000000000
--- a/api/app/clients/tools/util/loadSpecs.js
+++ /dev/null
@@ -1,117 +0,0 @@
-const fs = require('fs');
-const path = require('path');
-const { z } = require('zod');
-const { logger } = require('~/config');
-const { createOpenAPIPlugin } = require('~/app/clients/tools/dynamic/OpenAPIPlugin');
-
-// The minimum Manifest definition
-const ManifestDefinition = z.object({
- schema_version: z.string().optional(),
- name_for_human: z.string(),
- name_for_model: z.string(),
- description_for_human: z.string(),
- description_for_model: z.string(),
- auth: z.object({}).optional(),
- api: z.object({
- // Spec URL or can be the filename of the OpenAPI spec yaml file,
- // located in api\app\clients\tools\.well-known\openapi
- url: z.string(),
- type: z.string().optional(),
- is_user_authenticated: z.boolean().nullable().optional(),
- has_user_authentication: z.boolean().nullable().optional(),
- }),
- // use to override any params that the LLM will consistently get wrong
- params: z.object({}).optional(),
- logo_url: z.string().optional(),
- contact_email: z.string().optional(),
- legal_info_url: z.string().optional(),
-});
-
-function validateJson(json) {
- try {
- return ManifestDefinition.parse(json);
- } catch (error) {
- logger.debug('[validateJson] manifest parsing error', error);
- return false;
- }
-}
-
-// omit the LLM to return the well known jsons as objects
-async function loadSpecs({ llm, user, message, tools = [], map = false, memory, signal }) {
- const directoryPath = path.join(__dirname, '..', '.well-known');
- let files = [];
-
- for (let i = 0; i < tools.length; i++) {
- const filePath = path.join(directoryPath, tools[i] + '.json');
-
- try {
- // If the access Promise is resolved, it means that the file exists
- // Then we can add it to the files array
- await fs.promises.access(filePath, fs.constants.F_OK);
- files.push(tools[i] + '.json');
- } catch (err) {
- logger.error(`[loadSpecs] File ${tools[i] + '.json'} does not exist`, err);
- }
- }
-
- if (files.length === 0) {
- files = (await fs.promises.readdir(directoryPath)).filter(
- (file) => path.extname(file) === '.json',
- );
- }
-
- const validJsons = [];
- const constructorMap = {};
-
- logger.debug('[validateJson] files', files);
-
- for (const file of files) {
- if (path.extname(file) === '.json') {
- const filePath = path.join(directoryPath, file);
- const fileContent = await fs.promises.readFile(filePath, 'utf8');
- const json = JSON.parse(fileContent);
-
- if (!validateJson(json)) {
- logger.debug('[validateJson] Invalid json', json);
- continue;
- }
-
- if (llm && map) {
- constructorMap[json.name_for_model] = async () =>
- await createOpenAPIPlugin({
- data: json,
- llm,
- message,
- memory,
- signal,
- user,
- });
- continue;
- }
-
- if (llm) {
- validJsons.push(createOpenAPIPlugin({ data: json, llm }));
- continue;
- }
-
- validJsons.push(json);
- }
- }
-
- if (map) {
- return constructorMap;
- }
-
- const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);
-
- // logger.debug('[validateJson] plugins', plugins);
- // logger.debug(plugins[0].name);
-
- return plugins;
-}
-
-module.exports = {
- loadSpecs,
- validateJson,
- ManifestDefinition,
-};
diff --git a/api/app/clients/tools/util/loadSpecs.spec.js b/api/app/clients/tools/util/loadSpecs.spec.js
deleted file mode 100644
index 7b906d86f0..0000000000
--- a/api/app/clients/tools/util/loadSpecs.spec.js
+++ /dev/null
@@ -1,101 +0,0 @@
-const fs = require('fs');
-const { validateJson, loadSpecs, ManifestDefinition } = require('./loadSpecs');
-const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
-
-jest.mock('../dynamic/OpenAPIPlugin');
-
-describe('ManifestDefinition', () => {
- it('should validate correct json', () => {
- const json = {
- name_for_human: 'Test',
- name_for_model: 'Test',
- description_for_human: 'Test',
- description_for_model: 'Test',
- api: {
- url: 'http://test.com',
- },
- };
-
- expect(() => ManifestDefinition.parse(json)).not.toThrow();
- });
-
- it('should not validate incorrect json', () => {
- const json = {
- name_for_human: 'Test',
- name_for_model: 'Test',
- description_for_human: 'Test',
- description_for_model: 'Test',
- api: {
- url: 123, // incorrect type
- },
- };
-
- expect(() => ManifestDefinition.parse(json)).toThrow();
- });
-});
-
-describe('validateJson', () => {
- it('should return parsed json if valid', () => {
- const json = {
- name_for_human: 'Test',
- name_for_model: 'Test',
- description_for_human: 'Test',
- description_for_model: 'Test',
- api: {
- url: 'http://test.com',
- },
- };
-
- expect(validateJson(json)).toEqual(json);
- });
-
- it('should return false if json is not valid', () => {
- const json = {
- name_for_human: 'Test',
- name_for_model: 'Test',
- description_for_human: 'Test',
- description_for_model: 'Test',
- api: {
- url: 123, // incorrect type
- },
- };
-
- expect(validateJson(json)).toEqual(false);
- });
-});
-
-describe('loadSpecs', () => {
- beforeEach(() => {
- jest.spyOn(fs.promises, 'readdir').mockResolvedValue(['test.json']);
- jest.spyOn(fs.promises, 'readFile').mockResolvedValue(
- JSON.stringify({
- name_for_human: 'Test',
- name_for_model: 'Test',
- description_for_human: 'Test',
- description_for_model: 'Test',
- api: {
- url: 'http://test.com',
- },
- }),
- );
- createOpenAPIPlugin.mockResolvedValue({});
- });
-
- afterEach(() => {
- jest.restoreAllMocks();
- });
-
- it('should return plugins', async () => {
- const plugins = await loadSpecs({ llm: true, verbose: false });
-
- expect(plugins).toHaveLength(1);
- expect(createOpenAPIPlugin).toHaveBeenCalledTimes(1);
- });
-
- it('should return constructorMap if map is true', async () => {
- const plugins = await loadSpecs({ llm: {}, map: true, verbose: false });
-
- expect(plugins).toHaveProperty('Test');
- expect(createOpenAPIPlugin).not.toHaveBeenCalled();
- });
-});
diff --git a/api/cache/clearPendingReq.js b/api/cache/clearPendingReq.js
index 122638d7f9..54db8e9690 100644
--- a/api/cache/clearPendingReq.js
+++ b/api/cache/clearPendingReq.js
@@ -1,7 +1,8 @@
+const { Time, CacheKeys } = require('librechat-data-provider');
+const { isEnabled } = require('~/server/utils');
const getLogStores = require('./getLogStores');
-const { isEnabled } = require('../server/utils');
+
const { USE_REDIS, LIMIT_CONCURRENT_MESSAGES } = process.env ?? {};
-const ttl = 1000 * 60 * 1;
/**
* Clear or decrement pending requests from the cache.
@@ -28,7 +29,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => {
return;
}
- const namespace = 'pending_req';
+ const namespace = CacheKeys.PENDING_REQ;
const cache = _cache ?? getLogStores(namespace);
if (!cache) {
@@ -39,7 +40,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => {
const currentReq = +((await cache.get(key)) ?? 0);
if (currentReq && currentReq >= 1) {
- await cache.set(key, currentReq - 1, ttl);
+ await cache.set(key, currentReq - 1, Time.ONE_MINUTE);
} else {
await cache.delete(key);
}
diff --git a/api/cache/getLogStores.js b/api/cache/getLogStores.js
index 6592371f02..612638b97b 100644
--- a/api/cache/getLogStores.js
+++ b/api/cache/getLogStores.js
@@ -1,4 +1,4 @@
-const Keyv = require('keyv');
+const { Keyv } = require('keyv');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
@@ -19,7 +19,7 @@ const createViolationInstance = (namespace) => {
// Serve cache from memory so no need to clear it on startup/exit
const pending_req = isRedisEnabled
? new Keyv({ store: keyvRedis })
- : new Keyv({ namespace: 'pending_req' });
+ : new Keyv({ namespace: CacheKeys.PENDING_REQ });
const config = isRedisEnabled
? new Keyv({ store: keyvRedis })
@@ -49,6 +49,10 @@ const genTitle = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
+const s3ExpiryInterval = isRedisEnabled
+ ? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
+ : new Keyv({ namespace: CacheKeys.S3_EXPIRY_INTERVAL, ttl: Time.THIRTY_MINUTES });
+
const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
@@ -60,7 +64,7 @@ const abortKeys = isRedisEnabled
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.CONFIG_STORE]: config,
- pending_req,
+ [CacheKeys.PENDING_REQ]: pending_req,
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
[CacheKeys.ENCODED_DOMAINS]: new Keyv({
store: keyvMongo,
@@ -89,6 +93,7 @@ const namespaces = {
[CacheKeys.ABORT_KEYS]: abortKeys,
[CacheKeys.TOKEN_CONFIG]: tokenConfig,
[CacheKeys.GEN_TITLE]: genTitle,
+ [CacheKeys.S3_EXPIRY_INTERVAL]: s3ExpiryInterval,
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
diff --git a/api/cache/ioredisClient.js b/api/cache/ioredisClient.js
new file mode 100644
index 0000000000..cd48459ab4
--- /dev/null
+++ b/api/cache/ioredisClient.js
@@ -0,0 +1,92 @@
+const fs = require('fs');
+const Redis = require('ioredis');
+const { isEnabled } = require('~/server/utils');
+const logger = require('~/config/winston');
+
+const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_MAX_LISTENERS } = process.env;
+
+/** @type {import('ioredis').Redis | import('ioredis').Cluster} */
+let ioredisClient;
+const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;
+
+function mapURI(uri) {
+ const regex =
+ /^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
+ const match = uri.match(regex);
+
+ if (match) {
+ const { scheme, user, password, host, port } = match.groups;
+
+ return {
+ scheme: scheme || 'none',
+ user: user || null,
+ password: password || null,
+ host: host || null,
+ port: port || null,
+ };
+ } else {
+ const parts = uri.split(':');
+ if (parts.length === 2) {
+ return {
+ scheme: 'none',
+ user: null,
+ password: null,
+ host: parts[0],
+ port: parts[1],
+ };
+ }
+
+ return {
+ scheme: 'none',
+ user: null,
+ password: null,
+ host: uri,
+ port: null,
+ };
+ }
+}
+
+if (REDIS_URI && isEnabled(USE_REDIS)) {
+ let redisOptions = null;
+
+ if (REDIS_CA) {
+ const ca = fs.readFileSync(REDIS_CA);
+ redisOptions = { tls: { ca } };
+ }
+
+ if (isEnabled(USE_REDIS_CLUSTER)) {
+ const hosts = REDIS_URI.split(',').map((item) => {
+ var value = mapURI(item);
+
+ return {
+ host: value.host,
+ port: value.port,
+ };
+ });
+ ioredisClient = new Redis.Cluster(hosts, { redisOptions });
+ } else {
+ ioredisClient = new Redis(REDIS_URI, redisOptions);
+ }
+
+ ioredisClient.on('ready', () => {
+ logger.info('IoRedis connection ready');
+ });
+ ioredisClient.on('reconnecting', () => {
+ logger.info('IoRedis connection reconnecting');
+ });
+ ioredisClient.on('end', () => {
+ logger.info('IoRedis connection ended');
+ });
+ ioredisClient.on('close', () => {
+ logger.info('IoRedis connection closed');
+ });
+ ioredisClient.on('error', (err) => logger.error('IoRedis connection error:', err));
+ ioredisClient.setMaxListeners(redis_max_listeners);
+ logger.info(
+ '[Optional] IoRedis initialized for rate limiters. If you have issues, disable Redis or restart the server.',
+ );
+} else {
+ logger.info('[Optional] IoRedis not initialized for rate limiters.');
+}
+
+module.exports = ioredisClient;
diff --git a/api/cache/keyvFiles.js b/api/cache/keyvFiles.js
index f969174b7d..1476b60cb8 100644
--- a/api/cache/keyvFiles.js
+++ b/api/cache/keyvFiles.js
@@ -1,11 +1,9 @@
const { KeyvFile } = require('keyv-file');
-const logFile = new KeyvFile({ filename: './data/logs.json' });
-const pendingReqFile = new KeyvFile({ filename: './data/pendingReqCache.json' });
-const violationFile = new KeyvFile({ filename: './data/violations.json' });
+const logFile = new KeyvFile({ filename: './data/logs.json' }).setMaxListeners(20);
+const violationFile = new KeyvFile({ filename: './data/violations.json' }).setMaxListeners(20);
module.exports = {
logFile,
- pendingReqFile,
violationFile,
};
diff --git a/api/cache/keyvMongo.js b/api/cache/keyvMongo.js
index 8f5b9fd8d8..1606e98eb8 100644
--- a/api/cache/keyvMongo.js
+++ b/api/cache/keyvMongo.js
@@ -1,9 +1,272 @@
-const KeyvMongo = require('@keyv/mongo');
+// api/cache/keyvMongo.js
+const mongoose = require('mongoose');
+const EventEmitter = require('events');
+const { GridFSBucket } = require('mongodb');
const { logger } = require('~/config');
-const { MONGO_URI } = process.env ?? {};
+const storeMap = new Map();
+
+class KeyvMongoCustom extends EventEmitter {
+ constructor(url, options = {}) {
+ super();
+
+ url = url || {};
+ if (typeof url === 'string') {
+ url = { url };
+ }
+ if (url.uri) {
+ url = { url: url.uri, ...url };
+ }
+
+ this.opts = {
+ url: 'mongodb://127.0.0.1:27017',
+ collection: 'keyv',
+ ...url,
+ ...options,
+ };
+
+ this.ttlSupport = false;
+
+ // Filter valid options
+ const keyvMongoKeys = new Set([
+ 'url',
+ 'collection',
+ 'namespace',
+ 'serialize',
+ 'deserialize',
+ 'uri',
+ 'useGridFS',
+ 'dialect',
+ ]);
+ this.opts = Object.fromEntries(Object.entries(this.opts).filter(([k]) => keyvMongoKeys.has(k)));
+ }
+
+ // Helper to access the store WITHOUT storing a promise on the instance
+ _getClient() {
+ const storeKey = `${this.opts.collection}:${this.opts.useGridFS ? 'gridfs' : 'collection'}`;
+
+ // If we already have the store initialized, return it directly
+ if (storeMap.has(storeKey)) {
+ return Promise.resolve(storeMap.get(storeKey));
+ }
+
+ // Check mongoose connection state
+ if (mongoose.connection.readyState !== 1) {
+ return Promise.reject(
+ new Error('Mongoose connection not ready. Ensure connectDb() is called first.'),
+ );
+ }
+
+ try {
+ const db = mongoose.connection.db;
+ let client;
+
+ if (this.opts.useGridFS) {
+ const bucket = new GridFSBucket(db, {
+ readPreference: this.opts.readPreference,
+ bucketName: this.opts.collection,
+ });
+ const store = db.collection(`${this.opts.collection}.files`);
+ client = { bucket, store, db };
+ } else {
+ const collection = this.opts.collection || 'keyv';
+ const store = db.collection(collection);
+ client = { store, db };
+ }
+
+ storeMap.set(storeKey, client);
+ return Promise.resolve(client);
+ } catch (error) {
+ this.emit('error', error);
+ return Promise.reject(error);
+ }
+ }
+
+ async get(key) {
+ const client = await this._getClient();
+
+ if (this.opts.useGridFS) {
+ await client.store.updateOne(
+ {
+ filename: key,
+ },
+ {
+ $set: {
+ 'metadata.lastAccessed': new Date(),
+ },
+ },
+ );
+
+ const stream = client.bucket.openDownloadStreamByName(key);
+
+ return new Promise((resolve) => {
+ const resp = [];
+ stream.on('error', () => {
+ resolve(undefined);
+ });
+
+ stream.on('end', () => {
+ const data = Buffer.concat(resp).toString('utf8');
+ resolve(data);
+ });
+
+ stream.on('data', (chunk) => {
+ resp.push(chunk);
+ });
+ });
+ }
+
+ const document = await client.store.findOne({ key: { $eq: key } });
+
+ if (!document) {
+ return undefined;
+ }
+
+ return document.value;
+ }
+
+ async getMany(keys) {
+ const client = await this._getClient();
+
+ if (this.opts.useGridFS) {
+ const promises = [];
+ for (const key of keys) {
+ promises.push(this.get(key));
+ }
+
+ const values = await Promise.allSettled(promises);
+ const data = [];
+ for (const value of values) {
+ data.push(value.value);
+ }
+
+ return data;
+ }
+
+ const values = await client.store
+ .find({ key: { $in: keys } })
+ .project({ _id: 0, value: 1, key: 1 })
+ .toArray();
+
+ const results = [...keys];
+ let i = 0;
+ for (const key of keys) {
+ const rowIndex = values.findIndex((row) => row.key === key);
+ results[i] = rowIndex > -1 ? values[rowIndex].value : undefined;
+ i++;
+ }
+
+ return results;
+ }
+
+ async set(key, value, ttl) {
+ const client = await this._getClient();
+ const expiresAt = typeof ttl === 'number' ? new Date(Date.now() + ttl) : null;
+
+ if (this.opts.useGridFS) {
+ const stream = client.bucket.openUploadStream(key, {
+ metadata: {
+ expiresAt,
+ lastAccessed: new Date(),
+ },
+ });
+
+ return new Promise((resolve) => {
+ stream.on('finish', () => {
+ resolve(stream);
+ });
+ stream.end(value);
+ });
+ }
+
+ await client.store.updateOne(
+ { key: { $eq: key } },
+ { $set: { key, value, expiresAt } },
+ { upsert: true },
+ );
+ }
+
+ async delete(key) {
+ if (typeof key !== 'string') {
+ return false;
+ }
+
+ const client = await this._getClient();
+
+ if (this.opts.useGridFS) {
+ try {
+ const bucket = new GridFSBucket(client.db, {
+ bucketName: this.opts.collection,
+ });
+ const files = await bucket.find({ filename: key }).toArray();
+ await client.bucket.delete(files[0]._id);
+ return true;
+ } catch {
+ return false;
+ }
+ }
+
+ const object = await client.store.deleteOne({ key: { $eq: key } });
+ return object.deletedCount > 0;
+ }
+
+ async deleteMany(keys) {
+ const client = await this._getClient();
+
+ if (this.opts.useGridFS) {
+ const bucket = new GridFSBucket(client.db, {
+ bucketName: this.opts.collection,
+ });
+ const files = await bucket.find({ filename: { $in: keys } }).toArray();
+ if (files.length === 0) {
+ return false;
+ }
+
+ await Promise.all(files.map(async (file) => client.bucket.delete(file._id)));
+ return true;
+ }
+
+ const object = await client.store.deleteMany({ key: { $in: keys } });
+ return object.deletedCount > 0;
+ }
+
+ async clear() {
+ const client = await this._getClient();
+
+ if (this.opts.useGridFS) {
+ try {
+ await client.bucket.drop();
+ } catch (error) {
+ // Throw error if not "namespace not found" error
+ if (!(error.code === 26)) {
+ throw error;
+ }
+ }
+ }
+
+ await client.store.deleteMany({
+ key: { $regex: this.namespace ? `^${this.namespace}:*` : '' },
+ });
+ }
+
+ async has(key) {
+ const client = await this._getClient();
+ const filter = { [this.opts.useGridFS ? 'filename' : 'key']: { $eq: key } };
+ const document = await client.store.countDocuments(filter, { limit: 1 });
+ return document !== 0;
+ }
+
+ // No-op disconnect
+ async disconnect() {
+ // This is a no-op since we don't want to close the shared mongoose connection
+ return true;
+ }
+}
+
+const keyvMongo = new KeyvMongoCustom({
+ collection: 'logs',
+});
-const keyvMongo = new KeyvMongo(MONGO_URI, { collection: 'logs' });
keyvMongo.on('error', (err) => logger.error('KeyvMongo connection error:', err));
module.exports = keyvMongo;
diff --git a/api/cache/keyvRedis.js b/api/cache/keyvRedis.js
index d544b50a11..cb9d837e21 100644
--- a/api/cache/keyvRedis.js
+++ b/api/cache/keyvRedis.js
@@ -1,20 +1,106 @@
-const KeyvRedis = require('@keyv/redis');
+const fs = require('fs');
+const ioredis = require('ioredis');
+const KeyvRedis = require('@keyv/redis').default;
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');
-const { REDIS_URI, USE_REDIS } = process.env;
+const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, REDIS_MAX_LISTENERS } =
+ process.env;
let keyvRedis;
+const redis_prefix = REDIS_KEY_PREFIX || '';
+const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;
+
+function mapURI(uri) {
+ const regex =
+ /^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
+ const match = uri.match(regex);
+
+ if (match) {
+ const { scheme, user, password, host, port } = match.groups;
+
+ return {
+ scheme: scheme || 'none',
+ user: user || null,
+ password: password || null,
+ host: host || null,
+ port: port || null,
+ };
+ } else {
+ const parts = uri.split(':');
+ if (parts.length === 2) {
+ return {
+ scheme: 'none',
+ user: null,
+ password: null,
+ host: parts[0],
+ port: parts[1],
+ };
+ }
+
+ return {
+ scheme: 'none',
+ user: null,
+ password: null,
+ host: uri,
+ port: null,
+ };
+ }
+}
if (REDIS_URI && isEnabled(USE_REDIS)) {
- keyvRedis = new KeyvRedis(REDIS_URI, { useRedisSets: false });
+ let redisOptions = null;
+ /** @type {import('@keyv/redis').KeyvRedisOptions} */
+ let keyvOpts = {
+ useRedisSets: false,
+ keyPrefix: redis_prefix,
+ };
+
+ if (REDIS_CA) {
+ const ca = fs.readFileSync(REDIS_CA);
+ redisOptions = { tls: { ca } };
+ }
+
+ if (isEnabled(USE_REDIS_CLUSTER)) {
+ const hosts = REDIS_URI.split(',').map((item) => {
+ var value = mapURI(item);
+
+ return {
+ host: value.host,
+ port: value.port,
+ };
+ });
+ const cluster = new ioredis.Cluster(hosts, { redisOptions });
+ keyvRedis = new KeyvRedis(cluster, keyvOpts);
+ } else {
+ keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
+ }
+
+ const pingInterval = setInterval(() => {
+ logger.debug('KeyvRedis ping');
+ keyvRedis.client.ping().catch(err => logger.error('Redis keep-alive ping failed:', err));
+ }, 5 * 60 * 1000);
+
+ keyvRedis.on('ready', () => {
+ logger.info('KeyvRedis connection ready');
+ });
+ keyvRedis.on('reconnecting', () => {
+ logger.info('KeyvRedis connection reconnecting');
+ });
+ keyvRedis.on('end', () => {
+ logger.info('KeyvRedis connection ended');
+ });
+ keyvRedis.on('close', () => {
+ clearInterval(pingInterval);
+ logger.info('KeyvRedis connection closed');
+ });
keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
- keyvRedis.setMaxListeners(20);
+ keyvRedis.setMaxListeners(redis_max_listeners);
logger.info(
- '[Optional] Redis initialized. Note: Redis support is experimental. If you have issues, disable it. Cache needs to be flushed for values to refresh.',
+ '[Optional] Redis initialized. If you have issues, or seeing older values, disable it or flush cache to refresh values.',
);
} else {
- logger.info('[Optional] Redis not initialized. Note: Redis support is experimental.');
+ logger.info('[Optional] Redis not initialized.');
}
module.exports = keyvRedis;
diff --git a/api/cache/redis.js b/api/cache/redis.js
deleted file mode 100644
index adf291d02b..0000000000
--- a/api/cache/redis.js
+++ /dev/null
@@ -1,4 +0,0 @@
-const Redis = require('ioredis');
-const { REDIS_URI } = process.env ?? {};
-const redis = new Redis.Cluster(REDIS_URI);
-module.exports = redis;
diff --git a/api/config/index.js b/api/config/index.js
index aaf8bb2764..e238f700be 100644
--- a/api/config/index.js
+++ b/api/config/index.js
@@ -1,31 +1,35 @@
+const axios = require('axios');
const { EventSource } = require('eventsource');
const { Time, CacheKeys } = require('librechat-data-provider');
+const { MCPManager, FlowStateManager } = require('librechat-mcp');
const logger = require('./winston');
global.EventSource = EventSource;
+/** @type {MCPManager} */
let mcpManager = null;
let flowManager = null;
/**
- * @returns {Promise}
+ * @param {string} [userId] - Optional user ID, to avoid disconnecting the current user.
+ * @returns {MCPManager}
*/
-async function getMCPManager() {
+function getMCPManager(userId) {
if (!mcpManager) {
- const { MCPManager } = await import('librechat-mcp');
mcpManager = MCPManager.getInstance(logger);
+ } else {
+ mcpManager.checkIdleConnections(userId);
}
return mcpManager;
}
/**
- * @param {(key: string) => Keyv} getLogStores
- * @returns {Promise}
+ * @param {Keyv} flowsCache
+ * @returns {FlowStateManager}
*/
-async function getFlowStateManager(getLogStores) {
+function getFlowStateManager(flowsCache) {
if (!flowManager) {
- const { FlowStateManager } = await import('librechat-mcp');
- flowManager = new FlowStateManager(getLogStores(CacheKeys.FLOWS), {
+ flowManager = new FlowStateManager(flowsCache, {
ttl: Time.ONE_MINUTE * 3,
logger,
});
@@ -47,9 +51,46 @@ const sendEvent = (res, event) => {
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
+/**
+ * Creates and configures an Axios instance with optional proxy settings.
+ *
+ * @typedef {import('axios').AxiosInstance} AxiosInstance
+ * @typedef {import('axios').AxiosProxyConfig} AxiosProxyConfig
+ *
+ * @returns {AxiosInstance} A configured Axios instance
+ * @throws {Error} If there's an issue creating the Axios instance or parsing the proxy URL
+ */
+function createAxiosInstance() {
+ const instance = axios.create();
+
+ if (process.env.proxy) {
+ try {
+ const url = new URL(process.env.proxy);
+
+ /** @type {AxiosProxyConfig} */
+ const proxyConfig = {
+ host: url.hostname.replace(/^\[|\]$/g, ''),
+ protocol: url.protocol.replace(':', ''),
+ };
+
+ if (url.port) {
+ proxyConfig.port = parseInt(url.port, 10);
+ }
+
+ instance.defaults.proxy = proxyConfig;
+ } catch (error) {
+ console.error('Error parsing proxy URL:', error);
+ throw new Error(`Invalid proxy URL: ${process.env.proxy}`);
+ }
+ }
+
+ return instance;
+}
+
module.exports = {
logger,
sendEvent,
getMCPManager,
+ createAxiosInstance,
getFlowStateManager,
};
diff --git a/api/config/index.spec.js b/api/config/index.spec.js
new file mode 100644
index 0000000000..36ed8302f3
--- /dev/null
+++ b/api/config/index.spec.js
@@ -0,0 +1,126 @@
+const axios = require('axios');
+const { createAxiosInstance } = require('./index');
+
+// Mock axios
+jest.mock('axios', () => ({
+ interceptors: {
+ request: { use: jest.fn(), eject: jest.fn() },
+ response: { use: jest.fn(), eject: jest.fn() },
+ },
+ create: jest.fn().mockReturnValue({
+ defaults: {
+ proxy: null,
+ },
+ get: jest.fn().mockResolvedValue({ data: {} }),
+ post: jest.fn().mockResolvedValue({ data: {} }),
+ put: jest.fn().mockResolvedValue({ data: {} }),
+ delete: jest.fn().mockResolvedValue({ data: {} }),
+ }),
+ get: jest.fn().mockResolvedValue({ data: {} }),
+ post: jest.fn().mockResolvedValue({ data: {} }),
+ put: jest.fn().mockResolvedValue({ data: {} }),
+ delete: jest.fn().mockResolvedValue({ data: {} }),
+ reset: jest.fn().mockImplementation(function () {
+ this.get.mockClear();
+ this.post.mockClear();
+ this.put.mockClear();
+ this.delete.mockClear();
+ this.create.mockClear();
+ }),
+}));
+
+describe('createAxiosInstance', () => {
+ const originalEnv = process.env;
+
+ beforeEach(() => {
+ // Reset mocks
+ jest.clearAllMocks();
+ // Create a clean copy of process.env
+ process.env = { ...originalEnv };
+ // Default: no proxy
+ delete process.env.proxy;
+ });
+
+ afterAll(() => {
+ // Restore original process.env
+ process.env = originalEnv;
+ });
+
+ test('creates an axios instance without proxy when no proxy env is set', () => {
+ const instance = createAxiosInstance();
+
+ expect(axios.create).toHaveBeenCalledTimes(1);
+ expect(instance.defaults.proxy).toBeNull();
+ });
+
+ test('configures proxy correctly with hostname and protocol', () => {
+ process.env.proxy = 'http://example.com';
+
+ const instance = createAxiosInstance();
+
+ expect(axios.create).toHaveBeenCalledTimes(1);
+ expect(instance.defaults.proxy).toEqual({
+ host: 'example.com',
+ protocol: 'http',
+ });
+ });
+
+ test('configures proxy correctly with hostname, protocol and port', () => {
+ process.env.proxy = 'https://proxy.example.com:8080';
+
+ const instance = createAxiosInstance();
+
+ expect(axios.create).toHaveBeenCalledTimes(1);
+ expect(instance.defaults.proxy).toEqual({
+ host: 'proxy.example.com',
+ protocol: 'https',
+ port: 8080,
+ });
+ });
+
+ test('handles proxy URLs with authentication', () => {
+ process.env.proxy = 'http://user:pass@proxy.example.com:3128';
+
+ const instance = createAxiosInstance();
+
+ expect(axios.create).toHaveBeenCalledTimes(1);
+ expect(instance.defaults.proxy).toEqual({
+ host: 'proxy.example.com',
+ protocol: 'http',
+ port: 3128,
+ // Note: The current implementation doesn't handle auth - if needed, add this functionality
+ });
+ });
+
+ test('throws error when proxy URL is invalid', () => {
+ process.env.proxy = 'invalid-url';
+
+ expect(() => createAxiosInstance()).toThrow('Invalid proxy URL');
+ expect(axios.create).toHaveBeenCalledTimes(1);
+ });
+
+ // If you want to test the actual URL parsing more thoroughly
+ test('handles edge case proxy URLs correctly', () => {
+ // IPv6 address
+ process.env.proxy = 'http://[::1]:8080';
+
+ let instance = createAxiosInstance();
+
+ expect(instance.defaults.proxy).toEqual({
+ host: '::1',
+ protocol: 'http',
+ port: 8080,
+ });
+
+ // URL with path (which should be ignored for proxy config)
+ process.env.proxy = 'http://proxy.example.com:8080/some/path';
+
+ instance = createAxiosInstance();
+
+ expect(instance.defaults.proxy).toEqual({
+ host: 'proxy.example.com',
+ protocol: 'http',
+ port: 8080,
+ });
+ });
+});
diff --git a/api/config/meiliLogger.js b/api/config/meiliLogger.js
index 195b387ae5..c5e60ea157 100644
--- a/api/config/meiliLogger.js
+++ b/api/config/meiliLogger.js
@@ -4,7 +4,11 @@ require('winston-daily-rotate-file');
const logDir = path.join(__dirname, '..', 'logs');
-const { NODE_ENV } = process.env;
+const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
+
+const useDebugLogging =
+ (typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
+ DEBUG_LOGGING === true;
const levels = {
error: 0,
@@ -36,9 +40,10 @@ const fileFormat = winston.format.combine(
winston.format.splat(),
);
+const logLevel = useDebugLogging ? 'debug' : 'error';
const transports = [
new winston.transports.DailyRotateFile({
- level: 'debug',
+ level: logLevel,
filename: `${logDir}/meiliSync-%DATE%.log`,
datePattern: 'YYYY-MM-DD',
zippedArchive: true,
@@ -48,14 +53,6 @@ const transports = [
}),
];
-// if (NODE_ENV !== 'production') {
-// transports.push(
-// new winston.transports.Console({
-// format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
-// }),
-// );
-// }
-
const consoleFormat = winston.format.combine(
winston.format.colorize({ all: true }),
winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
diff --git a/api/config/winston.js b/api/config/winston.js
index 8f51b9963c..12f6053723 100644
--- a/api/config/winston.js
+++ b/api/config/winston.js
@@ -5,7 +5,7 @@ const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = requi
const logDir = path.join(__dirname, '..', 'logs');
-const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false, CONSOLE_JSON = false } = process.env;
+const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
const useConsoleJson =
(typeof CONSOLE_JSON === 'string' && CONSOLE_JSON?.toLowerCase() === 'true') ||
@@ -15,6 +15,10 @@ const useDebugConsole =
(typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
DEBUG_CONSOLE === true;
+const useDebugLogging =
+ (typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
+ DEBUG_LOGGING === true;
+
const levels = {
error: 0,
warn: 1,
@@ -57,28 +61,9 @@ const transports = [
maxFiles: '14d',
format: fileFormat,
}),
- // new winston.transports.DailyRotateFile({
- // level: 'info',
- // filename: `${logDir}/info-%DATE%.log`,
- // datePattern: 'YYYY-MM-DD',
- // zippedArchive: true,
- // maxSize: '20m',
- // maxFiles: '14d',
- // }),
];
-// if (NODE_ENV !== 'production') {
-// transports.push(
-// new winston.transports.Console({
-// format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
-// }),
-// );
-// }
-
-if (
- (typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
- DEBUG_LOGGING === true
-) {
+if (useDebugLogging) {
transports.push(
new winston.transports.DailyRotateFile({
level: 'debug',
@@ -107,10 +92,16 @@ const consoleFormat = winston.format.combine(
}),
);
+// Determine console log level
+let consoleLogLevel = 'info';
+if (useDebugConsole) {
+ consoleLogLevel = 'debug';
+}
+
if (useDebugConsole) {
transports.push(
new winston.transports.Console({
- level: 'debug',
+ level: consoleLogLevel,
format: useConsoleJson
? winston.format.combine(fileFormat, jsonTruncateFormat(), winston.format.json())
: winston.format.combine(fileFormat, debugTraverse),
@@ -119,14 +110,14 @@ if (useDebugConsole) {
} else if (useConsoleJson) {
transports.push(
new winston.transports.Console({
- level: 'info',
+ level: consoleLogLevel,
format: winston.format.combine(fileFormat, jsonTruncateFormat(), winston.format.json()),
}),
);
} else {
transports.push(
new winston.transports.Console({
- level: 'info',
+ level: consoleLogLevel,
format: consoleFormat,
}),
);
diff --git a/api/jest.config.js b/api/jest.config.js
index ec44bd7f56..2df7790b7b 100644
--- a/api/jest.config.js
+++ b/api/jest.config.js
@@ -5,7 +5,6 @@ module.exports = {
coverageDirectory: 'coverage',
setupFiles: [
'./test/jestSetup.js',
- './test/__mocks__/KeyvMongo.js',
'./test/__mocks__/logger.js',
'./test/__mocks__/fetchEventSource.js',
],
diff --git a/api/lib/db/indexSync.js b/api/lib/db/indexSync.js
index 86c909419d..75acd9d231 100644
--- a/api/lib/db/indexSync.js
+++ b/api/lib/db/indexSync.js
@@ -1,9 +1,11 @@
const { MeiliSearch } = require('meilisearch');
-const Conversation = require('~/models/schema/convoSchema');
-const Message = require('~/models/schema/messageSchema');
+const { Conversation } = require('~/models/Conversation');
+const { Message } = require('~/models/Message');
+const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
-const searchEnabled = process.env?.SEARCH?.toLowerCase() === 'true';
+const searchEnabled = isEnabled(process.env.SEARCH);
+const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
let currentTimeout = null;
class MeiliSearchClient {
@@ -23,8 +25,7 @@ class MeiliSearchClient {
}
}
-// eslint-disable-next-line no-unused-vars
-async function indexSync(req, res, next) {
+async function indexSync() {
if (!searchEnabled) {
return;
}
@@ -33,10 +34,15 @@ async function indexSync(req, res, next) {
const client = MeiliSearchClient.getInstance();
const { status } = await client.health();
- if (status !== 'available' || !process.env.SEARCH) {
+ if (status !== 'available') {
throw new Error('Meilisearch not available');
}
+ if (indexingDisabled === true) {
+ logger.info('[indexSync] Indexing is disabled, skipping...');
+ return;
+ }
+
const messageCount = await Message.countDocuments();
const convoCount = await Conversation.countDocuments();
const messages = await client.index('messages').getStats();
@@ -71,7 +77,6 @@ async function indexSync(req, res, next) {
logger.info('[indexSync] Meilisearch not configured, search will be disabled.');
} else {
logger.error('[indexSync] error', err);
- // res.status(500).json({ error: 'Server error' });
}
}
}
diff --git a/api/lib/utils/reduceHits.js b/api/lib/utils/reduceHits.js
deleted file mode 100644
index 77b2f9d57d..0000000000
--- a/api/lib/utils/reduceHits.js
+++ /dev/null
@@ -1,59 +0,0 @@
-const mergeSort = require('./mergeSort');
-const { cleanUpPrimaryKeyValue } = require('./misc');
-
-function reduceMessages(hits) {
- const counts = {};
-
- for (const hit of hits) {
- if (!counts[hit.conversationId]) {
- counts[hit.conversationId] = 1;
- } else {
- counts[hit.conversationId]++;
- }
- }
-
- const result = [];
-
- for (const [conversationId, count] of Object.entries(counts)) {
- result.push({
- conversationId,
- count,
- });
- }
-
- return mergeSort(result, (a, b) => b.count - a.count);
-}
-
-function reduceHits(hits, titles = []) {
- const counts = {};
- const titleMap = {};
- const convos = [...hits, ...titles];
-
- for (const convo of convos) {
- const currentId = cleanUpPrimaryKeyValue(convo.conversationId);
- if (!counts[currentId]) {
- counts[currentId] = 1;
- } else {
- counts[currentId]++;
- }
-
- if (convo.title) {
- // titleMap[currentId] = convo._formatted.title;
- titleMap[currentId] = convo.title;
- }
- }
-
- const result = [];
-
- for (const [conversationId, count] of Object.entries(counts)) {
- result.push({
- conversationId,
- count,
- title: titleMap[conversationId] ? titleMap[conversationId] : null,
- });
- }
-
- return mergeSort(result, (a, b) => b.count - a.count);
-}
-
-module.exports = { reduceMessages, reduceHits };
diff --git a/api/models/Action.js b/api/models/Action.js
index 299b3bf20a..677b4d78df 100644
--- a/api/models/Action.js
+++ b/api/models/Action.js
@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
-const actionSchema = require('./schema/action');
+const { actionSchema } = require('@librechat/data-schemas');
const Action = mongoose.model('action', actionSchema);
diff --git a/api/models/Agent.js b/api/models/Agent.js
index 6fa00f56bc..9b34eeae65 100644
--- a/api/models/Agent.js
+++ b/api/models/Agent.js
@@ -1,6 +1,8 @@
const mongoose = require('mongoose');
-const { SystemRoles } = require('librechat-data-provider');
-const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
+const { agentSchema } = require('@librechat/data-schemas');
+const { SystemRoles, Tools } = require('librechat-data-provider');
+const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_delimiter } =
+ require('librechat-data-provider').Constants;
const { CONFIG_STORE, STARTUP_CONFIG } = require('librechat-data-provider').CacheKeys;
const {
getProjectByName,
@@ -9,7 +11,6 @@ const {
removeAgentFromAllProjects,
} = require('./Project');
const getLogStores = require('~/cache/getLogStores');
-const agentSchema = require('./schema/agent');
const Agent = mongoose.model('agent', agentSchema);
@@ -39,13 +40,69 @@ const getAgent = async (searchParameter) => await Agent.findOne(searchParameter)
* @param {Object} params
* @param {ServerRequest} params.req
* @param {string} params.agent_id
+ * @param {string} params.endpoint
+ * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
+ * @returns {Agent|null} The agent document as a plain object, or null if not found.
+ */
+const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) => {
+ const { model, ...model_parameters } = _m;
+  /** @type {Record<string, unknown>} */
+ const availableTools = req.app.locals.availableTools;
+ const mcpServers = new Set(req.body.ephemeralAgent?.mcp);
+ /** @type {string[]} */
+ const tools = [];
+ if (req.body.ephemeralAgent?.execute_code === true) {
+ tools.push(Tools.execute_code);
+ }
+
+ if (mcpServers.size > 0) {
+ for (const toolName of Object.keys(availableTools)) {
+ if (!toolName.includes(mcp_delimiter)) {
+ continue;
+ }
+ const mcpServer = toolName.split(mcp_delimiter)?.[1];
+ if (mcpServer && mcpServers.has(mcpServer)) {
+ tools.push(toolName);
+ }
+ }
+ }
+
+ const instructions = req.body.promptPrefix;
+ return {
+ id: agent_id,
+ instructions,
+ provider: endpoint,
+ model_parameters,
+ model,
+ tools,
+ };
+};
+
+/**
+ * Load an agent based on the provided ID
+ *
+ * @param {Object} params
+ * @param {ServerRequest} params.req
+ * @param {string} params.agent_id
+ * @param {string} params.endpoint
+ * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
+ * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
*/
-const loadAgent = async ({ req, agent_id }) => {
+const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
+ if (!agent_id) {
+ return null;
+ }
+ if (agent_id === EPHEMERAL_AGENT_ID) {
+ return loadEphemeralAgent({ req, agent_id, endpoint, model_parameters });
+ }
const agent = await getAgent({
id: agent_id,
});
+ if (!agent) {
+ return null;
+ }
+
if (agent.author.toString() === req.user.id) {
return agent;
}
@@ -96,12 +153,30 @@ const updateAgent = async (searchParameter, updateData) => {
*/
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
-
- // build the update to push or create the file ids set
+ let agent = await getAgent(searchParameter);
+ if (!agent) {
+ throw new Error('Agent not found for adding resource file');
+ }
const fileIdsPath = `tool_resources.${tool_resource}.file_ids`;
- const updateData = { $addToSet: { [fileIdsPath]: file_id } };
+ await Agent.updateOne(
+ {
+ id: agent_id,
+ [`${fileIdsPath}`]: { $exists: false },
+ },
+ {
+ $set: {
+ [`${fileIdsPath}`]: [],
+ },
+ },
+ );
+
+ const updateData = {
+ $addToSet: {
+ tools: tool_resource,
+ [fileIdsPath]: file_id,
+ },
+ };
- // return the updated agent or throw if no agent matches
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
@@ -111,16 +186,17 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
};
/**
- * Removes multiple resource files from an agent in a single update.
+ * Removes multiple resource files from an agent using atomic operations.
* @param {object} params
* @param {string} params.agent_id
* @param {Array<{tool_resource: string, file_id: string}>} params.files
* @returns {Promise} The updated agent.
+ * @throws {Error} If the agent is not found or update fails.
*/
const removeAgentResourceFiles = async ({ agent_id, files }) => {
const searchParameter = { id: agent_id };
- // associate each tool resource with the respective file ids array
+ // Group files to remove by resource
const filesByResource = files.reduce((acc, { tool_resource, file_id }) => {
if (!acc[tool_resource]) {
acc[tool_resource] = [];
@@ -129,42 +205,35 @@ const removeAgentResourceFiles = async ({ agent_id, files }) => {
return acc;
}, {});
- // build the update aggregation pipeline wich removes file ids from tool resources array
- // and eventually deletes empty tool resources
- const updateData = [];
- Object.entries(filesByResource).forEach(([resource, fileIds]) => {
- const toolResourcePath = `tool_resources.${resource}`;
- const fileIdsPath = `${toolResourcePath}.file_ids`;
-
- // file ids removal stage
- updateData.push({
- $set: {
- [fileIdsPath]: {
- $filter: {
- input: `$${fileIdsPath}`,
- cond: { $not: [{ $in: ['$$this', fileIds] }] },
- },
- },
- },
- });
-
- // empty tool resource deletion stage
- updateData.push({
- $set: {
- [toolResourcePath]: {
- $cond: [{ $eq: [`$${fileIdsPath}`, []] }, '$$REMOVE', `$${toolResourcePath}`],
- },
- },
- });
- });
-
- // return the updated agent or throw if no agent matches
- const updatedAgent = await updateAgent(searchParameter, updateData);
- if (updatedAgent) {
- return updatedAgent;
- } else {
- throw new Error('Agent not found for removing resource files');
+ // Step 1: Atomically remove file IDs using $pull
+ const pullOps = {};
+ const resourcesToCheck = new Set();
+ for (const [resource, fileIds] of Object.entries(filesByResource)) {
+ const fileIdsPath = `tool_resources.${resource}.file_ids`;
+ pullOps[fileIdsPath] = { $in: fileIds };
+ resourcesToCheck.add(resource);
}
+
+ const updatePullData = { $pull: pullOps };
+ const agentAfterPull = await Agent.findOneAndUpdate(searchParameter, updatePullData, {
+ new: true,
+ }).lean();
+
+ if (!agentAfterPull) {
+ // Agent might have been deleted concurrently, or never existed.
+ // Check if it existed before trying to throw.
+ const agentExists = await getAgent(searchParameter);
+ if (!agentExists) {
+ throw new Error('Agent not found for removing resource files');
+ }
+ // If it existed but findOneAndUpdate returned null, something else went wrong.
+ throw new Error('Failed to update agent during file removal (pull step)');
+ }
+
+ // Return the agent state directly after the $pull operation.
+ // Skipping the $unset step for now to simplify and test core $pull atomicity.
+ // Empty arrays might remain, but the removal itself should be correct.
+ return agentAfterPull;
};
/**
@@ -239,7 +308,7 @@ const getListAgents = async (searchParameter) => {
* This function also updates the corresponding projects to include or exclude the agent ID.
*
* @param {Object} params - Parameters for updating the agent's projects.
- * @param {import('librechat-data-provider').TUser} params.user - Parameters for updating the agent's projects.
+ * @param {MongoUser} params.user - Parameters for updating the agent's projects.
* @param {string} params.agentId - The ID of the agent to update.
* @param {string[]} [params.projectIds] - Array of project IDs to add to the agent.
* @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent.
@@ -290,6 +359,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
};
module.exports = {
+ Agent,
getAgent,
loadAgent,
createAgent,
diff --git a/api/models/Agent.spec.js b/api/models/Agent.spec.js
new file mode 100644
index 0000000000..051cb6800f
--- /dev/null
+++ b/api/models/Agent.spec.js
@@ -0,0 +1,334 @@
+const mongoose = require('mongoose');
+const { v4: uuidv4 } = require('uuid');
+const { MongoMemoryServer } = require('mongodb-memory-server');
+const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
+
+describe('Agent Resource File Operations', () => {
+ let mongoServer;
+
+ beforeAll(async () => {
+ mongoServer = await MongoMemoryServer.create();
+ const mongoUri = mongoServer.getUri();
+ await mongoose.connect(mongoUri);
+ });
+
+ afterAll(async () => {
+ await mongoose.disconnect();
+ await mongoServer.stop();
+ });
+
+ beforeEach(async () => {
+ await Agent.deleteMany({});
+ });
+
+ const createBasicAgent = async () => {
+ const agentId = `agent_${uuidv4()}`;
+ const agent = await Agent.create({
+ id: agentId,
+ name: 'Test Agent',
+ provider: 'test',
+ model: 'test-model',
+ author: new mongoose.Types.ObjectId(),
+ });
+ return agent;
+ };
+
+ test('should add tool_resource to tools if missing', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+ const toolResource = 'file_search';
+
+ const updatedAgent = await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: toolResource,
+ file_id: fileId,
+ });
+
+ expect(updatedAgent.tools).toContain(toolResource);
+ expect(Array.isArray(updatedAgent.tools)).toBe(true);
+ // Should not duplicate
+ const count = updatedAgent.tools.filter((t) => t === toolResource).length;
+ expect(count).toBe(1);
+ });
+
+ test('should not duplicate tool_resource in tools if already present', async () => {
+ const agent = await createBasicAgent();
+ const fileId1 = uuidv4();
+ const fileId2 = uuidv4();
+ const toolResource = 'file_search';
+
+ // First add
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: toolResource,
+ file_id: fileId1,
+ });
+
+ // Second add (should not duplicate)
+ const updatedAgent = await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: toolResource,
+ file_id: fileId2,
+ });
+
+ expect(updatedAgent.tools).toContain(toolResource);
+ expect(Array.isArray(updatedAgent.tools)).toBe(true);
+ const count = updatedAgent.tools.filter((t) => t === toolResource).length;
+ expect(count).toBe(1);
+ });
+
+ test('should handle concurrent file additions', async () => {
+ const agent = await createBasicAgent();
+ const fileIds = Array.from({ length: 10 }, () => uuidv4());
+
+ // Concurrent additions
+ const additionPromises = fileIds.map((fileId) =>
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ );
+
+ await Promise.all(additionPromises);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(10);
+ expect(new Set(updatedAgent.tool_resources.test_tool.file_ids).size).toBe(10);
+ });
+
+ test('should handle concurrent additions and removals', async () => {
+ const agent = await createBasicAgent();
+ const initialFileIds = Array.from({ length: 5 }, () => uuidv4());
+
+ await Promise.all(
+ initialFileIds.map((fileId) =>
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ ),
+ );
+
+ const newFileIds = Array.from({ length: 5 }, () => uuidv4());
+ const operations = [
+ ...newFileIds.map((fileId) =>
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ ),
+ ...initialFileIds.map((fileId) =>
+ removeAgentResourceFiles({
+ agent_id: agent.id,
+ files: [{ tool_resource: 'test_tool', file_id: fileId }],
+ }),
+ ),
+ ];
+
+ await Promise.all(operations);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(5);
+ });
+
+ test('should initialize array when adding to non-existent tool resource', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+
+ const updatedAgent = await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'new_tool',
+ file_id: fileId,
+ });
+
+ expect(updatedAgent.tool_resources.new_tool.file_ids).toBeDefined();
+ expect(updatedAgent.tool_resources.new_tool.file_ids).toHaveLength(1);
+ expect(updatedAgent.tool_resources.new_tool.file_ids[0]).toBe(fileId);
+ });
+
+ test('should handle rapid sequential modifications to same tool resource', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+
+ for (let i = 0; i < 10; i++) {
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: `${fileId}_${i}`,
+ });
+
+ if (i % 2 === 0) {
+ await removeAgentResourceFiles({
+ agent_id: agent.id,
+ files: [{ tool_resource: 'test_tool', file_id: `${fileId}_${i}` }],
+ });
+ }
+ }
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+ expect(Array.isArray(updatedAgent.tool_resources.test_tool.file_ids)).toBe(true);
+ });
+
+ test('should handle multiple tool resources concurrently', async () => {
+ const agent = await createBasicAgent();
+ const toolResources = ['tool1', 'tool2', 'tool3'];
+ const operations = [];
+
+ toolResources.forEach((tool) => {
+ const fileIds = Array.from({ length: 5 }, () => uuidv4());
+ fileIds.forEach((fileId) => {
+ operations.push(
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: tool,
+ file_id: fileId,
+ }),
+ );
+ });
+ });
+
+ await Promise.all(operations);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ toolResources.forEach((tool) => {
+ expect(updatedAgent.tool_resources[tool].file_ids).toBeDefined();
+ expect(updatedAgent.tool_resources[tool].file_ids).toHaveLength(5);
+ });
+ });
+
+ test('should handle concurrent duplicate additions', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+
+ // Concurrent additions of the same file
+ const additionPromises = Array.from({ length: 5 }).map(() =>
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ );
+
+ await Promise.all(additionPromises);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+ // Should only contain one instance of the fileId
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(1);
+ expect(updatedAgent.tool_resources.test_tool.file_ids[0]).toBe(fileId);
+ });
+
+ test('should handle concurrent add and remove of the same file', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+
+ // First, ensure the file exists (or test might be trivial if remove runs first)
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ });
+
+ // Concurrent add (which should be ignored) and remove
+ const operations = [
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ removeAgentResourceFiles({
+ agent_id: agent.id,
+ files: [{ tool_resource: 'test_tool', file_id: fileId }],
+ }),
+ ];
+
+ await Promise.all(operations);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ // The final state should ideally be that the file is removed,
+ // but the key point is consistency (not duplicated or error state).
+ // Depending on execution order, the file might remain if the add operation's
+ // findOneAndUpdate runs after the remove operation completes.
+ // A more robust check might be that the length is <= 1.
+    // Given the remove uses an atomic $pull update, it might be more likely to win.
+ // The final state depends on race condition timing (add or remove might "win").
+ // The critical part is that the state is consistent (no duplicates, no errors).
+ // Assert that the fileId is either present exactly once or not present at all.
+ expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
+ const finalFileIds = updatedAgent.tool_resources.test_tool.file_ids;
+ const count = finalFileIds.filter((id) => id === fileId).length;
+ expect(count).toBeLessThanOrEqual(1); // Should be 0 or 1, never more
+ // Optional: Check overall length is consistent with the count
+ if (count === 0) {
+ expect(finalFileIds).toHaveLength(0);
+ } else {
+ expect(finalFileIds).toHaveLength(1);
+ expect(finalFileIds[0]).toBe(fileId);
+ }
+ });
+
+ test('should handle concurrent duplicate removals', async () => {
+ const agent = await createBasicAgent();
+ const fileId = uuidv4();
+
+ // Add the file first
+ await addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ });
+
+ // Concurrent removals of the same file
+ const removalPromises = Array.from({ length: 5 }).map(() =>
+ removeAgentResourceFiles({
+ agent_id: agent.id,
+ files: [{ tool_resource: 'test_tool', file_id: fileId }],
+ }),
+ );
+
+ await Promise.all(removalPromises);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ // Check if the array is empty or the tool resource itself is removed
+ const fileIds = updatedAgent.tool_resources?.test_tool?.file_ids ?? [];
+ expect(fileIds).toHaveLength(0);
+ expect(fileIds).not.toContain(fileId);
+ });
+
+ test('should handle concurrent removals of different files', async () => {
+ const agent = await createBasicAgent();
+ const fileIds = Array.from({ length: 10 }, () => uuidv4());
+
+ // Add all files first
+ await Promise.all(
+ fileIds.map((fileId) =>
+ addAgentResourceFile({
+ agent_id: agent.id,
+ tool_resource: 'test_tool',
+ file_id: fileId,
+ }),
+ ),
+ );
+
+ // Concurrently remove all files
+ const removalPromises = fileIds.map((fileId) =>
+ removeAgentResourceFiles({
+ agent_id: agent.id,
+ files: [{ tool_resource: 'test_tool', file_id: fileId }],
+ }),
+ );
+
+ await Promise.all(removalPromises);
+
+ const updatedAgent = await Agent.findOne({ id: agent.id });
+ // Check if the array is empty or the tool resource itself is removed
+ const finalFileIds = updatedAgent.tool_resources?.test_tool?.file_ids ?? [];
+ expect(finalFileIds).toHaveLength(0);
+ });
+});
diff --git a/api/models/Assistant.js b/api/models/Assistant.js
index d0e73ad4e7..a8a5b98157 100644
--- a/api/models/Assistant.js
+++ b/api/models/Assistant.js
@@ -1,5 +1,5 @@
const mongoose = require('mongoose');
-const assistantSchema = require('./schema/assistant');
+const { assistantSchema } = require('@librechat/data-schemas');
const Assistant = mongoose.model('assistant', assistantSchema);
diff --git a/api/models/Balance.js b/api/models/Balance.js
index 24d9087b77..226f6ef508 100644
--- a/api/models/Balance.js
+++ b/api/models/Balance.js
@@ -1,44 +1,4 @@
const mongoose = require('mongoose');
-const balanceSchema = require('./schema/balance');
-const { getMultiplier } = require('./tx');
-const { logger } = require('~/config');
-
-balanceSchema.statics.check = async function ({
- user,
- model,
- endpoint,
- valueKey,
- tokenType,
- amount,
- endpointTokenConfig,
-}) {
- const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint, endpointTokenConfig });
- const tokenCost = amount * multiplier;
- const { tokenCredits: balance } = (await this.findOne({ user }, 'tokenCredits').lean()) ?? {};
-
- logger.debug('[Balance.check]', {
- user,
- model,
- endpoint,
- valueKey,
- tokenType,
- amount,
- balance,
- multiplier,
- endpointTokenConfig: !!endpointTokenConfig,
- });
-
- if (!balance) {
- return {
- canSpend: false,
- balance: 0,
- tokenCost,
- };
- }
-
- logger.debug('[Balance.check]', { tokenCost });
-
- return { canSpend: balance >= tokenCost, balance, tokenCost };
-};
+const { balanceSchema } = require('@librechat/data-schemas');
module.exports = mongoose.model('Balance', balanceSchema);
diff --git a/api/models/Banner.js b/api/models/Banner.js
index 8d439dae28..399a8e72ee 100644
--- a/api/models/Banner.js
+++ b/api/models/Banner.js
@@ -1,5 +1,9 @@
-const Banner = require('./schema/banner');
+const mongoose = require('mongoose');
const logger = require('~/config/winston');
+const { bannerSchema } = require('@librechat/data-schemas');
+
+const Banner = mongoose.model('Banner', bannerSchema);
+
/**
* Retrieves the current active banner.
* @returns {Promise