mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-03-04 15:20:18 +01:00
Compare commits
No commits in common. "main" and "v0.8.1-rc2" have entirely different histories.
main
...
v0.8.1-rc2
1392 changed files with 39037 additions and 142813 deletions
|
|
@ -20,7 +20,8 @@ services:
|
||||||
environment:
|
environment:
|
||||||
- HOST=0.0.0.0
|
- HOST=0.0.0.0
|
||||||
- MONGO_URI=mongodb://mongodb:27017/LibreChat
|
- MONGO_URI=mongodb://mongodb:27017/LibreChat
|
||||||
# - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1
|
# - CHATGPT_REVERSE_PROXY=http://host.docker.internal:8080/api/conversation # if you are hosting your own chatgpt reverse proxy with docker
|
||||||
|
# - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1/chat/completions # if you are hosting your own chatgpt reverse proxy with docker
|
||||||
- MEILI_HOST=http://meilisearch:7700
|
- MEILI_HOST=http://meilisearch:7700
|
||||||
|
|
||||||
# Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
|
# Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
|
||||||
|
|
|
||||||
98
.env.example
98
.env.example
|
|
@ -47,10 +47,6 @@ TRUST_PROXY=1
|
||||||
# password policies.
|
# password policies.
|
||||||
# MIN_PASSWORD_LENGTH=8
|
# MIN_PASSWORD_LENGTH=8
|
||||||
|
|
||||||
# When enabled, the app will continue running after encountering uncaught exceptions
|
|
||||||
# instead of exiting the process. Not recommended for production unless necessary.
|
|
||||||
# CONTINUE_ON_UNCAUGHT_EXCEPTION=false
|
|
||||||
|
|
||||||
#===============#
|
#===============#
|
||||||
# JSON Logging #
|
# JSON Logging #
|
||||||
#===============#
|
#===============#
|
||||||
|
|
@ -65,9 +61,6 @@ CONSOLE_JSON=false
|
||||||
DEBUG_LOGGING=true
|
DEBUG_LOGGING=true
|
||||||
DEBUG_CONSOLE=false
|
DEBUG_CONSOLE=false
|
||||||
|
|
||||||
# Enable memory diagnostics (logs heap/RSS snapshots every 60s, auto-enabled with --inspect)
|
|
||||||
# MEM_DIAG=true
|
|
||||||
|
|
||||||
#=============#
|
#=============#
|
||||||
# Permissions #
|
# Permissions #
|
||||||
#=============#
|
#=============#
|
||||||
|
|
@ -75,18 +68,6 @@ DEBUG_CONSOLE=false
|
||||||
# UID=1000
|
# UID=1000
|
||||||
# GID=1000
|
# GID=1000
|
||||||
|
|
||||||
#==============#
|
|
||||||
# Node Options #
|
|
||||||
#==============#
|
|
||||||
|
|
||||||
# NOTE: NODE_MAX_OLD_SPACE_SIZE is NOT recognized by Node.js directly.
|
|
||||||
# This variable is used as a build argument for Docker or CI/CD workflows,
|
|
||||||
# and is NOT used by Node.js to set the heap size at runtime.
|
|
||||||
# To configure Node.js memory, use NODE_OPTIONS, e.g.:
|
|
||||||
# NODE_OPTIONS="--max-old-space-size=6144"
|
|
||||||
# See: https://nodejs.org/api/cli.html#--max-old-space-sizesize-in-mib
|
|
||||||
NODE_MAX_OLD_SPACE_SIZE=6144
|
|
||||||
|
|
||||||
#===============#
|
#===============#
|
||||||
# Configuration #
|
# Configuration #
|
||||||
#===============#
|
#===============#
|
||||||
|
|
@ -94,16 +75,6 @@ NODE_MAX_OLD_SPACE_SIZE=6144
|
||||||
|
|
||||||
# CONFIG_PATH="/alternative/path/to/librechat.yaml"
|
# CONFIG_PATH="/alternative/path/to/librechat.yaml"
|
||||||
|
|
||||||
#==================#
|
|
||||||
# Langfuse Tracing #
|
|
||||||
#==================#
|
|
||||||
|
|
||||||
# Get Langfuse API keys for your project from the project settings page: https://cloud.langfuse.com
|
|
||||||
|
|
||||||
# LANGFUSE_PUBLIC_KEY=
|
|
||||||
# LANGFUSE_SECRET_KEY=
|
|
||||||
# LANGFUSE_BASE_URL=
|
|
||||||
|
|
||||||
#===================================================#
|
#===================================================#
|
||||||
# Endpoints #
|
# Endpoints #
|
||||||
#===================================================#
|
#===================================================#
|
||||||
|
|
@ -138,13 +109,9 @@ PROXY=
|
||||||
#============#
|
#============#
|
||||||
|
|
||||||
ANTHROPIC_API_KEY=user_provided
|
ANTHROPIC_API_KEY=user_provided
|
||||||
# ANTHROPIC_MODELS=claude-sonnet-4-6,claude-opus-4-6,claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
|
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
|
||||||
# ANTHROPIC_REVERSE_PROXY=
|
# ANTHROPIC_REVERSE_PROXY=
|
||||||
|
|
||||||
# Set to true to use Anthropic models through Google Vertex AI instead of direct API
|
|
||||||
# ANTHROPIC_USE_VERTEX=
|
|
||||||
# ANTHROPIC_VERTEX_REGION=us-east5
|
|
||||||
|
|
||||||
#============#
|
#============#
|
||||||
# Azure #
|
# Azure #
|
||||||
#============#
|
#============#
|
||||||
|
|
@ -162,6 +129,7 @@ ANTHROPIC_API_KEY=user_provided
|
||||||
# AZURE_OPENAI_API_VERSION= # Deprecated
|
# AZURE_OPENAI_API_VERSION= # Deprecated
|
||||||
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
||||||
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
||||||
|
# PLUGINS_USE_AZURE="true" # Deprecated
|
||||||
|
|
||||||
#=================#
|
#=================#
|
||||||
# AWS Bedrock #
|
# AWS Bedrock #
|
||||||
|
|
@ -173,8 +141,7 @@ ANTHROPIC_API_KEY=user_provided
|
||||||
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
|
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
|
||||||
|
|
||||||
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
|
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
|
||||||
# BEDROCK_AWS_MODELS=anthropic.claude-sonnet-4-6,anthropic.claude-opus-4-6-v1,anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
|
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
|
||||||
# Cross-region inference model IDs: us.anthropic.claude-sonnet-4-6,us.anthropic.claude-opus-4-6-v1,global.anthropic.claude-opus-4-6-v1
|
|
||||||
|
|
||||||
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
|
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
|
||||||
|
|
||||||
|
|
@ -196,23 +163,15 @@ GOOGLE_KEY=user_provided
|
||||||
# GOOGLE_AUTH_HEADER=true
|
# GOOGLE_AUTH_HEADER=true
|
||||||
|
|
||||||
# Gemini API (AI Studio)
|
# Gemini API (AI Studio)
|
||||||
# GOOGLE_MODELS=gemini-3.1-pro-preview,gemini-3.1-pro-preview-customtools,gemini-3.1-flash-lite-preview,gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
|
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
|
||||||
|
|
||||||
# Vertex AI
|
# Vertex AI
|
||||||
# GOOGLE_MODELS=gemini-3.1-pro-preview,gemini-3.1-pro-preview-customtools,gemini-3.1-flash-lite-preview,gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
|
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
|
||||||
|
|
||||||
# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
|
# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
|
||||||
|
|
||||||
# Google Cloud region for Vertex AI (used by both chat and image generation)
|
|
||||||
# GOOGLE_LOC=us-central1
|
# GOOGLE_LOC=us-central1
|
||||||
|
|
||||||
# Alternative region env var for Gemini Image Generation
|
|
||||||
# GOOGLE_CLOUD_LOCATION=global
|
|
||||||
|
|
||||||
# Vertex AI Service Account Configuration
|
|
||||||
# Path to your Google Cloud service account JSON file
|
|
||||||
# GOOGLE_SERVICE_KEY_FILE=/path/to/service-account.json
|
|
||||||
|
|
||||||
# Google Safety Settings
|
# Google Safety Settings
|
||||||
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
|
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
|
||||||
#
|
#
|
||||||
|
|
@ -232,23 +191,6 @@ GOOGLE_KEY=user_provided
|
||||||
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
|
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
|
||||||
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
|
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
|
||||||
|
|
||||||
#========================#
|
|
||||||
# Gemini Image Generation #
|
|
||||||
#========================#
|
|
||||||
|
|
||||||
# Gemini Image Generation Tool (for Agents)
|
|
||||||
# Supports multiple authentication methods in priority order:
|
|
||||||
# 1. User-provided API key (via GUI)
|
|
||||||
# 2. GEMINI_API_KEY env var (admin-configured)
|
|
||||||
# 3. GOOGLE_KEY env var (shared with Google chat endpoint)
|
|
||||||
# 4. Vertex AI service account (via GOOGLE_SERVICE_KEY_FILE)
|
|
||||||
|
|
||||||
# Option A: Use dedicated Gemini API key for image generation
|
|
||||||
# GEMINI_API_KEY=your-gemini-api-key
|
|
||||||
|
|
||||||
# Vertex AI model for image generation (defaults to gemini-2.5-flash-image)
|
|
||||||
# GEMINI_IMAGE_MODEL=gemini-2.5-flash-image
|
|
||||||
|
|
||||||
#============#
|
#============#
|
||||||
# OpenAI #
|
# OpenAI #
|
||||||
#============#
|
#============#
|
||||||
|
|
@ -288,6 +230,14 @@ ASSISTANTS_API_KEY=user_provided
|
||||||
# More info, including how to enable use of Assistants with Azure here:
|
# More info, including how to enable use of Assistants with Azure here:
|
||||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
||||||
|
|
||||||
|
#============#
|
||||||
|
# Plugins #
|
||||||
|
#============#
|
||||||
|
|
||||||
|
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
|
||||||
|
|
||||||
|
DEBUG_PLUGINS=true
|
||||||
|
|
||||||
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
|
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
|
||||||
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
|
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
|
||||||
|
|
||||||
|
|
@ -307,7 +257,6 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
|
||||||
# IMAGE_GEN_OAI_API_KEY= # Create or reuse OpenAI API key for image generation tool
|
# IMAGE_GEN_OAI_API_KEY= # Create or reuse OpenAI API key for image generation tool
|
||||||
# IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool
|
# IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool
|
||||||
# IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments
|
# IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments
|
||||||
# IMAGE_GEN_OAI_MODEL=gpt-image-1 # OpenAI image model (e.g., gpt-image-1, gpt-image-1.5)
|
|
||||||
# IMAGE_GEN_OAI_DESCRIPTION=
|
# IMAGE_GEN_OAI_DESCRIPTION=
|
||||||
# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
|
# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
|
||||||
# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
|
# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
|
||||||
|
|
@ -345,6 +294,10 @@ FLUX_API_BASE_URL=https://api.us1.bfl.ai
|
||||||
GOOGLE_SEARCH_API_KEY=
|
GOOGLE_SEARCH_API_KEY=
|
||||||
GOOGLE_CSE_ID=
|
GOOGLE_CSE_ID=
|
||||||
|
|
||||||
|
# YOUTUBE
|
||||||
|
#-----------------
|
||||||
|
YOUTUBE_API_KEY=
|
||||||
|
|
||||||
# Stable Diffusion
|
# Stable Diffusion
|
||||||
#-----------------
|
#-----------------
|
||||||
SD_WEBUI_URL=http://host.docker.internal:7860
|
SD_WEBUI_URL=http://host.docker.internal:7860
|
||||||
|
|
@ -513,9 +466,6 @@ OPENID_ADMIN_ROLE_TOKEN_KIND=
|
||||||
OPENID_USERNAME_CLAIM=
|
OPENID_USERNAME_CLAIM=
|
||||||
# Set to determine which user info property returned from OpenID Provider to store as the User's name
|
# Set to determine which user info property returned from OpenID Provider to store as the User's name
|
||||||
OPENID_NAME_CLAIM=
|
OPENID_NAME_CLAIM=
|
||||||
# Set to determine which user info claim to use as the email/identifier for user matching (e.g., "upn" for Entra ID)
|
|
||||||
# When not set, defaults to: email -> preferred_username -> upn
|
|
||||||
OPENID_EMAIL_CLAIM=
|
|
||||||
# Optional audience parameter for OpenID authorization requests
|
# Optional audience parameter for OpenID authorization requests
|
||||||
OPENID_AUDIENCE=
|
OPENID_AUDIENCE=
|
||||||
|
|
||||||
|
|
@ -538,8 +488,6 @@ OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
|
||||||
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
|
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
|
||||||
# Set to true to use the OpenID Connect end session endpoint for logout
|
# Set to true to use the OpenID Connect end session endpoint for logout
|
||||||
OPENID_USE_END_SESSION_ENDPOINT=
|
OPENID_USE_END_SESSION_ENDPOINT=
|
||||||
# URL to redirect to after OpenID logout (defaults to ${DOMAIN_CLIENT}/login)
|
|
||||||
OPENID_POST_LOGOUT_REDIRECT_URI=
|
|
||||||
|
|
||||||
#========================#
|
#========================#
|
||||||
# SharePoint Integration #
|
# SharePoint Integration #
|
||||||
|
|
@ -660,9 +608,6 @@ AWS_ACCESS_KEY_ID=
|
||||||
AWS_SECRET_ACCESS_KEY=
|
AWS_SECRET_ACCESS_KEY=
|
||||||
AWS_REGION=
|
AWS_REGION=
|
||||||
AWS_BUCKET_NAME=
|
AWS_BUCKET_NAME=
|
||||||
# Required for path-style S3-compatible providers (MinIO, Hetzner, Backblaze B2, etc.)
|
|
||||||
# that don't support virtual-hosted-style URLs (bucket.endpoint). Not needed for AWS S3.
|
|
||||||
# AWS_FORCE_PATH_STYLE=false
|
|
||||||
|
|
||||||
#========================#
|
#========================#
|
||||||
# Azure Blob Storage #
|
# Azure Blob Storage #
|
||||||
|
|
@ -720,9 +665,6 @@ HELP_AND_FAQ_URL=https://librechat.ai
|
||||||
|
|
||||||
# Enable Redis for caching and session storage
|
# Enable Redis for caching and session storage
|
||||||
# USE_REDIS=true
|
# USE_REDIS=true
|
||||||
# Enable Redis for resumable LLM streams (defaults to USE_REDIS value if not set)
|
|
||||||
# Set to false to use in-memory storage for streams while keeping Redis for other caches
|
|
||||||
# USE_REDIS_STREAMS=true
|
|
||||||
|
|
||||||
# Single Redis instance
|
# Single Redis instance
|
||||||
# REDIS_URI=redis://127.0.0.1:6379
|
# REDIS_URI=redis://127.0.0.1:6379
|
||||||
|
|
@ -757,10 +699,8 @@ HELP_AND_FAQ_URL=https://librechat.ai
|
||||||
# REDIS_PING_INTERVAL=300
|
# REDIS_PING_INTERVAL=300
|
||||||
|
|
||||||
# Force specific cache namespaces to use in-memory storage even when Redis is enabled
|
# Force specific cache namespaces to use in-memory storage even when Redis is enabled
|
||||||
# Comma-separated list of CacheKeys
|
# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
|
||||||
# Defaults to CONFIG_STORE,APP_CONFIG so YAML-derived config stays per-container (safe for blue/green deployments)
|
# FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES
|
||||||
# Set to empty string to force all namespaces through Redis: FORCED_IN_MEMORY_CACHE_NAMESPACES=
|
|
||||||
# FORCED_IN_MEMORY_CACHE_NAMESPACES=CONFIG_STORE,APP_CONFIG
|
|
||||||
|
|
||||||
# Leader Election Configuration (for multi-instance deployments with Redis)
|
# Leader Election Configuration (for multi-instance deployments with Redis)
|
||||||
# Duration in seconds that the leader lease is valid before it expires (default: 25)
|
# Duration in seconds that the leader lease is valid before it expires (default: 25)
|
||||||
|
|
|
||||||
75
.github/CONTRIBUTING.md
vendored
75
.github/CONTRIBUTING.md
vendored
|
|
@ -26,14 +26,18 @@ Project maintainers have the right and responsibility to remove, edit, or reject
|
||||||
|
|
||||||
## 1. Development Setup
|
## 1. Development Setup
|
||||||
|
|
||||||
1. Use Node.js v20.19.0+ or ^22.12.0 or >= 23.0.0.
|
1. Use Node.JS 20.x.
|
||||||
2. Run `npm run smart-reinstall` to install dependencies (uses Turborepo). Use `npm run reinstall` for a clean install, or `npm ci` for a fresh lockfile-based install.
|
2. Install typescript globally: `npm i -g typescript`.
|
||||||
3. Build all compiled code: `npm run build`.
|
3. Run `npm ci` to install dependencies.
|
||||||
4. Setup and run unit tests:
|
4. Build the data provider: `npm run build:data-provider`.
|
||||||
|
5. Build data schemas: `npm run build:data-schemas`.
|
||||||
|
6. Build API methods: `npm run build:api`.
|
||||||
|
7. Setup and run unit tests:
|
||||||
- Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
|
- Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
|
||||||
- Run backend unit tests: `npm run test:api`.
|
- Run backend unit tests: `npm run test:api`.
|
||||||
- Run frontend unit tests: `npm run test:client`.
|
- Run frontend unit tests: `npm run test:client`.
|
||||||
5. Setup and run integration tests:
|
8. Setup and run integration tests:
|
||||||
|
- Build client: `cd client && npm run build`.
|
||||||
- Create `.env`: `cp .env.example .env`.
|
- Create `.env`: `cp .env.example .env`.
|
||||||
- Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/), ensure that `mongosh` connects to your local instance.
|
- Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/), ensure that `mongosh` connects to your local instance.
|
||||||
- Run: `npx install playwright`, then `npx playwright install`.
|
- Run: `npx install playwright`, then `npx playwright install`.
|
||||||
|
|
@ -44,11 +48,11 @@ Project maintainers have the right and responsibility to remove, edit, or reject
|
||||||
## 2. Development Notes
|
## 2. Development Notes
|
||||||
|
|
||||||
1. Before starting work, make sure your main branch has the latest commits with `npm run update`.
|
1. Before starting work, make sure your main branch has the latest commits with `npm run update`.
|
||||||
2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning.
|
3. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning.
|
||||||
3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works.
|
3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works.
|
||||||
- Restart the ESLint server ("ESLint: Restart ESLint Server" in VS Code command bar) and your IDE after reinstalling or updating.
|
- Restart the ESLint server ("ESLint: Restart ESLint Server" in VS Code command bar) and your IDE after reinstalling or updating.
|
||||||
4. Clear web app localStorage and cookies before and after changes.
|
4. Clear web app localStorage and cookies before and after changes.
|
||||||
5. To check for introduced errors, build all compiled code: `npm run build`.
|
5. For frontend changes, compile typescript before and after changes to check for introduced errors: `cd client && npm run build`.
|
||||||
6. Run backend unit tests: `npm run test:api`.
|
6. Run backend unit tests: `npm run test:api`.
|
||||||
7. Run frontend unit tests: `npm run test:client`.
|
7. Run frontend unit tests: `npm run test:client`.
|
||||||
8. Run integration tests: `npm run e2e`.
|
8. Run integration tests: `npm run e2e`.
|
||||||
|
|
@ -114,45 +118,50 @@ Apply the following naming conventions to branches, labels, and other Git-relate
|
||||||
- **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`).
|
- **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`).
|
||||||
- **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`).
|
- **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`).
|
||||||
|
|
||||||
## 7. Coding Standards
|
## 7. TypeScript Conversion
|
||||||
|
|
||||||
For detailed coding conventions, workspace boundaries, and architecture guidance, refer to the [`AGENTS.md`](../AGENTS.md) file at the project root. It covers code style, type safety, import ordering, iteration/performance expectations, frontend rules, testing, and development commands.
|
|
||||||
|
|
||||||
## 8. TypeScript Conversion
|
|
||||||
|
|
||||||
1. **Original State**: The project was initially developed entirely in JavaScript (JS).
|
1. **Original State**: The project was initially developed entirely in JavaScript (JS).
|
||||||
|
|
||||||
2. **Frontend**: Fully transitioned to TypeScript.
|
2. **Frontend Transition**:
|
||||||
|
- We are in the process of transitioning the frontend from JS to TypeScript (TS).
|
||||||
|
- The transition is nearing completion.
|
||||||
|
- This conversion is feasible due to React's capability to intermix JS and TS prior to code compilation. It's standard practice to compile/bundle the code in such scenarios.
|
||||||
|
|
||||||
3. **Backend**:
|
3. **Backend Considerations**:
|
||||||
- The legacy Express.js server remains in `/api` as JavaScript.
|
- Transitioning the backend to TypeScript would be a more intricate process, especially for an established Express.js server.
|
||||||
- All new backend code is written in TypeScript under `/packages/api`, which is compiled and consumed by `/api`.
|
|
||||||
- Shared database logic lives in `/packages/data-schemas` (TypeScript).
|
|
||||||
- Shared frontend/backend API types and services live in `/packages/data-provider` (TypeScript).
|
|
||||||
- Minimize direct changes to `/api`; prefer adding TypeScript code to `/packages/api` and importing it.
|
|
||||||
|
|
||||||
## 9. Module Import Conventions
|
- **Options for Transition**:
|
||||||
|
- **Single Phase Overhaul**: This involves converting the entire backend to TypeScript in one go. It's the most straightforward approach but can be disruptive, especially for larger codebases.
|
||||||
|
|
||||||
Imports are organized into three sections (in order):
|
- **Incremental Transition**: Convert parts of the backend progressively. This can be done by:
|
||||||
|
- Maintaining a separate directory for TypeScript files.
|
||||||
|
- Gradually migrating and testing individual modules or routes.
|
||||||
|
- Using a build tool like `tsc` to compile TypeScript files independently until the entire transition is complete.
|
||||||
|
|
||||||
1. **Package imports** — sorted from shortest to longest line length.
|
- **Compilation Considerations**:
|
||||||
- `react` is always the first import.
|
- Introducing a compilation step for the server is an option. This would involve using tools like `ts-node` for development and `tsc` for production builds.
|
||||||
- Multi-line (stacked) imports count their total character length across all lines for sorting.
|
- However, this is not a conventional approach for Express.js servers and could introduce added complexity, especially in terms of build and deployment processes.
|
||||||
|
|
||||||
2. **`import type` imports** — sorted from longest to shortest line length.
|
- **Current Stance**: At present, this backend transition is of lower priority and might not be pursued.
|
||||||
- Package type imports come first, then local type imports.
|
|
||||||
- Line length sorting resets between the package and local sub-groups.
|
|
||||||
|
|
||||||
3. **Local/project imports** — sorted from longest to shortest line length.
|
## 8. Module Import Conventions
|
||||||
- Multi-line (stacked) imports count their total character length across all lines for sorting.
|
|
||||||
- Imports with alias `~` are treated the same as relative imports with respect to line length.
|
|
||||||
|
|
||||||
- Consolidate value imports from the same module as much as possible.
|
- `npm` packages first,
|
||||||
- Always use standalone `import type { ... }` for type imports; never use inline `type` keyword inside value imports (e.g., `import { Foo, type Bar }` is wrong).
|
- from longest line (top) to shortest (bottom)
|
||||||
|
|
||||||
|
- Followed by typescript types (pertains to data-provider and client workspaces)
|
||||||
|
- longest line (top) to shortest (bottom)
|
||||||
|
- types from package come first
|
||||||
|
|
||||||
|
- Lastly, local imports
|
||||||
|
- longest line (top) to shortest (bottom)
|
||||||
|
- imports with alias `~` treated the same as relative import with respect to line length
|
||||||
|
|
||||||
**Note:** ESLint will automatically enforce these import conventions when you run `npm run lint --fix` or through pre-commit hooks.
|
**Note:** ESLint will automatically enforce these import conventions when you run `npm run lint --fix` or through pre-commit hooks.
|
||||||
|
|
||||||
For the full set of coding standards, see [`AGENTS.md`](../AGENTS.md).
|
---
|
||||||
|
|
||||||
|
Please ensure that you adapt this summary to fit the specific context and nuances of your project.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
12
.github/workflows/backend-review.yml
vendored
12
.github/workflows/backend-review.yml
vendored
|
|
@ -4,7 +4,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
|
|
@ -24,7 +23,6 @@ jobs:
|
||||||
BAN_DURATION: ${{ secrets.BAN_DURATION }}
|
BAN_DURATION: ${{ secrets.BAN_DURATION }}
|
||||||
BAN_INTERVAL: ${{ secrets.BAN_INTERVAL }}
|
BAN_INTERVAL: ${{ secrets.BAN_INTERVAL }}
|
||||||
NODE_ENV: CI
|
NODE_ENV: CI
|
||||||
NODE_OPTIONS: '--max-old-space-size=${{ secrets.NODE_MAX_OLD_SPACE_SIZE || 6144 }}'
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Use Node.js 20.x
|
- name: Use Node.js 20.x
|
||||||
|
|
@ -42,14 +40,8 @@ jobs:
|
||||||
- name: Install Data Schemas Package
|
- name: Install Data Schemas Package
|
||||||
run: npm run build:data-schemas
|
run: npm run build:data-schemas
|
||||||
|
|
||||||
- name: Build API Package & Detect Circular Dependencies
|
- name: Install API Package
|
||||||
run: |
|
run: npm run build:api
|
||||||
output=$(npm run build:api 2>&1)
|
|
||||||
echo "$output"
|
|
||||||
if echo "$output" | grep -q "Circular depend"; then
|
|
||||||
echo "Error: Circular dependency detected in @librechat/api!"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Create empty auth.json file
|
- name: Create empty auth.json file
|
||||||
run: |
|
run: |
|
||||||
|
|
|
||||||
|
|
@ -5,13 +5,11 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'packages/api/src/cache/**'
|
- 'packages/api/src/cache/**'
|
||||||
- 'packages/api/src/cluster/**'
|
- 'packages/api/src/cluster/**'
|
||||||
- 'packages/api/src/mcp/**'
|
- 'packages/api/src/mcp/**'
|
||||||
- 'packages/api/src/stream/**'
|
|
||||||
- 'redis-config/**'
|
- 'redis-config/**'
|
||||||
- '.github/workflows/cache-integration-tests.yml'
|
- '.github/workflows/cache-integration-tests.yml'
|
||||||
|
|
||||||
|
|
|
||||||
14
.github/workflows/client.yml
vendored
14
.github/workflows/client.yml
vendored
|
|
@ -13,14 +13,9 @@ on:
|
||||||
required: false
|
required: false
|
||||||
default: 'Manual publish requested'
|
default: 'Manual publish requested'
|
||||||
|
|
||||||
permissions:
|
|
||||||
id-token: write # Required for OIDC trusted publishing
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-and-publish:
|
build-and-publish:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
environment: publish # Must match npm trusted publisher config
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
|
@ -28,10 +23,6 @@ jobs:
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: '20.x'
|
node-version: '20.x'
|
||||||
registry-url: 'https://registry.npmjs.org'
|
|
||||||
|
|
||||||
- name: Update npm for OIDC support
|
|
||||||
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
|
|
||||||
|
|
||||||
- name: Install client dependencies
|
- name: Install client dependencies
|
||||||
run: cd packages/client && npm ci
|
run: cd packages/client && npm ci
|
||||||
|
|
@ -39,6 +30,9 @@ jobs:
|
||||||
- name: Build client
|
- name: Build client
|
||||||
run: cd packages/client && npm run build
|
run: cd packages/client && npm run build
|
||||||
|
|
||||||
|
- name: Set up npm authentication
|
||||||
|
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
|
||||||
|
|
||||||
- name: Check version change
|
- name: Check version change
|
||||||
id: check
|
id: check
|
||||||
working-directory: packages/client
|
working-directory: packages/client
|
||||||
|
|
@ -61,4 +55,4 @@ jobs:
|
||||||
- name: Publish
|
- name: Publish
|
||||||
if: steps.check.outputs.skip != 'true'
|
if: steps.check.outputs.skip != 'true'
|
||||||
working-directory: packages/client
|
working-directory: packages/client
|
||||||
run: npm publish *.tgz --access public --provenance
|
run: npm publish *.tgz --access public
|
||||||
13
.github/workflows/data-provider.yml
vendored
13
.github/workflows/data-provider.yml
vendored
|
|
@ -13,10 +13,6 @@ on:
|
||||||
required: false
|
required: false
|
||||||
default: 'Manual publish requested'
|
default: 'Manual publish requested'
|
||||||
|
|
||||||
permissions:
|
|
||||||
id-token: write # Required for OIDC trusted publishing
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
@ -31,17 +27,14 @@ jobs:
|
||||||
publish-npm:
|
publish-npm:
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
environment: publish # Must match npm trusted publisher config
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v4
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 20
|
node-version: 20
|
||||||
registry-url: 'https://registry.npmjs.org'
|
registry-url: 'https://registry.npmjs.org'
|
||||||
|
|
||||||
- name: Update npm for OIDC support
|
|
||||||
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
|
|
||||||
|
|
||||||
- run: cd packages/data-provider && npm ci
|
- run: cd packages/data-provider && npm ci
|
||||||
- run: cd packages/data-provider && npm run build
|
- run: cd packages/data-provider && npm run build
|
||||||
- run: cd packages/data-provider && npm publish --provenance
|
- run: cd packages/data-provider && npm publish
|
||||||
|
env:
|
||||||
|
NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
|
||||||
|
|
|
||||||
14
.github/workflows/data-schemas.yml
vendored
14
.github/workflows/data-schemas.yml
vendored
|
|
@ -13,14 +13,9 @@ on:
|
||||||
required: false
|
required: false
|
||||||
default: 'Manual publish requested'
|
default: 'Manual publish requested'
|
||||||
|
|
||||||
permissions:
|
|
||||||
id-token: write # Required for OIDC trusted publishing
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-and-publish:
|
build-and-publish:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
environment: publish # Must match npm trusted publisher config
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
|
@ -28,10 +23,6 @@ jobs:
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: '20.x'
|
node-version: '20.x'
|
||||||
registry-url: 'https://registry.npmjs.org'
|
|
||||||
|
|
||||||
- name: Update npm for OIDC support
|
|
||||||
run: npm install -g npm@latest # Must be 11.5.1+ for provenance
|
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: cd packages/data-schemas && npm ci
|
run: cd packages/data-schemas && npm ci
|
||||||
|
|
@ -39,6 +30,9 @@ jobs:
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cd packages/data-schemas && npm run build
|
run: cd packages/data-schemas && npm run build
|
||||||
|
|
||||||
|
- name: Set up npm authentication
|
||||||
|
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
|
||||||
|
|
||||||
- name: Check version change
|
- name: Check version change
|
||||||
id: check
|
id: check
|
||||||
working-directory: packages/data-schemas
|
working-directory: packages/data-schemas
|
||||||
|
|
@ -61,4 +55,4 @@ jobs:
|
||||||
- name: Publish
|
- name: Publish
|
||||||
if: steps.check.outputs.skip != 'true'
|
if: steps.check.outputs.skip != 'true'
|
||||||
working-directory: packages/data-schemas
|
working-directory: packages/data-schemas
|
||||||
run: npm publish *.tgz --access public --provenance
|
run: npm publish *.tgz --access public
|
||||||
66
.github/workflows/dev-staging-images.yml
vendored
66
.github/workflows/dev-staging-images.yml
vendored
|
|
@ -1,66 +0,0 @@
|
||||||
name: Docker Dev Staging Images Build
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target: api-build
|
|
||||||
file: Dockerfile.multi
|
|
||||||
image_name: lc-dev-staging-api
|
|
||||||
- target: node
|
|
||||||
file: Dockerfile
|
|
||||||
image_name: lc-dev-staging
|
|
||||||
|
|
||||||
steps:
|
|
||||||
# Check out the repository
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
# Set up QEMU
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
# Set up Docker Buildx
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
# Log in to GitHub Container Registry
|
|
||||||
- name: Log in to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
# Login to Docker Hub
|
|
||||||
- name: Login to Docker Hub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
# Prepare the environment
|
|
||||||
- name: Prepare environment
|
|
||||||
run: |
|
|
||||||
cp .env.example .env
|
|
||||||
|
|
||||||
# Build and push Docker images for each target
|
|
||||||
- name: Build and push Docker images
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: ${{ matrix.file }}
|
|
||||||
push: true
|
|
||||||
tags: |
|
|
||||||
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
|
|
||||||
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
|
|
||||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
|
|
||||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
target: ${{ matrix.target }}
|
|
||||||
|
|
||||||
1
.github/workflows/eslint-ci.yml
vendored
1
.github/workflows/eslint-ci.yml
vendored
|
|
@ -5,7 +5,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
|
|
|
||||||
5
.github/workflows/frontend-review.yml
vendored
5
.github/workflows/frontend-review.yml
vendored
|
|
@ -5,7 +5,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'client/**'
|
- 'client/**'
|
||||||
|
|
@ -16,8 +15,6 @@ jobs:
|
||||||
name: Run frontend unit tests on Ubuntu
|
name: Run frontend unit tests on Ubuntu
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
env:
|
|
||||||
NODE_OPTIONS: '--max-old-space-size=${{ secrets.NODE_MAX_OLD_SPACE_SIZE || 6144 }}'
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Use Node.js 20.x
|
- name: Use Node.js 20.x
|
||||||
|
|
@ -40,8 +37,6 @@ jobs:
|
||||||
name: Run frontend unit tests on Windows
|
name: Run frontend unit tests on Windows
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
env:
|
|
||||||
NODE_OPTIONS: '--max-old-space-size=${{ secrets.NODE_MAX_OLD_SPACE_SIZE || 6144 }}'
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Use Node.js 20.x
|
- name: Use Node.js 20.x
|
||||||
|
|
|
||||||
81
.github/workflows/unused-packages.yml
vendored
81
.github/workflows/unused-packages.yml
vendored
|
|
@ -8,7 +8,6 @@ on:
|
||||||
- 'client/**'
|
- 'client/**'
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
- 'packages/client/**'
|
- 'packages/client/**'
|
||||||
- 'packages/api/**'
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
detect-unused-packages:
|
detect-unused-packages:
|
||||||
|
|
@ -64,45 +63,35 @@ jobs:
|
||||||
extract_deps_from_code() {
|
extract_deps_from_code() {
|
||||||
local folder=$1
|
local folder=$1
|
||||||
local output_file=$2
|
local output_file=$2
|
||||||
|
|
||||||
# Initialize empty output file
|
|
||||||
> "$output_file"
|
|
||||||
|
|
||||||
if [[ -d "$folder" ]]; then
|
if [[ -d "$folder" ]]; then
|
||||||
# Extract require() statements (use explicit includes for portability)
|
# Extract require() statements
|
||||||
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" \
|
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
|
||||||
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# Extract ES6 imports - import x from 'module'
|
# Extract ES6 imports - various patterns
|
||||||
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
# import x from 'module'
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
|
|
||||||
# import 'module' (side-effect imports)
|
# import 'module' (side-effect imports)
|
||||||
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# export { x } from 'module' or export * from 'module'
|
# export { x } from 'module' or export * from 'module'
|
||||||
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# import type { x } from 'module' (TypeScript)
|
# import type { x } from 'module' (TypeScript)
|
||||||
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
|
||||||
--include='*.ts' --include='*.tsx' 2>/dev/null | \
|
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# Remove subpath imports but keep the base package
|
# Remove subpath imports but keep the base package
|
||||||
# For scoped packages: '@scope/pkg/subpath' -> '@scope/pkg'
|
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
|
||||||
# For regular packages: 'pkg/subpath' -> 'pkg'
|
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
|
||||||
# Scoped packages (must keep @scope/package, strip anything after)
|
|
||||||
sed -i -E 's|^(@[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
|
|
||||||
# Non-scoped packages (keep package name, strip subpath)
|
|
||||||
sed -i -E 's|^([a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
|
|
||||||
|
|
||||||
sort -u "$output_file" -o "$output_file"
|
sort -u "$output_file" -o "$output_file"
|
||||||
|
else
|
||||||
|
touch "$output_file"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -110,10 +99,8 @@ jobs:
|
||||||
extract_deps_from_code "client" client_used_code.txt
|
extract_deps_from_code "client" client_used_code.txt
|
||||||
extract_deps_from_code "api" api_used_code.txt
|
extract_deps_from_code "api" api_used_code.txt
|
||||||
|
|
||||||
# Extract dependencies used by workspace packages
|
# Extract dependencies used by @librechat/client package
|
||||||
# These packages are used in the workspace but dependencies are provided by parent package.json
|
|
||||||
extract_deps_from_code "packages/client" packages_client_used_code.txt
|
extract_deps_from_code "packages/client" packages_client_used_code.txt
|
||||||
extract_deps_from_code "packages/api" packages_api_used_code.txt
|
|
||||||
|
|
||||||
- name: Get @librechat/client dependencies
|
- name: Get @librechat/client dependencies
|
||||||
id: get-librechat-client-deps
|
id: get-librechat-client-deps
|
||||||
|
|
@ -139,30 +126,6 @@ jobs:
|
||||||
touch librechat_client_deps.txt
|
touch librechat_client_deps.txt
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Get @librechat/api dependencies
|
|
||||||
id: get-librechat-api-deps
|
|
||||||
run: |
|
|
||||||
if [[ -f "packages/api/package.json" ]]; then
|
|
||||||
# Get all dependencies from @librechat/api (dependencies, devDependencies, and peerDependencies)
|
|
||||||
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
|
|
||||||
# Combine all dependencies
|
|
||||||
echo "$DEPS" > librechat_api_deps.txt
|
|
||||||
echo "$DEV_DEPS" >> librechat_api_deps.txt
|
|
||||||
echo "$PEER_DEPS" >> librechat_api_deps.txt
|
|
||||||
|
|
||||||
# Also include dependencies that are imported in packages/api
|
|
||||||
cat packages_api_used_code.txt >> librechat_api_deps.txt
|
|
||||||
|
|
||||||
# Remove empty lines and sort
|
|
||||||
grep -v '^$' librechat_api_deps.txt | sort -u > temp_deps.txt
|
|
||||||
mv temp_deps.txt librechat_api_deps.txt
|
|
||||||
else
|
|
||||||
touch librechat_api_deps.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Extract Workspace Dependencies
|
- name: Extract Workspace Dependencies
|
||||||
id: extract-workspace-deps
|
id: extract-workspace-deps
|
||||||
run: |
|
run: |
|
||||||
|
|
@ -221,8 +184,8 @@ jobs:
|
||||||
chmod -R 755 client
|
chmod -R 755 client
|
||||||
cd client
|
cd client
|
||||||
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
||||||
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/client imports
|
# Exclude dependencies used in scripts, code, and workspace packages
|
||||||
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt ../packages_client_used_code.txt ../librechat_client_deps.txt 2>/dev/null | sort -u) || echo "")
|
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
|
||||||
# Filter out false positives
|
# Filter out false positives
|
||||||
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
|
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
|
||||||
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
|
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
|
||||||
|
|
@ -238,8 +201,8 @@ jobs:
|
||||||
chmod -R 755 api
|
chmod -R 755 api
|
||||||
cd api
|
cd api
|
||||||
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
||||||
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/api imports
|
# Exclude dependencies used in scripts, code, and workspace packages
|
||||||
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt ../packages_api_used_code.txt ../librechat_api_deps.txt 2>/dev/null | sort -u) || echo "")
|
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
|
||||||
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
|
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
|
||||||
echo "$UNUSED" >> $GITHUB_ENV
|
echo "$UNUSED" >> $GITHUB_ENV
|
||||||
echo "EOF" >> $GITHUB_ENV
|
echo "EOF" >> $GITHUB_ENV
|
||||||
|
|
|
||||||
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -15,7 +15,6 @@ pids
|
||||||
|
|
||||||
# CI/CD data
|
# CI/CD data
|
||||||
test-image*
|
test-image*
|
||||||
dump.rdb
|
|
||||||
|
|
||||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||||
lib-cov
|
lib-cov
|
||||||
|
|
@ -30,9 +29,6 @@ coverage
|
||||||
config/translations/stores/*
|
config/translations/stores/*
|
||||||
client/src/localization/languages/*_missing_keys.json
|
client/src/localization/languages/*_missing_keys.json
|
||||||
|
|
||||||
# Turborepo
|
|
||||||
.turbo
|
|
||||||
|
|
||||||
# Compiled Dirs (http://nodejs.org/api/addons.html)
|
# Compiled Dirs (http://nodejs.org/api/addons.html)
|
||||||
build/
|
build/
|
||||||
dist/
|
dist/
|
||||||
|
|
|
||||||
158
AGENTS.md
158
AGENTS.md
|
|
@ -1,158 +0,0 @@
|
||||||
# LibreChat
|
|
||||||
|
|
||||||
## Project Overview
|
|
||||||
|
|
||||||
LibreChat is a monorepo with the following key workspaces:
|
|
||||||
|
|
||||||
| Workspace | Language | Side | Dependency | Purpose |
|
|
||||||
|---|---|---|---|---|
|
|
||||||
| `/api` | JS (legacy) | Backend | `packages/api`, `packages/data-schemas`, `packages/data-provider`, `@librechat/agents` | Express server — minimize changes here |
|
|
||||||
| `/packages/api` | **TypeScript** | Backend | `packages/data-schemas`, `packages/data-provider` | New backend code lives here (TS only, consumed by `/api`) |
|
|
||||||
| `/packages/data-schemas` | TypeScript | Backend | `packages/data-provider` | Database models/schemas, shareable across backend projects |
|
|
||||||
| `/packages/data-provider` | TypeScript | Shared | — | Shared API types, endpoints, data-service — used by both frontend and backend |
|
|
||||||
| `/client` | TypeScript/React | Frontend | `packages/data-provider`, `packages/client` | Frontend SPA |
|
|
||||||
| `/packages/client` | TypeScript | Frontend | `packages/data-provider` | Shared frontend utilities |
|
|
||||||
|
|
||||||
The source code for `@librechat/agents` (major backend dependency, same team) is at `/home/danny/agentus`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Workspace Boundaries
|
|
||||||
|
|
||||||
- **All new backend code must be TypeScript** in `/packages/api`.
|
|
||||||
- Keep `/api` changes to the absolute minimum (thin JS wrappers calling into `/packages/api`).
|
|
||||||
- Database-specific shared logic goes in `/packages/data-schemas`.
|
|
||||||
- Frontend/backend shared API logic (endpoints, types, data-service) goes in `/packages/data-provider`.
|
|
||||||
- Build data-provider from project root: `npm run build:data-provider`.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Code Style
|
|
||||||
|
|
||||||
### Structure and Clarity
|
|
||||||
|
|
||||||
- **Never-nesting**: early returns, flat code, minimal indentation. Break complex operations into well-named helpers.
|
|
||||||
- **Functional first**: pure functions, immutable data, `map`/`filter`/`reduce` over imperative loops. Only reach for OOP when it clearly improves domain modeling or state encapsulation.
|
|
||||||
- **No dynamic imports** unless absolutely necessary.
|
|
||||||
|
|
||||||
### DRY
|
|
||||||
|
|
||||||
- Extract repeated logic into utility functions.
|
|
||||||
- Reusable hooks / higher-order components for UI patterns.
|
|
||||||
- Parameterized helpers instead of near-duplicate functions.
|
|
||||||
- Constants for repeated values; configuration objects over duplicated init code.
|
|
||||||
- Shared validators, centralized error handling, single source of truth for business rules.
|
|
||||||
- Shared typing system with interfaces/types extending common base definitions.
|
|
||||||
- Abstraction layers for external API interactions.
|
|
||||||
|
|
||||||
### Iteration and Performance
|
|
||||||
|
|
||||||
- **Minimize looping** — especially over shared data structures like message arrays, which are iterated frequently throughout the codebase. Every additional pass adds up at scale.
|
|
||||||
- Consolidate sequential O(n) operations into a single pass whenever possible; never loop over the same collection twice if the work can be combined.
|
|
||||||
- Choose data structures that reduce the need to iterate (e.g., `Map`/`Set` for lookups instead of `Array.find`/`Array.includes`).
|
|
||||||
- Avoid unnecessary object creation; consider space-time tradeoffs.
|
|
||||||
- Prevent memory leaks: careful with closures, dispose resources/event listeners, no circular references.
|
|
||||||
|
|
||||||
### Type Safety
|
|
||||||
|
|
||||||
- **Never use `any`**. Explicit types for all parameters, return values, and variables.
|
|
||||||
- **Limit `unknown`** — avoid `unknown`, `Record<string, unknown>`, and `as unknown as T` assertions. A `Record<string, unknown>` almost always signals a missing explicit type definition.
|
|
||||||
- **Don't duplicate types** — before defining a new type, check whether it already exists in the project (especially `packages/data-provider`). Reuse and extend existing types rather than creating redundant definitions.
|
|
||||||
- Use union types, generics, and interfaces appropriately.
|
|
||||||
- All TypeScript and ESLint warnings/errors must be addressed — do not leave unresolved diagnostics.
|
|
||||||
|
|
||||||
### Comments and Documentation
|
|
||||||
|
|
||||||
- Write self-documenting code; no inline comments narrating what code does.
|
|
||||||
- JSDoc only for complex/non-obvious logic or intellisense on public APIs.
|
|
||||||
- Single-line JSDoc for brief docs, multi-line for complex cases.
|
|
||||||
- Avoid standalone `//` comments unless absolutely necessary.
|
|
||||||
|
|
||||||
### Import Order
|
|
||||||
|
|
||||||
Imports are organized into three sections:
|
|
||||||
|
|
||||||
1. **Package imports** — sorted shortest to longest line length (`react` always first).
|
|
||||||
2. **`import type` imports** — sorted longest to shortest (package types first, then local types; length resets between sub-groups).
|
|
||||||
3. **Local/project imports** — sorted longest to shortest.
|
|
||||||
|
|
||||||
Multi-line imports count total character length across all lines. Consolidate value imports from the same module. Always use standalone `import type { ... }` — never inline `type` inside value imports.
|
|
||||||
|
|
||||||
### JS/TS Loop Preferences
|
|
||||||
|
|
||||||
- **Limit looping as much as possible.** Prefer single-pass transformations and avoid re-iterating the same data.
|
|
||||||
- `for (let i = 0; ...)` for performance-critical or index-dependent operations.
|
|
||||||
- `for...of` for simple array iteration.
|
|
||||||
- `for...in` only for object property enumeration.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Frontend Rules (`client/src/**/*`)
|
|
||||||
|
|
||||||
### Localization
|
|
||||||
|
|
||||||
- All user-facing text must use `useLocalize()`.
|
|
||||||
- Only update English keys in `client/src/locales/en/translation.json` (other languages are automated externally).
|
|
||||||
- Semantic key prefixes: `com_ui_`, `com_assistants_`, etc.
|
|
||||||
|
|
||||||
### Components
|
|
||||||
|
|
||||||
- TypeScript for all React components with proper type imports.
|
|
||||||
- Semantic HTML with ARIA labels (`role`, `aria-label`) for accessibility.
|
|
||||||
- Group related components in feature directories (e.g., `SidePanel/Memories/`).
|
|
||||||
- Use index files for clean exports.
|
|
||||||
|
|
||||||
### Data Management
|
|
||||||
|
|
||||||
- Feature hooks: `client/src/data-provider/[Feature]/queries.ts` → `[Feature]/index.ts` → `client/src/data-provider/index.ts`.
|
|
||||||
- React Query (`@tanstack/react-query`) for all API interactions; proper query invalidation on mutations.
|
|
||||||
- QueryKeys and MutationKeys in `packages/data-provider/src/keys.ts`.
|
|
||||||
|
|
||||||
### Data-Provider Integration
|
|
||||||
|
|
||||||
- Endpoints: `packages/data-provider/src/api-endpoints.ts`
|
|
||||||
- Data service: `packages/data-provider/src/data-service.ts`
|
|
||||||
- Types: `packages/data-provider/src/types/queries.ts`
|
|
||||||
- Use `encodeURIComponent` for dynamic URL parameters.
|
|
||||||
|
|
||||||
### Performance
|
|
||||||
|
|
||||||
- Prioritize memory and speed efficiency at scale.
|
|
||||||
- Cursor pagination for large datasets.
|
|
||||||
- Proper dependency arrays to avoid unnecessary re-renders.
|
|
||||||
- Leverage React Query caching and background refetching.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Development Commands
|
|
||||||
|
|
||||||
| Command | Purpose |
|
|
||||||
|---|---|
|
|
||||||
| `npm run smart-reinstall` | Install deps (if lockfile changed) + build via Turborepo |
|
|
||||||
| `npm run reinstall` | Clean install — wipe `node_modules` and reinstall from scratch |
|
|
||||||
| `npm run backend` | Start the backend server |
|
|
||||||
| `npm run backend:dev` | Start backend with file watching (development) |
|
|
||||||
| `npm run build` | Build all compiled code via Turborepo (parallel, cached) |
|
|
||||||
| `npm run frontend` | Build all compiled code sequentially (legacy fallback) |
|
|
||||||
| `npm run frontend:dev` | Start frontend dev server with HMR (port 3090, requires backend running) |
|
|
||||||
| `npm run build:data-provider` | Rebuild `packages/data-provider` after changes |
|
|
||||||
|
|
||||||
- Node.js: v20.19.0+ or ^22.12.0 or >= 23.0.0
|
|
||||||
- Database: MongoDB
|
|
||||||
- Backend runs on `http://localhost:3080/`; frontend dev server on `http://localhost:3090/`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
- Framework: **Jest**, run per-workspace.
|
|
||||||
- Run tests from their workspace directory: `cd api && npx jest <pattern>`, `cd packages/api && npx jest <pattern>`, etc.
|
|
||||||
- Frontend tests: `__tests__` directories alongside components; use `test/layout-test-utils` for rendering.
|
|
||||||
- Cover loading, success, and error states for UI/data flows.
|
|
||||||
- Mock data-provider hooks and external dependencies.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Formatting
|
|
||||||
|
|
||||||
Fix all formatting lint errors (trailing spaces, tabs, newlines, indentation) using auto-fix when available. All TypeScript/ESLint warnings and errors **must** be resolved.
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
AGENTS.md
|
|
||||||
13
Dockerfile
13
Dockerfile
|
|
@ -1,4 +1,4 @@
|
||||||
# v0.8.3-rc1
|
# v0.8.1-rc2
|
||||||
|
|
||||||
# Base node image
|
# Base node image
|
||||||
FROM node:20-alpine AS node
|
FROM node:20-alpine AS node
|
||||||
|
|
@ -11,12 +11,9 @@ RUN apk add --no-cache python3 py3-pip uv
|
||||||
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
||||||
|
|
||||||
# Add `uv` for extended MCP support
|
# Add `uv` for extended MCP support
|
||||||
COPY --from=ghcr.io/astral-sh/uv:0.9.5-python3.12-alpine /usr/local/bin/uv /usr/local/bin/uvx /bin/
|
COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/
|
||||||
RUN uv --version
|
RUN uv --version
|
||||||
|
|
||||||
# Set configurable max-old-space-size with default
|
|
||||||
ARG NODE_MAX_OLD_SPACE_SIZE=6144
|
|
||||||
|
|
||||||
RUN mkdir -p /app && chown node:node /app
|
RUN mkdir -p /app && chown node:node /app
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
|
@ -33,7 +30,7 @@ RUN \
|
||||||
# Allow mounting of these files, which have no default
|
# Allow mounting of these files, which have no default
|
||||||
touch .env ; \
|
touch .env ; \
|
||||||
# Create directories for the volumes to inherit the correct permissions
|
# Create directories for the volumes to inherit the correct permissions
|
||||||
mkdir -p /app/client/public/images /app/logs /app/uploads ; \
|
mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
|
||||||
npm config set fetch-retry-maxtimeout 600000 ; \
|
npm config set fetch-retry-maxtimeout 600000 ; \
|
||||||
npm config set fetch-retries 5 ; \
|
npm config set fetch-retries 5 ; \
|
||||||
npm config set fetch-retry-mintimeout 15000 ; \
|
npm config set fetch-retry-mintimeout 15000 ; \
|
||||||
|
|
@ -42,8 +39,8 @@ RUN \
|
||||||
COPY --chown=node:node . .
|
COPY --chown=node:node . .
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
# React client build with configurable memory
|
# React client build
|
||||||
NODE_OPTIONS="--max-old-space-size=${NODE_MAX_OLD_SPACE_SIZE}" npm run frontend; \
|
NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \
|
||||||
npm prune --production; \
|
npm prune --production; \
|
||||||
npm cache clean --force
|
npm cache clean --force
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,8 +1,5 @@
|
||||||
# Dockerfile.multi
|
# Dockerfile.multi
|
||||||
# v0.8.3-rc1
|
# v0.8.1-rc2
|
||||||
|
|
||||||
# Set configurable max-old-space-size with default
|
|
||||||
ARG NODE_MAX_OLD_SPACE_SIZE=6144
|
|
||||||
|
|
||||||
# Base for all builds
|
# Base for all builds
|
||||||
FROM node:20-alpine AS base-min
|
FROM node:20-alpine AS base-min
|
||||||
|
|
@ -10,7 +7,6 @@ FROM node:20-alpine AS base-min
|
||||||
RUN apk add --no-cache jemalloc
|
RUN apk add --no-cache jemalloc
|
||||||
# Set environment variable to use jemalloc
|
# Set environment variable to use jemalloc
|
||||||
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
RUN apk --no-cache add curl
|
RUN apk --no-cache add curl
|
||||||
RUN npm config set fetch-retry-maxtimeout 600000 && \
|
RUN npm config set fetch-retry-maxtimeout 600000 && \
|
||||||
|
|
@ -63,8 +59,7 @@ COPY client ./
|
||||||
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
|
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
|
||||||
COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
|
COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
|
||||||
COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
|
COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
|
||||||
ARG NODE_MAX_OLD_SPACE_SIZE
|
ENV NODE_OPTIONS="--max-old-space-size=2048"
|
||||||
ENV NODE_OPTIONS="--max-old-space-size=${NODE_MAX_OLD_SPACE_SIZE}"
|
|
||||||
RUN npm run build
|
RUN npm run build
|
||||||
|
|
||||||
# API setup (including client dist)
|
# API setup (including client dist)
|
||||||
|
|
|
||||||
17
README.md
17
README.md
|
|
@ -27,8 +27,8 @@
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://railway.com/deploy/b5k2mn?referralCode=HI9hWz">
|
<a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
|
||||||
<img src="https://railway.com/button.svg" alt="Deploy on Railway" height="30">
|
<img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://zeabur.com/templates/0X2ZY8">
|
<a href="https://zeabur.com/templates/0X2ZY8">
|
||||||
<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30"/>
|
<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30"/>
|
||||||
|
|
@ -109,11 +109,6 @@
|
||||||
- 🎨 **Customizable Interface**:
|
- 🎨 **Customizable Interface**:
|
||||||
- Customizable Dropdown & Interface that adapts to both power users and newcomers
|
- Customizable Dropdown & Interface that adapts to both power users and newcomers
|
||||||
|
|
||||||
- 🌊 **[Resumable Streams](https://www.librechat.ai/docs/features/resumable_streams)**:
|
|
||||||
- Never lose a response: AI responses automatically reconnect and resume if your connection drops
|
|
||||||
- Multi-Tab & Multi-Device Sync: Open the same chat in multiple tabs or pick up on another device
|
|
||||||
- Production-Ready: Works from single-server setups to horizontally scaled deployments with Redis
|
|
||||||
|
|
||||||
- 🗣️ **Speech & Audio**:
|
- 🗣️ **Speech & Audio**:
|
||||||
- Chat hands-free with Speech-to-Text and Text-to-Speech
|
- Chat hands-free with Speech-to-Text and Text-to-Speech
|
||||||
- Automatically send and play Audio
|
- Automatically send and play Audio
|
||||||
|
|
@ -142,11 +137,13 @@
|
||||||
|
|
||||||
## 🪶 All-In-One AI Conversations with LibreChat
|
## 🪶 All-In-One AI Conversations with LibreChat
|
||||||
|
|
||||||
LibreChat is a self-hosted AI chat platform that unifies all major AI providers in a single, privacy-focused interface.
|
LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
|
||||||
|
|
||||||
Beyond chat, LibreChat provides AI Agents, Model Context Protocol (MCP) support, Artifacts, Code Interpreter, custom actions, conversation search, and enterprise-ready multi-user authentication.
|
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
|
||||||
|
|
||||||
Open source, actively developed, and built for anyone who values control over their AI infrastructure.
|
[](https://www.youtube.com/watch?v=ilfwGQtJNlI)
|
||||||
|
|
||||||
|
Click on the thumbnail to open the video☝️
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
991
api/app/clients/AnthropicClient.js
Normal file
991
api/app/clients/AnthropicClient.js
Normal file
|
|
@ -0,0 +1,991 @@
|
||||||
|
const Anthropic = require('@anthropic-ai/sdk');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
|
const {
|
||||||
|
Constants,
|
||||||
|
ErrorTypes,
|
||||||
|
EModelEndpoint,
|
||||||
|
parseTextParts,
|
||||||
|
anthropicSettings,
|
||||||
|
getResponseSender,
|
||||||
|
validateVisionModel,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
|
const { sleep, SplitStreamHandler: _Handler, addCacheControl } = require('@librechat/agents');
|
||||||
|
const {
|
||||||
|
Tokenizer,
|
||||||
|
createFetch,
|
||||||
|
matchModelName,
|
||||||
|
getClaudeHeaders,
|
||||||
|
getModelMaxTokens,
|
||||||
|
configureReasoning,
|
||||||
|
checkPromptCacheSupport,
|
||||||
|
getModelMaxOutputTokens,
|
||||||
|
createStreamEventHandlers,
|
||||||
|
} = require('@librechat/api');
|
||||||
|
const {
|
||||||
|
truncateText,
|
||||||
|
formatMessage,
|
||||||
|
titleFunctionPrompt,
|
||||||
|
parseParamFromPrompt,
|
||||||
|
createContextHandlers,
|
||||||
|
} = require('./prompts');
|
||||||
|
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
|
||||||
|
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
||||||
|
const BaseClient = require('./BaseClient');
|
||||||
|
|
||||||
|
const HUMAN_PROMPT = '\n\nHuman:';
|
||||||
|
const AI_PROMPT = '\n\nAssistant:';
|
||||||
|
|
||||||
|
class SplitStreamHandler extends _Handler {
|
||||||
|
getDeltaContent(chunk) {
|
||||||
|
return (chunk?.delta?.text ?? chunk?.completion) || '';
|
||||||
|
}
|
||||||
|
getReasoningDelta(chunk) {
|
||||||
|
return chunk?.delta?.thinking || '';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Helper function to introduce a delay before retrying */
|
||||||
|
function delayBeforeRetry(attempts, baseDelay = 1000) {
|
||||||
|
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
|
||||||
|
}
|
||||||
|
|
||||||
|
const tokenEventTypes = new Set(['message_start', 'message_delta']);
|
||||||
|
const { legacy } = anthropicSettings;
|
||||||
|
|
||||||
|
class AnthropicClient extends BaseClient {
|
||||||
|
constructor(apiKey, options = {}) {
|
||||||
|
super(apiKey, options);
|
||||||
|
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
|
||||||
|
this.userLabel = HUMAN_PROMPT;
|
||||||
|
this.assistantLabel = AI_PROMPT;
|
||||||
|
this.contextStrategy = options.contextStrategy
|
||||||
|
? options.contextStrategy.toLowerCase()
|
||||||
|
: 'discard';
|
||||||
|
this.setOptions(options);
|
||||||
|
/** @type {string | undefined} */
|
||||||
|
this.systemMessage;
|
||||||
|
/** @type {AnthropicMessageStartEvent| undefined} */
|
||||||
|
this.message_start;
|
||||||
|
/** @type {AnthropicMessageDeltaEvent| undefined} */
|
||||||
|
this.message_delta;
|
||||||
|
/** Whether the model is part of the Claude 3 Family
|
||||||
|
* @type {boolean} */
|
||||||
|
this.isClaudeLatest;
|
||||||
|
/** Whether to use Messages API or Completions API
|
||||||
|
* @type {boolean} */
|
||||||
|
this.useMessages;
|
||||||
|
/** Whether or not the model supports Prompt Caching
|
||||||
|
* @type {boolean} */
|
||||||
|
this.supportsCacheControl;
|
||||||
|
/** The key for the usage object's input tokens
|
||||||
|
* @type {string} */
|
||||||
|
this.inputTokensKey = 'input_tokens';
|
||||||
|
/** The key for the usage object's output tokens
|
||||||
|
* @type {string} */
|
||||||
|
this.outputTokensKey = 'output_tokens';
|
||||||
|
/** @type {SplitStreamHandler | undefined} */
|
||||||
|
this.streamHandler;
|
||||||
|
}
|
||||||
|
|
||||||
|
setOptions(options) {
|
||||||
|
if (this.options && !this.options.replaceOptions) {
|
||||||
|
// nested options aren't spread properly, so we need to do this manually
|
||||||
|
this.options.modelOptions = {
|
||||||
|
...this.options.modelOptions,
|
||||||
|
...options.modelOptions,
|
||||||
|
};
|
||||||
|
delete options.modelOptions;
|
||||||
|
// now we can merge options
|
||||||
|
this.options = {
|
||||||
|
...this.options,
|
||||||
|
...options,
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
this.options = options;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.modelOptions = Object.assign(
|
||||||
|
{
|
||||||
|
model: anthropicSettings.model.default,
|
||||||
|
},
|
||||||
|
this.modelOptions,
|
||||||
|
this.options.modelOptions,
|
||||||
|
);
|
||||||
|
|
||||||
|
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
|
||||||
|
this.isClaudeLatest =
|
||||||
|
/claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
|
||||||
|
const isLegacyOutput = !(
|
||||||
|
/claude-3[-.]5-sonnet/.test(modelMatch) ||
|
||||||
|
/claude-3[-.]7/.test(modelMatch) ||
|
||||||
|
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
|
||||||
|
/claude-[4-9]/.test(modelMatch)
|
||||||
|
);
|
||||||
|
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
|
||||||
|
|
||||||
|
if (
|
||||||
|
isLegacyOutput &&
|
||||||
|
this.modelOptions.maxOutputTokens &&
|
||||||
|
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
|
||||||
|
) {
|
||||||
|
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.useMessages = this.isClaudeLatest || !!this.options.attachments;
|
||||||
|
|
||||||
|
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
|
||||||
|
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
|
||||||
|
|
||||||
|
this.maxContextTokens =
|
||||||
|
this.options.maxContextTokens ??
|
||||||
|
getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ??
|
||||||
|
100000;
|
||||||
|
this.maxResponseTokens =
|
||||||
|
this.modelOptions.maxOutputTokens ??
|
||||||
|
getModelMaxOutputTokens(
|
||||||
|
this.modelOptions.model,
|
||||||
|
this.options.endpointType ?? this.options.endpoint,
|
||||||
|
this.options.endpointTokenConfig,
|
||||||
|
) ??
|
||||||
|
anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
|
||||||
|
this.maxPromptTokens =
|
||||||
|
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
|
||||||
|
|
||||||
|
const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
|
||||||
|
if (reservedTokens > this.maxContextTokens) {
|
||||||
|
const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(info);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
} else if (this.maxResponseTokens === this.maxContextTokens) {
|
||||||
|
const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(info);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.sender =
|
||||||
|
this.options.sender ??
|
||||||
|
getResponseSender({
|
||||||
|
model: this.modelOptions.model,
|
||||||
|
endpoint: EModelEndpoint.anthropic,
|
||||||
|
modelLabel: this.options.modelLabel,
|
||||||
|
});
|
||||||
|
|
||||||
|
this.startToken = '||>';
|
||||||
|
this.endToken = '';
|
||||||
|
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the initialized Anthropic client.
|
||||||
|
* @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
|
||||||
|
* @returns {Anthropic} The Anthropic client instance.
|
||||||
|
*/
|
||||||
|
getClient(requestOptions) {
|
||||||
|
/** @type {Anthropic.ClientOptions} */
|
||||||
|
const options = {
|
||||||
|
fetch: createFetch({
|
||||||
|
directEndpoint: this.options.directEndpoint,
|
||||||
|
reverseProxyUrl: this.options.reverseProxyUrl,
|
||||||
|
}),
|
||||||
|
apiKey: this.apiKey,
|
||||||
|
fetchOptions: {},
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.options.proxy) {
|
||||||
|
options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.reverseProxyUrl) {
|
||||||
|
options.baseURL = this.options.reverseProxyUrl;
|
||||||
|
}
|
||||||
|
|
||||||
|
const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
|
||||||
|
if (headers) {
|
||||||
|
options.defaultHeaders = headers;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new Anthropic(options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get stream usage as returned by this client's API response.
|
||||||
|
* @returns {AnthropicStreamUsage} The stream usage object.
|
||||||
|
*/
|
||||||
|
getStreamUsage() {
|
||||||
|
const inputUsage = this.message_start?.message?.usage ?? {};
|
||||||
|
const outputUsage = this.message_delta?.usage ?? {};
|
||||||
|
return Object.assign({}, inputUsage, outputUsage);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculates the correct token count for the current user message based on the token count map and API usage.
|
||||||
|
* Edge case: If the calculation results in a negative value, it returns the original estimate.
|
||||||
|
* If revisiting a conversation with a chat history entirely composed of token estimates,
|
||||||
|
* the cumulative token count going forward should become more accurate as the conversation progresses.
|
||||||
|
* @param {Object} params - The parameters for the calculation.
|
||||||
|
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
|
||||||
|
* @param {string} params.currentMessageId - The ID of the current message to calculate.
|
||||||
|
* @param {AnthropicStreamUsage} params.usage - The usage object returned by the API.
|
||||||
|
* @returns {number} The correct token count for the current user message.
|
||||||
|
*/
|
||||||
|
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
|
||||||
|
const originalEstimate = tokenCountMap[currentMessageId] || 0;
|
||||||
|
|
||||||
|
if (!usage || typeof usage.input_tokens !== 'number') {
|
||||||
|
return originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenCountMap[currentMessageId] = 0;
|
||||||
|
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
|
||||||
|
const numCount = Number(count);
|
||||||
|
return sum + (isNaN(numCount) ? 0 : numCount);
|
||||||
|
}, 0);
|
||||||
|
const totalInputTokens =
|
||||||
|
(usage.input_tokens ?? 0) +
|
||||||
|
(usage.cache_creation_input_tokens ?? 0) +
|
||||||
|
(usage.cache_read_input_tokens ?? 0);
|
||||||
|
|
||||||
|
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
|
||||||
|
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Token Count for LibreChat Message
|
||||||
|
* @param {TMessage} responseMessage
|
||||||
|
* @returns {number}
|
||||||
|
*/
|
||||||
|
getTokenCountForResponse(responseMessage) {
|
||||||
|
return this.getTokenCountForMessage({
|
||||||
|
role: 'assistant',
|
||||||
|
content: responseMessage.text,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
|
||||||
|
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
|
||||||
|
* - Sets `this.isVisionModel` to `true` if vision request.
|
||||||
|
* - Deletes `this.modelOptions.stop` if vision request.
|
||||||
|
* @param {MongoFile[]} attachments
|
||||||
|
*/
|
||||||
|
checkVisionRequest(attachments) {
|
||||||
|
const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
|
||||||
|
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
|
||||||
|
|
||||||
|
const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
|
||||||
|
if (
|
||||||
|
attachments &&
|
||||||
|
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
|
||||||
|
visionModelAvailable &&
|
||||||
|
!this.isVisionModel
|
||||||
|
) {
|
||||||
|
this.modelOptions.model = this.defaultVisionModel;
|
||||||
|
this.isVisionModel = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate the token cost in tokens for an image based on its dimensions and detail level.
|
||||||
|
*
|
||||||
|
* For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
|
||||||
|
*
|
||||||
|
* @param {Object} image - The image object.
|
||||||
|
* @param {number} image.width - The width of the image.
|
||||||
|
* @param {number} image.height - The height of the image.
|
||||||
|
* @returns {number} The calculated token cost measured by tokens.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
calculateImageTokenCost({ width, height }) {
|
||||||
|
return Math.ceil((width * height) / 750);
|
||||||
|
}
|
||||||
|
|
||||||
|
async addImageURLs(message, attachments) {
|
||||||
|
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments, {
|
||||||
|
endpoint: EModelEndpoint.anthropic,
|
||||||
|
});
|
||||||
|
message.image_urls = image_urls.length ? image_urls : undefined;
|
||||||
|
return files;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {object} params
|
||||||
|
* @param {number} params.promptTokens
|
||||||
|
* @param {number} params.completionTokens
|
||||||
|
* @param {AnthropicStreamUsage} [params.usage]
|
||||||
|
* @param {string} [params.model]
|
||||||
|
* @param {string} [params.context='message']
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*/
|
||||||
|
async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) {
|
||||||
|
if (usage != null && usage?.input_tokens != null) {
|
||||||
|
const input = usage.input_tokens ?? 0;
|
||||||
|
const write = usage.cache_creation_input_tokens ?? 0;
|
||||||
|
const read = usage.cache_read_input_tokens ?? 0;
|
||||||
|
|
||||||
|
await spendStructuredTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
promptTokens: { input, write, read },
|
||||||
|
completionTokens,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await spendTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{ promptTokens, completionTokens },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
async buildMessages(messages, parentMessageId) {
|
||||||
|
const orderedMessages = this.constructor.getMessagesForConversation({
|
||||||
|
messages,
|
||||||
|
parentMessageId,
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });
|
||||||
|
|
||||||
|
if (this.options.attachments) {
|
||||||
|
const attachments = await this.options.attachments;
|
||||||
|
const images = attachments.filter((file) => file.type.includes('image'));
|
||||||
|
|
||||||
|
if (images.length && !this.isVisionModel) {
|
||||||
|
throw new Error('Images are only supported with the Claude 3 family of models');
|
||||||
|
}
|
||||||
|
|
||||||
|
const latestMessage = orderedMessages[orderedMessages.length - 1];
|
||||||
|
|
||||||
|
if (this.message_file_map) {
|
||||||
|
this.message_file_map[latestMessage.messageId] = attachments;
|
||||||
|
} else {
|
||||||
|
this.message_file_map = {
|
||||||
|
[latestMessage.messageId]: attachments,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const files = await this.addImageURLs(latestMessage, attachments);
|
||||||
|
|
||||||
|
this.options.attachments = files;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.message_file_map) {
|
||||||
|
this.contextHandlers = createContextHandlers(
|
||||||
|
this.options.req,
|
||||||
|
orderedMessages[orderedMessages.length - 1].text,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const formattedMessages = orderedMessages.map((message, i) => {
|
||||||
|
const formattedMessage = this.useMessages
|
||||||
|
? formatMessage({
|
||||||
|
message,
|
||||||
|
endpoint: EModelEndpoint.anthropic,
|
||||||
|
})
|
||||||
|
: {
|
||||||
|
author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
|
||||||
|
content: message?.content ?? message.text,
|
||||||
|
};
|
||||||
|
|
||||||
|
const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
|
||||||
|
/* If tokens were never counted, or, is a Vision request and the message has files, count again */
|
||||||
|
if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
|
||||||
|
orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If message has files, calculate image token cost */
|
||||||
|
if (this.message_file_map && this.message_file_map[message.messageId]) {
|
||||||
|
const attachments = this.message_file_map[message.messageId];
|
||||||
|
for (const file of attachments) {
|
||||||
|
if (file.embedded) {
|
||||||
|
this.contextHandlers?.processFile(file);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (file.metadata?.fileIdentifier) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
|
||||||
|
width: file.width,
|
||||||
|
height: file.height,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
formattedMessage.tokenCount = orderedMessages[i].tokenCount;
|
||||||
|
return formattedMessage;
|
||||||
|
});
|
||||||
|
|
||||||
|
if (this.contextHandlers) {
|
||||||
|
this.augmentedPrompt = await this.contextHandlers.createContext();
|
||||||
|
this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
|
||||||
|
}
|
||||||
|
|
||||||
|
let { context: messagesInWindow, remainingContextTokens } =
|
||||||
|
await this.getMessagesWithinTokenLimit({ messages: formattedMessages });
|
||||||
|
|
||||||
|
const tokenCountMap = orderedMessages
|
||||||
|
.slice(orderedMessages.length - messagesInWindow.length)
|
||||||
|
.reduce((map, message, index) => {
|
||||||
|
const { messageId } = message;
|
||||||
|
if (!messageId) {
|
||||||
|
return map;
|
||||||
|
}
|
||||||
|
|
||||||
|
map[messageId] = orderedMessages[index].tokenCount;
|
||||||
|
return map;
|
||||||
|
}, {});
|
||||||
|
|
||||||
|
logger.debug('[AnthropicClient]', {
|
||||||
|
messagesInWindow: messagesInWindow.length,
|
||||||
|
remainingContextTokens,
|
||||||
|
});
|
||||||
|
|
||||||
|
let lastAuthor = '';
|
||||||
|
let groupedMessages = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < messagesInWindow.length; i++) {
|
||||||
|
const message = messagesInWindow[i];
|
||||||
|
const author = message.role ?? message.author;
|
||||||
|
// If last author is not same as current author, add to new group
|
||||||
|
if (lastAuthor !== author) {
|
||||||
|
const newMessage = {
|
||||||
|
content: [message.content],
|
||||||
|
};
|
||||||
|
|
||||||
|
if (message.role) {
|
||||||
|
newMessage.role = message.role;
|
||||||
|
} else {
|
||||||
|
newMessage.author = message.author;
|
||||||
|
}
|
||||||
|
|
||||||
|
groupedMessages.push(newMessage);
|
||||||
|
lastAuthor = author;
|
||||||
|
// If same author, append content to the last group
|
||||||
|
} else {
|
||||||
|
groupedMessages[groupedMessages.length - 1].content.push(message.content);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
groupedMessages = groupedMessages.map((msg, i) => {
|
||||||
|
const isLast = i === groupedMessages.length - 1;
|
||||||
|
if (msg.content.length === 1) {
|
||||||
|
const content = msg.content[0];
|
||||||
|
return {
|
||||||
|
...msg,
|
||||||
|
// reason: final assistant content cannot end with trailing whitespace
|
||||||
|
content:
|
||||||
|
isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
|
||||||
|
? content?.trim()
|
||||||
|
: content,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!this.useMessages && msg.tokenCount) {
|
||||||
|
delete msg.tokenCount;
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
});
|
||||||
|
|
||||||
|
let identityPrefix = '';
|
||||||
|
if (this.options.userLabel) {
|
||||||
|
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.modelLabel) {
|
||||||
|
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
let promptPrefix = (this.options.promptPrefix ?? '').trim();
|
||||||
|
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
|
||||||
|
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
|
||||||
|
}
|
||||||
|
if (promptPrefix) {
|
||||||
|
// If the prompt prefix doesn't end with the end token, add it.
|
||||||
|
if (!promptPrefix.endsWith(`${this.endToken}`)) {
|
||||||
|
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
|
||||||
|
}
|
||||||
|
promptPrefix = `\nContext:\n${promptPrefix}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (identityPrefix) {
|
||||||
|
promptPrefix = `${identityPrefix}${promptPrefix}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prompt AI to respond, empty if last message was from AI
|
||||||
|
let isEdited = lastAuthor === this.assistantLabel;
|
||||||
|
const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
|
||||||
|
let currentTokenCount =
|
||||||
|
isEdited || this.useMessages
|
||||||
|
? this.getTokenCount(promptPrefix)
|
||||||
|
: this.getTokenCount(promptSuffix);
|
||||||
|
|
||||||
|
let promptBody = '';
|
||||||
|
const maxTokenCount = this.maxPromptTokens;
|
||||||
|
|
||||||
|
const context = [];
|
||||||
|
|
||||||
|
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
|
||||||
|
// Do this within a recursive async function so that it doesn't block the event loop for too long.
|
||||||
|
// Also, remove the next message when the message that puts us over the token limit is created by the user.
|
||||||
|
// Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
|
||||||
|
const nextMessage = {
|
||||||
|
remove: false,
|
||||||
|
tokenCount: 0,
|
||||||
|
messageString: '',
|
||||||
|
};
|
||||||
|
|
||||||
|
const buildPromptBody = async () => {
|
||||||
|
if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
|
||||||
|
const message = groupedMessages.pop();
|
||||||
|
const isCreatedByUser = message.author === this.userLabel;
|
||||||
|
// Use promptPrefix if message is edited assistant'
|
||||||
|
const messagePrefix =
|
||||||
|
isCreatedByUser || !isEdited ? message.author : `${promptPrefix}${message.author}`;
|
||||||
|
const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
|
||||||
|
let newPromptBody = `${messageString}${promptBody}`;
|
||||||
|
|
||||||
|
context.unshift(message);
|
||||||
|
|
||||||
|
const tokenCountForMessage = this.getTokenCount(messageString);
|
||||||
|
const newTokenCount = currentTokenCount + tokenCountForMessage;
|
||||||
|
|
||||||
|
if (!isCreatedByUser) {
|
||||||
|
nextMessage.messageString = messageString;
|
||||||
|
nextMessage.tokenCount = tokenCountForMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (newTokenCount > maxTokenCount) {
|
||||||
|
if (!promptBody) {
|
||||||
|
// This is the first message, so we can't add it. Just throw an error.
|
||||||
|
throw new Error(
|
||||||
|
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, ths message would put us over the token limit, so don't add it.
|
||||||
|
// if created by user, remove next message, otherwise remove only this message
|
||||||
|
if (isCreatedByUser) {
|
||||||
|
nextMessage.remove = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
promptBody = newPromptBody;
|
||||||
|
currentTokenCount = newTokenCount;
|
||||||
|
|
||||||
|
// Switch off isEdited after using it for the first time
|
||||||
|
if (isEdited) {
|
||||||
|
isEdited = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for next tick to avoid blocking the event loop
|
||||||
|
await new Promise((resolve) => setImmediate(resolve));
|
||||||
|
return buildPromptBody();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
|
||||||
|
const messagesPayload = [];
|
||||||
|
const buildMessagesPayload = async () => {
|
||||||
|
let canContinue = true;
|
||||||
|
|
||||||
|
if (promptPrefix) {
|
||||||
|
this.systemMessage = promptPrefix;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
|
||||||
|
const message = groupedMessages.pop();
|
||||||
|
|
||||||
|
let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
|
||||||
|
|
||||||
|
const newTokenCount = currentTokenCount + tokenCountForMessage;
|
||||||
|
const exceededMaxCount = newTokenCount > maxTokenCount;
|
||||||
|
|
||||||
|
if (exceededMaxCount && messagesPayload.length === 0) {
|
||||||
|
throw new Error(
|
||||||
|
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||||
|
);
|
||||||
|
} else if (exceededMaxCount) {
|
||||||
|
canContinue = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
delete message.tokenCount;
|
||||||
|
messagesPayload.unshift(message);
|
||||||
|
currentTokenCount = newTokenCount;
|
||||||
|
|
||||||
|
// Switch off isEdited after using it once
|
||||||
|
if (isEdited && message.role === 'assistant') {
|
||||||
|
isEdited = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for next tick to avoid blocking the event loop
|
||||||
|
await new Promise((resolve) => setImmediate(resolve));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const processTokens = () => {
|
||||||
|
// Add 2 tokens for metadata after all messages have been counted.
|
||||||
|
currentTokenCount += 2;
|
||||||
|
|
||||||
|
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
|
||||||
|
this.modelOptions.maxOutputTokens = Math.min(
|
||||||
|
this.maxContextTokens - currentTokenCount,
|
||||||
|
this.maxResponseTokens,
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
if (
|
||||||
|
/claude-[3-9]/.test(this.modelOptions.model) ||
|
||||||
|
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
|
||||||
|
) {
|
||||||
|
await buildMessagesPayload();
|
||||||
|
processTokens();
|
||||||
|
return {
|
||||||
|
prompt: messagesPayload,
|
||||||
|
context: messagesInWindow,
|
||||||
|
promptTokens: currentTokenCount,
|
||||||
|
tokenCountMap,
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
await buildPromptBody();
|
||||||
|
processTokens();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (nextMessage.remove) {
|
||||||
|
promptBody = promptBody.replace(nextMessage.messageString, '');
|
||||||
|
currentTokenCount -= nextMessage.tokenCount;
|
||||||
|
context.shift();
|
||||||
|
}
|
||||||
|
|
||||||
|
let prompt = `${promptBody}${promptSuffix}`;
|
||||||
|
|
||||||
|
return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
|
||||||
|
}
|
||||||
|
|
||||||
|
getCompletion() {
|
||||||
|
logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a message or completion response using the Anthropic client.
|
||||||
|
* @param {Anthropic} client - The Anthropic client instance.
|
||||||
|
* @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
|
||||||
|
* @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
|
||||||
|
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
|
||||||
|
*/
|
||||||
|
async createResponse(client, options, useMessages) {
|
||||||
|
return (useMessages ?? this.useMessages)
|
||||||
|
? await client.messages.create(options)
|
||||||
|
: await client.completions.create(options);
|
||||||
|
}
|
||||||
|
|
||||||
|
getMessageMapMethod() {
|
||||||
|
/**
|
||||||
|
* @param {TMessage} msg
|
||||||
|
*/
|
||||||
|
return (msg) => {
|
||||||
|
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
|
||||||
|
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
|
||||||
|
} else if (msg.content != null) {
|
||||||
|
msg.text = parseTextParts(msg.content, true);
|
||||||
|
delete msg.content;
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {string[]} [intermediateReply]
|
||||||
|
* @returns {string}
|
||||||
|
*/
|
||||||
|
getStreamText(intermediateReply) {
|
||||||
|
if (!this.streamHandler) {
|
||||||
|
return intermediateReply?.join('') ?? '';
|
||||||
|
}
|
||||||
|
|
||||||
|
const reasoningText = this.streamHandler.reasoningTokens.join('');
|
||||||
|
|
||||||
|
const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
|
||||||
|
|
||||||
|
return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async sendCompletion(payload, { onProgress, abortController }) {
|
||||||
|
if (!abortController) {
|
||||||
|
abortController = new AbortController();
|
||||||
|
}
|
||||||
|
|
||||||
|
const { signal } = abortController;
|
||||||
|
|
||||||
|
const modelOptions = { ...this.modelOptions };
|
||||||
|
if (typeof onProgress === 'function') {
|
||||||
|
modelOptions.stream = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('modelOptions', { modelOptions });
|
||||||
|
const metadata = {
|
||||||
|
user_id: this.user,
|
||||||
|
};
|
||||||
|
|
||||||
|
const {
|
||||||
|
stream,
|
||||||
|
model,
|
||||||
|
temperature,
|
||||||
|
maxOutputTokens,
|
||||||
|
stop: stop_sequences,
|
||||||
|
topP: top_p,
|
||||||
|
topK: top_k,
|
||||||
|
} = this.modelOptions;
|
||||||
|
|
||||||
|
let requestOptions = {
|
||||||
|
model,
|
||||||
|
stream: stream || true,
|
||||||
|
stop_sequences,
|
||||||
|
temperature,
|
||||||
|
metadata,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.useMessages) {
|
||||||
|
requestOptions.messages = payload;
|
||||||
|
requestOptions.max_tokens =
|
||||||
|
maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
|
||||||
|
} else {
|
||||||
|
requestOptions.prompt = payload;
|
||||||
|
requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
|
||||||
|
}
|
||||||
|
|
||||||
|
requestOptions = configureReasoning(requestOptions, {
|
||||||
|
thinking: this.options.thinking,
|
||||||
|
thinkingBudget: this.options.thinkingBudget,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!/claude-3[-.]7/.test(model)) {
|
||||||
|
requestOptions.top_p = top_p;
|
||||||
|
requestOptions.top_k = top_k;
|
||||||
|
} else if (requestOptions.thinking == null) {
|
||||||
|
requestOptions.topP = top_p;
|
||||||
|
requestOptions.topK = top_k;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.systemMessage && this.supportsCacheControl === true) {
|
||||||
|
requestOptions.system = [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: this.systemMessage,
|
||||||
|
cache_control: { type: 'ephemeral' },
|
||||||
|
},
|
||||||
|
];
|
||||||
|
} else if (this.systemMessage) {
|
||||||
|
requestOptions.system = this.systemMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.supportsCacheControl === true && this.useMessages) {
|
||||||
|
requestOptions.messages = addCacheControl(requestOptions.messages);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('[AnthropicClient]', { ...requestOptions });
|
||||||
|
const handlers = createStreamEventHandlers(this.options.res);
|
||||||
|
this.streamHandler = new SplitStreamHandler({
|
||||||
|
accumulate: true,
|
||||||
|
runId: this.responseMessageId,
|
||||||
|
handlers,
|
||||||
|
});
|
||||||
|
|
||||||
|
let intermediateReply = this.streamHandler.tokens;
|
||||||
|
|
||||||
|
const maxRetries = 3;
|
||||||
|
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
|
||||||
|
async function processResponse() {
|
||||||
|
let attempts = 0;
|
||||||
|
|
||||||
|
while (attempts < maxRetries) {
|
||||||
|
let response;
|
||||||
|
try {
|
||||||
|
const client = this.getClient(requestOptions);
|
||||||
|
response = await this.createResponse(client, requestOptions);
|
||||||
|
|
||||||
|
signal.addEventListener('abort', () => {
|
||||||
|
logger.debug('[AnthropicClient] message aborted!');
|
||||||
|
if (response.controller?.abort) {
|
||||||
|
response.controller.abort();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
for await (const completion of response) {
|
||||||
|
const type = completion?.type ?? '';
|
||||||
|
if (tokenEventTypes.has(type)) {
|
||||||
|
logger.debug(`[AnthropicClient] ${type}`, completion);
|
||||||
|
this[type] = completion;
|
||||||
|
}
|
||||||
|
this.streamHandler.handle(completion);
|
||||||
|
await sleep(streamRate);
|
||||||
|
}
|
||||||
|
|
||||||
|
break;
|
||||||
|
} catch (error) {
|
||||||
|
attempts += 1;
|
||||||
|
logger.warn(
|
||||||
|
`User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (attempts < maxRetries) {
|
||||||
|
await delayBeforeRetry(attempts, 350);
|
||||||
|
} else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
|
||||||
|
return this.getStreamText();
|
||||||
|
} else if (intermediateReply.length > 0) {
|
||||||
|
return this.getStreamText(intermediateReply);
|
||||||
|
} else {
|
||||||
|
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
signal.removeEventListener('abort', () => {
|
||||||
|
logger.debug('[AnthropicClient] message aborted!');
|
||||||
|
if (response.controller?.abort) {
|
||||||
|
response.controller.abort();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await processResponse.bind(this)();
|
||||||
|
return this.getStreamText(intermediateReply);
|
||||||
|
}
|
||||||
|
|
||||||
|
getSaveOptions() {
|
||||||
|
return {
|
||||||
|
maxContextTokens: this.options.maxContextTokens,
|
||||||
|
artifacts: this.options.artifacts,
|
||||||
|
promptPrefix: this.options.promptPrefix,
|
||||||
|
modelLabel: this.options.modelLabel,
|
||||||
|
promptCache: this.options.promptCache,
|
||||||
|
thinking: this.options.thinking,
|
||||||
|
thinkingBudget: this.options.thinkingBudget,
|
||||||
|
resendFiles: this.options.resendFiles,
|
||||||
|
iconURL: this.options.iconURL,
|
||||||
|
greeting: this.options.greeting,
|
||||||
|
spec: this.options.spec,
|
||||||
|
...this.modelOptions,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// No-op: AnthropicClient builds its payload without extra options; this
// exists only for interface parity with BaseClient.
getBuildMessagesOptions() {
  logger.debug("AnthropicClient doesn't use getBuildMessagesOptions");
}
|
||||||
|
|
||||||
|
/**
 * @returns {string} The tokenizer encoding name used for token counting.
 */
getEncoding() {
  return 'cl100k_base';
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
|
||||||
|
* @param {string} text - The text to get the token count for.
|
||||||
|
* @returns {number} The token count of the given text.
|
||||||
|
*/
|
||||||
|
getTokenCount(text) {
|
||||||
|
const encoding = this.getEncoding();
|
||||||
|
return Tokenizer.getTokenCount(text, encoding);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates a concise title for a conversation based on the user's input text and response.
|
||||||
|
* Involves sending a chat completion request with specific instructions for title generation.
|
||||||
|
*
|
||||||
|
* This function capitlizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
|
||||||
|
*
|
||||||
|
* @param {Object} params - The parameters for the conversation title generation.
|
||||||
|
* @param {string} params.text - The user's input.
|
||||||
|
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
|
||||||
|
*
|
||||||
|
* @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
|
||||||
|
* In case of failure, it will return the default title, "New Chat".
|
||||||
|
*/
|
||||||
|
async titleConvo({ text, responseText = '' }) {
|
||||||
|
let title = 'New Chat';
|
||||||
|
this.message_delta = undefined;
|
||||||
|
this.message_start = undefined;
|
||||||
|
const convo = `<initial_message>
|
||||||
|
${truncateText(text)}
|
||||||
|
</initial_message>
|
||||||
|
<response>
|
||||||
|
${JSON.stringify(truncateText(responseText))}
|
||||||
|
</response>`;
|
||||||
|
|
||||||
|
const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
|
||||||
|
const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
|
||||||
|
const system = titleFunctionPrompt;
|
||||||
|
|
||||||
|
const titleChatCompletion = async () => {
|
||||||
|
const content = `<conversation_context>
|
||||||
|
${convo}
|
||||||
|
</conversation_context>
|
||||||
|
|
||||||
|
Please generate a title for this conversation.`;
|
||||||
|
|
||||||
|
const titleMessage = { role: 'user', content };
|
||||||
|
const requestOptions = {
|
||||||
|
model,
|
||||||
|
temperature: 0.3,
|
||||||
|
max_tokens: 1024,
|
||||||
|
system,
|
||||||
|
stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
|
||||||
|
messages: [titleMessage],
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
const response = await this.createResponse(
|
||||||
|
this.getClient(requestOptions),
|
||||||
|
requestOptions,
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
let promptTokens = response?.usage?.input_tokens;
|
||||||
|
let completionTokens = response?.usage?.output_tokens;
|
||||||
|
if (!promptTokens) {
|
||||||
|
promptTokens = this.getTokenCountForMessage(titleMessage);
|
||||||
|
promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
|
||||||
|
}
|
||||||
|
if (!completionTokens) {
|
||||||
|
completionTokens = this.getTokenCountForMessage(response.content[0]);
|
||||||
|
}
|
||||||
|
await this.recordTokenUsage({
|
||||||
|
model,
|
||||||
|
promptTokens,
|
||||||
|
completionTokens,
|
||||||
|
context: 'title',
|
||||||
|
});
|
||||||
|
const text = response.content[0].text;
|
||||||
|
title = parseParamFromPrompt(text, 'title');
|
||||||
|
} catch (e) {
|
||||||
|
logger.error('[AnthropicClient] There was an issue generating the title', e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
await titleChatCompletion();
|
||||||
|
logger.debug('[AnthropicClient] Convo Title: ' + title);
|
||||||
|
return title;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = AnthropicClient;
|
||||||
|
|
@ -2,9 +2,7 @@ const crypto = require('crypto');
|
||||||
const fetch = require('node-fetch');
|
const fetch = require('node-fetch');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const {
|
const {
|
||||||
countTokens,
|
|
||||||
getBalanceConfig,
|
getBalanceConfig,
|
||||||
buildMessageFiles,
|
|
||||||
extractFileContext,
|
extractFileContext,
|
||||||
encodeAndFormatAudios,
|
encodeAndFormatAudios,
|
||||||
encodeAndFormatVideos,
|
encodeAndFormatVideos,
|
||||||
|
|
@ -19,21 +17,14 @@ const {
|
||||||
EModelEndpoint,
|
EModelEndpoint,
|
||||||
isParamEndpoint,
|
isParamEndpoint,
|
||||||
isAgentsEndpoint,
|
isAgentsEndpoint,
|
||||||
isEphemeralAgentId,
|
|
||||||
supportsBalanceCheck,
|
supportsBalanceCheck,
|
||||||
isBedrockDocumentType,
|
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const {
|
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
|
||||||
updateMessage,
|
|
||||||
getMessages,
|
|
||||||
saveMessage,
|
|
||||||
saveConvo,
|
|
||||||
getConvo,
|
|
||||||
getFiles,
|
|
||||||
} = require('~/models');
|
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { checkBalance } = require('~/models/balanceMethods');
|
const { checkBalance } = require('~/models/balanceMethods');
|
||||||
const { truncateToolCallOutputs } = require('./prompts');
|
const { truncateToolCallOutputs } = require('./prompts');
|
||||||
|
const countTokens = require('~/server/utils/countTokens');
|
||||||
|
const { getFiles } = require('~/models/File');
|
||||||
const TextStream = require('./TextStream');
|
const TextStream = require('./TextStream');
|
||||||
|
|
||||||
class BaseClient {
|
class BaseClient {
|
||||||
|
|
@ -124,9 +115,7 @@ class BaseClient {
|
||||||
* @returns {number}
|
* @returns {number}
|
||||||
*/
|
*/
|
||||||
getTokenCountForResponse(responseMessage) {
|
getTokenCountForResponse(responseMessage) {
|
||||||
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
|
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', responseMessage);
|
||||||
messageId: responseMessage?.messageId,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -137,14 +126,12 @@ class BaseClient {
|
||||||
* @param {AppConfig['balance']} [balance]
|
* @param {AppConfig['balance']} [balance]
|
||||||
* @param {number} promptTokens
|
* @param {number} promptTokens
|
||||||
* @param {number} completionTokens
|
* @param {number} completionTokens
|
||||||
* @param {string} [messageId]
|
|
||||||
* @returns {Promise<void>}
|
* @returns {Promise<void>}
|
||||||
*/
|
*/
|
||||||
async recordTokenUsage({ model, balance, promptTokens, completionTokens, messageId }) {
|
async recordTokenUsage({ model, balance, promptTokens, completionTokens }) {
|
||||||
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
|
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
|
||||||
model,
|
model,
|
||||||
balance,
|
balance,
|
||||||
messageId,
|
|
||||||
promptTokens,
|
promptTokens,
|
||||||
completionTokens,
|
completionTokens,
|
||||||
});
|
});
|
||||||
|
|
@ -665,27 +652,16 @@ class BaseClient {
|
||||||
);
|
);
|
||||||
|
|
||||||
if (tokenCountMap) {
|
if (tokenCountMap) {
|
||||||
|
logger.debug('[BaseClient] tokenCountMap', tokenCountMap);
|
||||||
if (tokenCountMap[userMessage.messageId]) {
|
if (tokenCountMap[userMessage.messageId]) {
|
||||||
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
||||||
logger.debug('[BaseClient] userMessage', {
|
logger.debug('[BaseClient] userMessage', userMessage);
|
||||||
messageId: userMessage.messageId,
|
|
||||||
tokenCount: userMessage.tokenCount,
|
|
||||||
conversationId: userMessage.conversationId,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
this.handleTokenCountMap(tokenCountMap);
|
this.handleTokenCountMap(tokenCountMap);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!isEdited && !this.skipSaveUserMessage) {
|
if (!isEdited && !this.skipSaveUserMessage) {
|
||||||
const reqFiles = this.options.req?.body?.files;
|
|
||||||
if (reqFiles && Array.isArray(this.options.attachments)) {
|
|
||||||
const files = buildMessageFiles(reqFiles, this.options.attachments);
|
|
||||||
if (files.length > 0) {
|
|
||||||
userMessage.files = files;
|
|
||||||
}
|
|
||||||
delete userMessage.image_urls;
|
|
||||||
}
|
|
||||||
userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
|
userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
|
||||||
this.savedMessageIds.add(userMessage.messageId);
|
this.savedMessageIds.add(userMessage.messageId);
|
||||||
if (typeof opts?.getReqData === 'function') {
|
if (typeof opts?.getReqData === 'function') {
|
||||||
|
|
@ -732,7 +708,7 @@ class BaseClient {
|
||||||
iconURL: this.options.iconURL,
|
iconURL: this.options.iconURL,
|
||||||
endpoint: this.options.endpoint,
|
endpoint: this.options.endpoint,
|
||||||
...(this.metadata ?? {}),
|
...(this.metadata ?? {}),
|
||||||
metadata: Object.keys(metadata ?? {}).length > 0 ? metadata : undefined,
|
metadata,
|
||||||
};
|
};
|
||||||
|
|
||||||
if (typeof completion === 'string') {
|
if (typeof completion === 'string') {
|
||||||
|
|
@ -797,18 +773,9 @@ class BaseClient {
|
||||||
promptTokens,
|
promptTokens,
|
||||||
completionTokens,
|
completionTokens,
|
||||||
balance: balanceConfig,
|
balance: balanceConfig,
|
||||||
/** Note: When using agents, responseMessage.model is the agent ID, not the model */
|
model: responseMessage.model,
|
||||||
model: this.model,
|
|
||||||
messageId: this.responseMessageId,
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.debug('[BaseClient] Response token usage', {
|
|
||||||
messageId: responseMessage.messageId,
|
|
||||||
model: responseMessage.model,
|
|
||||||
promptTokens,
|
|
||||||
completionTokens,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (userMessagePromise) {
|
if (userMessagePromise) {
|
||||||
|
|
@ -964,7 +931,6 @@ class BaseClient {
|
||||||
throw new Error('User mismatch.');
|
throw new Error('User mismatch.');
|
||||||
}
|
}
|
||||||
|
|
||||||
const hasAddedConvo = this.options?.req?.body?.addedConvo != null;
|
|
||||||
const savedMessage = await saveMessage(
|
const savedMessage = await saveMessage(
|
||||||
this.options?.req,
|
this.options?.req,
|
||||||
{
|
{
|
||||||
|
|
@ -972,7 +938,6 @@ class BaseClient {
|
||||||
endpoint: this.options.endpoint,
|
endpoint: this.options.endpoint,
|
||||||
unfinished: false,
|
unfinished: false,
|
||||||
user,
|
user,
|
||||||
...(hasAddedConvo && { addedConvo: true }),
|
|
||||||
},
|
},
|
||||||
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
|
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
|
||||||
);
|
);
|
||||||
|
|
@ -995,13 +960,6 @@ class BaseClient {
|
||||||
|
|
||||||
const unsetFields = {};
|
const unsetFields = {};
|
||||||
const exceptions = new Set(['spec', 'iconURL']);
|
const exceptions = new Set(['spec', 'iconURL']);
|
||||||
const hasNonEphemeralAgent =
|
|
||||||
isAgentsEndpoint(this.options.endpoint) &&
|
|
||||||
endpointOptions?.agent_id &&
|
|
||||||
!isEphemeralAgentId(endpointOptions.agent_id);
|
|
||||||
if (hasNonEphemeralAgent) {
|
|
||||||
exceptions.add('model');
|
|
||||||
}
|
|
||||||
if (existingConvo != null) {
|
if (existingConvo != null) {
|
||||||
this.fetchedConvo = true;
|
this.fetchedConvo = true;
|
||||||
for (const key in existingConvo) {
|
for (const key in existingConvo) {
|
||||||
|
|
@ -1053,8 +1011,7 @@ class BaseClient {
|
||||||
* @param {Object} options - The options for the function.
|
* @param {Object} options - The options for the function.
|
||||||
* @param {TMessage[]} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property.
|
* @param {TMessage[]} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property.
|
||||||
* @param {string} options.parentMessageId - The ID of the parent message to start the traversal from.
|
* @param {string} options.parentMessageId - The ID of the parent message to start the traversal from.
|
||||||
* @param {Function} [options.mapMethod] - An optional function to map over the ordered messages. Applied conditionally based on mapCondition.
|
* @param {Function} [options.mapMethod] - An optional function to map over the ordered messages. If provided, it will be applied to each message in the resulting array.
|
||||||
* @param {(message: TMessage) => boolean} [options.mapCondition] - An optional function to determine whether mapMethod should be applied to a given message. If not provided and mapMethod is set, mapMethod applies to all messages.
|
|
||||||
* @param {boolean} [options.summary=false] - If set to true, the traversal modifies messages with 'summary' and 'summaryTokenCount' properties and stops at the message with a 'summary' property.
|
* @param {boolean} [options.summary=false] - If set to true, the traversal modifies messages with 'summary' and 'summaryTokenCount' properties and stops at the message with a 'summary' property.
|
||||||
* @returns {TMessage[]} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'.
|
* @returns {TMessage[]} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'.
|
||||||
*/
|
*/
|
||||||
|
|
@ -1062,7 +1019,6 @@ class BaseClient {
|
||||||
messages,
|
messages,
|
||||||
parentMessageId,
|
parentMessageId,
|
||||||
mapMethod = null,
|
mapMethod = null,
|
||||||
mapCondition = null,
|
|
||||||
summary = false,
|
summary = false,
|
||||||
}) {
|
}) {
|
||||||
if (!messages || messages.length === 0) {
|
if (!messages || messages.length === 0) {
|
||||||
|
|
@ -1097,9 +1053,7 @@ class BaseClient {
|
||||||
message.tokenCount = message.summaryTokenCount;
|
message.tokenCount = message.summaryTokenCount;
|
||||||
}
|
}
|
||||||
|
|
||||||
const shouldMap = mapMethod != null && (mapCondition != null ? mapCondition(message) : true);
|
orderedMessages.push(message);
|
||||||
const processedMessage = shouldMap ? mapMethod(message) : message;
|
|
||||||
orderedMessages.push(processedMessage);
|
|
||||||
|
|
||||||
if (summary && message.summary) {
|
if (summary && message.summary) {
|
||||||
break;
|
break;
|
||||||
|
|
@ -1110,6 +1064,11 @@ class BaseClient {
|
||||||
}
|
}
|
||||||
|
|
||||||
orderedMessages.reverse();
|
orderedMessages.reverse();
|
||||||
|
|
||||||
|
if (mapMethod) {
|
||||||
|
return orderedMessages.map(mapMethod);
|
||||||
|
}
|
||||||
|
|
||||||
return orderedMessages;
|
return orderedMessages;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1326,9 +1285,6 @@ class BaseClient {
|
||||||
|
|
||||||
const allFiles = [];
|
const allFiles = [];
|
||||||
|
|
||||||
const provider = this.options.agent?.provider ?? this.options.endpoint;
|
|
||||||
const isBedrock = provider === EModelEndpoint.bedrock;
|
|
||||||
|
|
||||||
for (const file of attachments) {
|
for (const file of attachments) {
|
||||||
/** @type {FileSources} */
|
/** @type {FileSources} */
|
||||||
const source = file.source ?? FileSources.local;
|
const source = file.source ?? FileSources.local;
|
||||||
|
|
@ -1346,9 +1302,6 @@ class BaseClient {
|
||||||
} else if (file.type === 'application/pdf') {
|
} else if (file.type === 'application/pdf') {
|
||||||
categorizedAttachments.documents.push(file);
|
categorizedAttachments.documents.push(file);
|
||||||
allFiles.push(file);
|
allFiles.push(file);
|
||||||
} else if (isBedrock && isBedrockDocumentType(file.type)) {
|
|
||||||
categorizedAttachments.documents.push(file);
|
|
||||||
allFiles.push(file);
|
|
||||||
} else if (file.type.startsWith('video/')) {
|
} else if (file.type.startsWith('video/')) {
|
||||||
categorizedAttachments.videos.push(file);
|
categorizedAttachments.videos.push(file);
|
||||||
allFiles.push(file);
|
allFiles.push(file);
|
||||||
|
|
|
||||||
994
api/app/clients/GoogleClient.js
Normal file
994
api/app/clients/GoogleClient.js
Normal file
|
|
@ -0,0 +1,994 @@
|
||||||
|
const { google } = require('googleapis');
|
||||||
|
const { sleep } = require('@librechat/agents');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { getModelMaxTokens } = require('@librechat/api');
|
||||||
|
const { concat } = require('@langchain/core/utils/stream');
|
||||||
|
const { ChatVertexAI } = require('@langchain/google-vertexai');
|
||||||
|
const { Tokenizer, getSafetySettings } = require('@librechat/api');
|
||||||
|
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
|
||||||
|
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
|
||||||
|
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
|
||||||
|
const {
|
||||||
|
googleGenConfigSchema,
|
||||||
|
validateVisionModel,
|
||||||
|
getResponseSender,
|
||||||
|
endpointSettings,
|
||||||
|
parseTextParts,
|
||||||
|
EModelEndpoint,
|
||||||
|
googleSettings,
|
||||||
|
ContentTypes,
|
||||||
|
VisionModes,
|
||||||
|
ErrorTypes,
|
||||||
|
Constants,
|
||||||
|
AuthKeys,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
|
const { encodeAndFormat } = require('~/server/services/Files/images');
|
||||||
|
const { spendTokens } = require('~/models/spendTokens');
|
||||||
|
const {
|
||||||
|
formatMessage,
|
||||||
|
createContextHandlers,
|
||||||
|
titleInstruction,
|
||||||
|
truncateText,
|
||||||
|
} = require('./prompts');
|
||||||
|
const BaseClient = require('./BaseClient');
|
||||||
|
|
||||||
|
const loc = process.env.GOOGLE_LOC || 'us-central1';
|
||||||
|
const publisher = 'google';
|
||||||
|
const endpointPrefix =
|
||||||
|
loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`;
|
||||||
|
|
||||||
|
const settings = endpointSettings[EModelEndpoint.google];
|
||||||
|
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
|
||||||
|
|
||||||
|
class GoogleClient extends BaseClient {
|
||||||
|
/**
 * @param {string | object} credentials - Google credentials; either a JSON string
 * or an already-parsed object containing the service key and/or API key.
 * @param {object} [options={}] - Client options; `skipSetOptions` defers option setup.
 */
constructor(credentials, options = {}) {
  super('apiKey', options);
  let creds = {};

  // Credentials may arrive serialized as JSON or already parsed.
  if (typeof credentials === 'string') {
    creds = JSON.parse(credentials);
  } else if (credentials) {
    creds = credentials;
  }

  const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  // The service key itself may be double-encoded as a JSON string.
  this.serviceKey =
    serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? {});
  /** @type {string | null | undefined} */
  this.project_id = this.serviceKey.project_id;
  this.client_email = this.serviceKey.client_email;
  this.private_key = this.serviceKey.private_key;
  this.access_token = null;

  this.apiKey = creds[AuthKeys.GOOGLE_API_KEY];

  this.reverseProxyUrl = options.reverseProxyUrl;

  this.authHeader = options.authHeader;

  /** @type {UsageMetadata | undefined} */
  this.usage;
  /** The key for the usage object's input tokens
   * @type {string} */
  this.inputTokensKey = 'input_tokens';
  /** The key for the usage object's output tokens
   * @type {string} */
  this.outputTokensKey = 'output_tokens';
  this.visionMode = VisionModes.generative;
  /** @type {string} */
  this.systemMessage;
  // Callers such as title generation construct the client without
  // immediately configuring model options.
  if (options.skipSetOptions) {
    return;
  }
  this.setOptions(options);
}
|
||||||
|
|
||||||
|
/* Google specific methods */
|
||||||
|
constructUrl() {
|
||||||
|
return `https://${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async getClient() {
|
||||||
|
const scopes = ['https://www.googleapis.com/auth/cloud-platform'];
|
||||||
|
const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes);
|
||||||
|
|
||||||
|
jwtClient.authorize((err) => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('jwtClient failed to authorize', err);
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return jwtClient;
|
||||||
|
}
|
||||||
|
|
||||||
|
async getAccessToken() {
|
||||||
|
const scopes = ['https://www.googleapis.com/auth/cloud-platform'];
|
||||||
|
const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes);
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
jwtClient.authorize((err, tokens) => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('jwtClient failed to authorize', err);
|
||||||
|
reject(err);
|
||||||
|
} else {
|
||||||
|
resolve(tokens.access_token);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Required Client methods */
|
||||||
|
/**
 * Merges and applies client options: model options, vision checks, token
 * budgets, thinking config, sender label, and the completions endpoint URL.
 * @param {object} options - Client options; `replaceOptions` replaces instead of merging.
 * @returns {this}
 * @throws {Error} If maxPromptTokens + maxOutputTokens exceeds maxContextTokens.
 */
setOptions(options) {
  if (this.options && !this.options.replaceOptions) {
    // nested options aren't spread properly, so we need to do this manually
    this.options.modelOptions = {
      ...this.options.modelOptions,
      ...options.modelOptions,
    };
    delete options.modelOptions;
    // now we can merge options
    this.options = {
      ...this.options,
      ...options,
    };
  } else {
    this.options = options;
  }

  this.modelOptions = this.options.modelOptions || {};

  // `attachments` is a promise; vision-model detection runs once it resolves.
  this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

  /** @type {boolean} Whether using a "GenerativeAI" Model */
  this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model);

  this.maxContextTokens =
    this.options.maxContextTokens ??
    getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google);

  // The max prompt tokens is determined by the max context tokens minus the max response tokens.
  // Earlier messages will be dropped until the prompt is within the limit.
  this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default;

  // NOTE(review): large-context models reserve response headroom up front;
  // smaller contexts are left untouched — confirm the 32000 threshold intent.
  if (this.maxContextTokens > 32000) {
    this.maxContextTokens = this.maxContextTokens - this.maxResponseTokens;
  }

  this.maxPromptTokens =
    this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

  if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
    throw new Error(
      `maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
        this.maxPromptTokens + this.maxResponseTokens
      }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
    );
  }

  // Add thinking configuration; a disabled `thinking` flag zeroes the budget.
  this.modelOptions.thinkingConfig = {
    thinkingBudget:
      (this.modelOptions.thinking ?? googleSettings.thinking.default)
        ? this.modelOptions.thinkingBudget
        : 0,
  };
  // Raw flags are consumed above and must not be sent to the API.
  delete this.modelOptions.thinking;
  delete this.modelOptions.thinkingBudget;

  this.sender =
    this.options.sender ??
    getResponseSender({
      model: this.modelOptions.model,
      endpoint: EModelEndpoint.google,
      modelLabel: this.options.modelLabel,
    });

  this.userLabel = this.options.userLabel || 'User';
  this.modelLabel = this.options.modelLabel || 'Assistant';

  if (this.options.reverseProxyUrl) {
    this.completionsUrl = this.options.reverseProxyUrl;
  } else {
    this.completionsUrl = this.constructUrl();
  }

  let promptPrefix = (this.options.promptPrefix ?? '').trim();
  if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
    promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
  }
  this.systemMessage = promptPrefix;
  this.initializeClient();
  return this;
}
|
||||||
|
|
||||||
|
/**
 *
 * Checks if the model is a vision model based on request attachments and sets the appropriate options:
 * may switch `this.modelOptions.model` to a vision-capable model when image
 * attachments are present, or back to 'gemini-pro' when a vision model is
 * selected without attachments.
 * @param {MongoFile[]} attachments
 */
checkVisionRequest(attachments) {
  /* Validation vision request */
  this.defaultVisionModel =
    this.options.visionModel ??
    (!EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)
      ? this.modelOptions.model
      : 'gemini-pro-vision');
  const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
  this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });

  // Upgrade to the default vision model only when an image is attached, the
  // model is actually available, and the current model is not vision-capable.
  if (
    attachments &&
    attachments.some((file) => file?.type && file?.type?.includes('image')) &&
    availableModels?.includes(this.defaultVisionModel) &&
    !this.isVisionModel
  ) {
    this.modelOptions.model = this.defaultVisionModel;
    this.isVisionModel = true;
  }

  // Downgrade: a gemini-pro vision variant without attachments falls back to text-only.
  if (this.isVisionModel && !attachments && this.modelOptions.model.includes('gemini-pro')) {
    this.modelOptions.model = 'gemini-pro';
    this.isVisionModel = false;
  }
}
|
||||||
|
|
||||||
|
formatMessages() {
|
||||||
|
return ((message) => {
|
||||||
|
const msg = {
|
||||||
|
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
|
||||||
|
content: message?.content ?? message.text,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!message.image_urls?.length) {
|
||||||
|
return msg;
|
||||||
|
}
|
||||||
|
|
||||||
|
msg.content = (
|
||||||
|
!Array.isArray(msg.content)
|
||||||
|
? [
|
||||||
|
{
|
||||||
|
type: ContentTypes.TEXT,
|
||||||
|
[ContentTypes.TEXT]: msg.content,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
: msg.content
|
||||||
|
).concat(message.image_urls);
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
}).bind(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Formats messages for generative AI
|
||||||
|
* @param {TMessage[]} messages
|
||||||
|
* @returns
|
||||||
|
*/
|
||||||
|
async formatGenerativeMessages(messages) {
|
||||||
|
const formattedMessages = [];
|
||||||
|
const attachments = await this.options.attachments;
|
||||||
|
const latestMessage = { ...messages[messages.length - 1] };
|
||||||
|
const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
|
||||||
|
this.options.attachments = files;
|
||||||
|
messages[messages.length - 1] = latestMessage;
|
||||||
|
|
||||||
|
for (const _message of messages) {
|
||||||
|
const role = _message.isCreatedByUser ? this.userLabel : this.modelLabel;
|
||||||
|
const parts = [];
|
||||||
|
parts.push({ text: _message.text });
|
||||||
|
if (!_message.image_urls?.length) {
|
||||||
|
formattedMessages.push({ role, parts });
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const images of _message.image_urls) {
|
||||||
|
if (images.inlineData) {
|
||||||
|
parts.push({ inlineData: images.inlineData });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
formattedMessages.push({ role, parts });
|
||||||
|
}
|
||||||
|
|
||||||
|
return formattedMessages;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Adds image URLs to the message object and returns the files
|
||||||
|
*
|
||||||
|
* @param {TMessage[]} messages
|
||||||
|
* @param {MongoFile[]} files
|
||||||
|
* @returns {Promise<MongoFile[]>}
|
||||||
|
*/
|
||||||
|
async addImageURLs(message, attachments, mode = '') {
|
||||||
|
const { files, image_urls } = await encodeAndFormat(
|
||||||
|
this.options.req,
|
||||||
|
attachments,
|
||||||
|
{
|
||||||
|
endpoint: EModelEndpoint.google,
|
||||||
|
},
|
||||||
|
mode,
|
||||||
|
);
|
||||||
|
message.image_urls = image_urls.length ? image_urls : undefined;
|
||||||
|
return files;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds the augmented prompt for attachments
|
||||||
|
* TODO: Add File API Support
|
||||||
|
* @param {TMessage[]} messages
|
||||||
|
*/
|
||||||
|
async buildAugmentedPrompt(messages = []) {
|
||||||
|
const attachments = await this.options.attachments;
|
||||||
|
const latestMessage = { ...messages[messages.length - 1] };
|
||||||
|
this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);
|
||||||
|
|
||||||
|
if (this.contextHandlers) {
|
||||||
|
for (const file of attachments) {
|
||||||
|
if (file.embedded) {
|
||||||
|
this.contextHandlers?.processFile(file);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (file.metadata?.fileIdentifier) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this.augmentedPrompt = await this.contextHandlers.createContext();
|
||||||
|
this.systemMessage = this.augmentedPrompt + this.systemMessage;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async buildVisionMessages(messages = [], parentMessageId) {
|
||||||
|
const attachments = await this.options.attachments;
|
||||||
|
const latestMessage = { ...messages[messages.length - 1] };
|
||||||
|
await this.buildAugmentedPrompt(messages);
|
||||||
|
|
||||||
|
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
|
||||||
|
|
||||||
|
const files = await this.addImageURLs(latestMessage, attachments);
|
||||||
|
|
||||||
|
this.options.attachments = files;
|
||||||
|
|
||||||
|
latestMessage.text = prompt;
|
||||||
|
|
||||||
|
const payload = {
|
||||||
|
instances: [
|
||||||
|
{
|
||||||
|
messages: [new HumanMessage(formatMessage({ message: latestMessage }))],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
return { prompt: payload };
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @param {TMessage[]} [messages=[]] */
|
||||||
|
async buildGenerativeMessages(messages = []) {
|
||||||
|
this.userLabel = 'user';
|
||||||
|
this.modelLabel = 'model';
|
||||||
|
const promises = [];
|
||||||
|
promises.push(await this.formatGenerativeMessages(messages));
|
||||||
|
promises.push(this.buildAugmentedPrompt(messages));
|
||||||
|
const [formattedMessages] = await Promise.all(promises);
|
||||||
|
return { prompt: formattedMessages };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {TMessage[]} [messages=[]]
|
||||||
|
* @param {string} [parentMessageId]
|
||||||
|
*/
|
||||||
|
async buildMessages(_messages = [], parentMessageId) {
|
||||||
|
if (!this.isGenerativeModel && !this.project_id) {
|
||||||
|
throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.');
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.systemMessage) {
|
||||||
|
const instructionsTokenCount = this.getTokenCount(this.systemMessage);
|
||||||
|
|
||||||
|
this.maxContextTokens = this.maxContextTokens - instructionsTokenCount;
|
||||||
|
if (this.maxContextTokens < 0) {
|
||||||
|
const info = `${instructionsTokenCount} / ${this.maxContextTokens}`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(`Instructions token count exceeds max context (${info}).`);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = 0; i < _messages.length; i++) {
|
||||||
|
const message = _messages[i];
|
||||||
|
if (!message.tokenCount) {
|
||||||
|
_messages[i].tokenCount = this.getTokenCountForMessage({
|
||||||
|
role: message.isCreatedByUser ? 'user' : 'assistant',
|
||||||
|
content: message.content ?? message.text,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const {
|
||||||
|
payload: messages,
|
||||||
|
tokenCountMap,
|
||||||
|
promptTokens,
|
||||||
|
} = await this.handleContextStrategy({
|
||||||
|
orderedMessages: _messages,
|
||||||
|
formattedMessages: _messages,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) {
|
||||||
|
const result = await this.buildGenerativeMessages(messages);
|
||||||
|
result.tokenCountMap = tokenCountMap;
|
||||||
|
result.promptTokens = promptTokens;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.attachments && this.isGenerativeModel) {
|
||||||
|
const result = this.buildVisionMessages(messages, parentMessageId);
|
||||||
|
result.tokenCountMap = tokenCountMap;
|
||||||
|
result.promptTokens = promptTokens;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload = {
|
||||||
|
instances: [
|
||||||
|
{
|
||||||
|
messages: messages
|
||||||
|
.map(this.formatMessages())
|
||||||
|
.map((msg) => ({ ...msg, role: msg.author === 'User' ? 'user' : 'assistant' }))
|
||||||
|
.map((message) => formatMessage({ message, langChain: true })),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.systemMessage) {
|
||||||
|
payload.instances[0].context = this.systemMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('[GoogleClient] buildMessages', payload);
|
||||||
|
return { prompt: payload, tokenCountMap, promptTokens };
|
||||||
|
}
|
||||||
|
|
||||||
|
async buildMessagesPrompt(messages, parentMessageId) {
|
||||||
|
const orderedMessages = this.constructor.getMessagesForConversation({
|
||||||
|
messages,
|
||||||
|
parentMessageId,
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.debug('[GoogleClient]', {
|
||||||
|
orderedMessages,
|
||||||
|
parentMessageId,
|
||||||
|
});
|
||||||
|
|
||||||
|
const formattedMessages = orderedMessages.map(this.formatMessages());
|
||||||
|
|
||||||
|
let lastAuthor = '';
|
||||||
|
let groupedMessages = [];
|
||||||
|
|
||||||
|
for (let message of formattedMessages) {
|
||||||
|
// If last author is not same as current author, add to new group
|
||||||
|
if (lastAuthor !== message.author) {
|
||||||
|
groupedMessages.push({
|
||||||
|
author: message.author,
|
||||||
|
content: [message.content],
|
||||||
|
});
|
||||||
|
lastAuthor = message.author;
|
||||||
|
// If same author, append content to the last group
|
||||||
|
} else {
|
||||||
|
groupedMessages[groupedMessages.length - 1].content.push(message.content);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let identityPrefix = '';
|
||||||
|
if (this.options.userLabel) {
|
||||||
|
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.modelLabel) {
|
||||||
|
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
let promptPrefix = (this.systemMessage ?? '').trim();
|
||||||
|
|
||||||
|
if (identityPrefix) {
|
||||||
|
promptPrefix = `${identityPrefix}${promptPrefix}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prompt AI to respond, empty if last message was from AI
|
||||||
|
let isEdited = lastAuthor === this.modelLabel;
|
||||||
|
const promptSuffix = isEdited ? '' : `${promptPrefix}\n\n${this.modelLabel}:\n`;
|
||||||
|
let currentTokenCount = isEdited
|
||||||
|
? this.getTokenCount(promptPrefix)
|
||||||
|
: this.getTokenCount(promptSuffix);
|
||||||
|
|
||||||
|
let promptBody = '';
|
||||||
|
const maxTokenCount = this.maxPromptTokens;
|
||||||
|
|
||||||
|
const context = [];
|
||||||
|
|
||||||
|
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
|
||||||
|
// Do this within a recursive async function so that it doesn't block the event loop for too long.
|
||||||
|
// Also, remove the next message when the message that puts us over the token limit is created by the user.
|
||||||
|
// Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
|
||||||
|
const nextMessage = {
|
||||||
|
remove: false,
|
||||||
|
tokenCount: 0,
|
||||||
|
messageString: '',
|
||||||
|
};
|
||||||
|
|
||||||
|
const buildPromptBody = async () => {
|
||||||
|
if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
|
||||||
|
const message = groupedMessages.pop();
|
||||||
|
const isCreatedByUser = message.author === this.userLabel;
|
||||||
|
// Use promptPrefix if message is edited assistant'
|
||||||
|
const messagePrefix =
|
||||||
|
isCreatedByUser || !isEdited
|
||||||
|
? `\n\n${message.author}:`
|
||||||
|
: `${promptPrefix}\n\n${message.author}:`;
|
||||||
|
const messageString = `${messagePrefix}\n${message.content}\n`;
|
||||||
|
let newPromptBody = `${messageString}${promptBody}`;
|
||||||
|
|
||||||
|
context.unshift(message);
|
||||||
|
|
||||||
|
const tokenCountForMessage = this.getTokenCount(messageString);
|
||||||
|
const newTokenCount = currentTokenCount + tokenCountForMessage;
|
||||||
|
|
||||||
|
if (!isCreatedByUser) {
|
||||||
|
nextMessage.messageString = messageString;
|
||||||
|
nextMessage.tokenCount = tokenCountForMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (newTokenCount > maxTokenCount) {
|
||||||
|
if (!promptBody) {
|
||||||
|
// This is the first message, so we can't add it. Just throw an error.
|
||||||
|
throw new Error(
|
||||||
|
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, ths message would put us over the token limit, so don't add it.
|
||||||
|
// if created by user, remove next message, otherwise remove only this message
|
||||||
|
if (isCreatedByUser) {
|
||||||
|
nextMessage.remove = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
promptBody = newPromptBody;
|
||||||
|
currentTokenCount = newTokenCount;
|
||||||
|
|
||||||
|
// Switch off isEdited after using it for the first time
|
||||||
|
if (isEdited) {
|
||||||
|
isEdited = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for next tick to avoid blocking the event loop
|
||||||
|
await new Promise((resolve) => setImmediate(resolve));
|
||||||
|
return buildPromptBody();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
|
||||||
|
await buildPromptBody();
|
||||||
|
|
||||||
|
if (nextMessage.remove) {
|
||||||
|
promptBody = promptBody.replace(nextMessage.messageString, '');
|
||||||
|
currentTokenCount -= nextMessage.tokenCount;
|
||||||
|
context.shift();
|
||||||
|
}
|
||||||
|
|
||||||
|
let prompt = `${promptBody}${promptSuffix}`.trim();
|
||||||
|
|
||||||
|
// Add 2 tokens for metadata after all messages have been counted.
|
||||||
|
currentTokenCount += 2;
|
||||||
|
|
||||||
|
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
|
||||||
|
this.modelOptions.maxOutputTokens = Math.min(
|
||||||
|
this.maxContextTokens - currentTokenCount,
|
||||||
|
this.maxResponseTokens,
|
||||||
|
);
|
||||||
|
|
||||||
|
return { prompt, context };
|
||||||
|
}
|
||||||
|
|
||||||
|
createLLM(clientOptions) {
|
||||||
|
const model = clientOptions.modelName ?? clientOptions.model;
|
||||||
|
clientOptions.location = loc;
|
||||||
|
clientOptions.endpoint = endpointPrefix;
|
||||||
|
|
||||||
|
let requestOptions = null;
|
||||||
|
if (this.reverseProxyUrl) {
|
||||||
|
requestOptions = {
|
||||||
|
baseUrl: this.reverseProxyUrl,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.authHeader) {
|
||||||
|
requestOptions.customHeaders = {
|
||||||
|
Authorization: `Bearer ${this.apiKey}`,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.project_id != null) {
|
||||||
|
logger.debug('Creating VertexAI client');
|
||||||
|
this.visionMode = undefined;
|
||||||
|
clientOptions.streaming = true;
|
||||||
|
const client = new ChatVertexAI(clientOptions);
|
||||||
|
client.temperature = clientOptions.temperature;
|
||||||
|
client.topP = clientOptions.topP;
|
||||||
|
client.topK = clientOptions.topK;
|
||||||
|
client.topLogprobs = clientOptions.topLogprobs;
|
||||||
|
client.frequencyPenalty = clientOptions.frequencyPenalty;
|
||||||
|
client.presencePenalty = clientOptions.presencePenalty;
|
||||||
|
client.maxOutputTokens = clientOptions.maxOutputTokens;
|
||||||
|
return client;
|
||||||
|
} else if (!EXCLUDED_GENAI_MODELS.test(model)) {
|
||||||
|
logger.debug('Creating GenAI client');
|
||||||
|
return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('Creating Chat Google Generative AI client');
|
||||||
|
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
|
||||||
|
}
|
||||||
|
|
||||||
|
initializeClient() {
|
||||||
|
let clientOptions = { ...this.modelOptions };
|
||||||
|
|
||||||
|
if (this.project_id) {
|
||||||
|
clientOptions['authOptions'] = {
|
||||||
|
credentials: {
|
||||||
|
...this.serviceKey,
|
||||||
|
},
|
||||||
|
projectId: this.project_id,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.isGenerativeModel && !this.project_id) {
|
||||||
|
clientOptions.modelName = clientOptions.model;
|
||||||
|
delete clientOptions.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.client = this.createLLM(clientOptions);
|
||||||
|
return this.client;
|
||||||
|
}
|
||||||
|
|
||||||
|
async getCompletion(_payload, options = {}) {
|
||||||
|
const { onProgress, abortController } = options;
|
||||||
|
const safetySettings = getSafetySettings(this.modelOptions.model);
|
||||||
|
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
|
||||||
|
const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
|
||||||
|
|
||||||
|
let reply = '';
|
||||||
|
/** @type {Error} */
|
||||||
|
let error;
|
||||||
|
try {
|
||||||
|
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
|
||||||
|
/** @type {GenerativeModel} */
|
||||||
|
const client = this.client;
|
||||||
|
/** @type {GenerateContentRequest} */
|
||||||
|
const requestOptions = {
|
||||||
|
safetySettings,
|
||||||
|
contents: _payload,
|
||||||
|
generationConfig: googleGenConfigSchema.parse(this.modelOptions),
|
||||||
|
};
|
||||||
|
|
||||||
|
const promptPrefix = (this.systemMessage ?? '').trim();
|
||||||
|
if (promptPrefix.length) {
|
||||||
|
requestOptions.systemInstruction = {
|
||||||
|
parts: [
|
||||||
|
{
|
||||||
|
text: promptPrefix,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const delay = modelName.includes('flash') ? 8 : 15;
|
||||||
|
/** @type {GenAIUsageMetadata} */
|
||||||
|
let usageMetadata;
|
||||||
|
|
||||||
|
abortController.signal.addEventListener(
|
||||||
|
'abort',
|
||||||
|
() => {
|
||||||
|
logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason);
|
||||||
|
},
|
||||||
|
{ once: true },
|
||||||
|
);
|
||||||
|
|
||||||
|
const result = await client.generateContentStream(requestOptions, {
|
||||||
|
signal: abortController.signal,
|
||||||
|
});
|
||||||
|
for await (const chunk of result.stream) {
|
||||||
|
usageMetadata = !usageMetadata
|
||||||
|
? chunk?.usageMetadata
|
||||||
|
: Object.assign(usageMetadata, chunk?.usageMetadata);
|
||||||
|
const chunkText = chunk.text();
|
||||||
|
await this.generateTextStream(chunkText, onProgress, {
|
||||||
|
delay,
|
||||||
|
});
|
||||||
|
reply += chunkText;
|
||||||
|
await sleep(streamRate);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (usageMetadata) {
|
||||||
|
this.usage = {
|
||||||
|
input_tokens: usageMetadata.promptTokenCount,
|
||||||
|
output_tokens: usageMetadata.candidatesTokenCount,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return reply;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { instances } = _payload;
|
||||||
|
const { messages: messages, context } = instances?.[0] ?? {};
|
||||||
|
|
||||||
|
if (!this.isVisionModel && context && messages?.length > 0) {
|
||||||
|
messages.unshift(new SystemMessage(context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */
|
||||||
|
let usageMetadata;
|
||||||
|
/** @type {ChatVertexAI} */
|
||||||
|
const client = this.client;
|
||||||
|
const stream = await client.stream(messages, {
|
||||||
|
signal: abortController.signal,
|
||||||
|
streamUsage: true,
|
||||||
|
safetySettings,
|
||||||
|
});
|
||||||
|
|
||||||
|
let delay = this.options.streamRate || 8;
|
||||||
|
|
||||||
|
if (!this.options.streamRate) {
|
||||||
|
if (this.isGenerativeModel) {
|
||||||
|
delay = 15;
|
||||||
|
}
|
||||||
|
if (modelName.includes('flash')) {
|
||||||
|
delay = 5;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
if (chunk?.usage_metadata) {
|
||||||
|
const metadata = chunk.usage_metadata;
|
||||||
|
for (const key in metadata) {
|
||||||
|
if (Number.isNaN(metadata[key])) {
|
||||||
|
delete metadata[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata);
|
||||||
|
}
|
||||||
|
|
||||||
|
const chunkText = chunk?.content ?? '';
|
||||||
|
await this.generateTextStream(chunkText, onProgress, {
|
||||||
|
delay,
|
||||||
|
});
|
||||||
|
reply += chunkText;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (usageMetadata) {
|
||||||
|
this.usage = usageMetadata;
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
error = e;
|
||||||
|
logger.error('[GoogleClient] There was an issue generating the completion', e);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (error != null && reply === '') {
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${
|
||||||
|
error.message ?? 'The Google provider failed to generate content, please contact the Admin.'
|
||||||
|
}" }`;
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
return reply;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get stream usage as returned by this client's API response.
|
||||||
|
* @returns {UsageMetadata} The stream usage object.
|
||||||
|
*/
|
||||||
|
getStreamUsage() {
|
||||||
|
return this.usage;
|
||||||
|
}
|
||||||
|
|
||||||
|
getMessageMapMethod() {
|
||||||
|
/**
|
||||||
|
* @param {TMessage} msg
|
||||||
|
*/
|
||||||
|
return (msg) => {
|
||||||
|
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
|
||||||
|
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
|
||||||
|
} else if (msg.content != null) {
|
||||||
|
msg.text = parseTextParts(msg.content, true);
|
||||||
|
delete msg.content;
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculates the correct token count for the current user message based on the token count map and API usage.
|
||||||
|
* Edge case: If the calculation results in a negative value, it returns the original estimate.
|
||||||
|
* If revisiting a conversation with a chat history entirely composed of token estimates,
|
||||||
|
* the cumulative token count going forward should become more accurate as the conversation progresses.
|
||||||
|
* @param {Object} params - The parameters for the calculation.
|
||||||
|
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
|
||||||
|
* @param {string} params.currentMessageId - The ID of the current message to calculate.
|
||||||
|
* @param {UsageMetadata} params.usage - The usage object returned by the API.
|
||||||
|
* @returns {number} The correct token count for the current user message.
|
||||||
|
*/
|
||||||
|
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
|
||||||
|
const originalEstimate = tokenCountMap[currentMessageId] || 0;
|
||||||
|
|
||||||
|
if (!usage || typeof usage.input_tokens !== 'number') {
|
||||||
|
return originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenCountMap[currentMessageId] = 0;
|
||||||
|
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
|
||||||
|
const numCount = Number(count);
|
||||||
|
return sum + (isNaN(numCount) ? 0 : numCount);
|
||||||
|
}, 0);
|
||||||
|
const totalInputTokens = usage.input_tokens ?? 0;
|
||||||
|
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
|
||||||
|
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {object} params
|
||||||
|
* @param {number} params.promptTokens
|
||||||
|
* @param {number} params.completionTokens
|
||||||
|
* @param {UsageMetadata} [params.usage]
|
||||||
|
* @param {string} [params.model]
|
||||||
|
* @param {string} [params.context='message']
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*/
|
||||||
|
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
|
||||||
|
await spendTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user ?? this.options.req?.user?.id,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{ promptTokens, completionTokens },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
|
||||||
|
*/
|
||||||
|
async titleChatCompletion(_payload, options = {}) {
|
||||||
|
let reply = '';
|
||||||
|
const { abortController } = options;
|
||||||
|
|
||||||
|
const model =
|
||||||
|
this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
|
||||||
|
const safetySettings = getSafetySettings(model);
|
||||||
|
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
|
||||||
|
logger.debug('Identified titling model as GenAI version');
|
||||||
|
/** @type {GenerativeModel} */
|
||||||
|
const client = this.client;
|
||||||
|
const requestOptions = {
|
||||||
|
contents: _payload,
|
||||||
|
safetySettings,
|
||||||
|
generationConfig: {
|
||||||
|
temperature: 0.5,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const result = await client.generateContent(requestOptions);
|
||||||
|
reply = result.response?.text();
|
||||||
|
return reply;
|
||||||
|
} else {
|
||||||
|
const { instances } = _payload;
|
||||||
|
const { messages } = instances?.[0] ?? {};
|
||||||
|
const titleResponse = await this.client.invoke(messages, {
|
||||||
|
signal: abortController.signal,
|
||||||
|
timeout: 7000,
|
||||||
|
safetySettings,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (titleResponse.usage_metadata) {
|
||||||
|
await this.recordTokenUsage({
|
||||||
|
model,
|
||||||
|
promptTokens: titleResponse.usage_metadata.input_tokens,
|
||||||
|
completionTokens: titleResponse.usage_metadata.output_tokens,
|
||||||
|
context: 'title',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
reply = titleResponse.content;
|
||||||
|
return reply;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async titleConvo({ text, responseText = '' }) {
|
||||||
|
let title = 'New Chat';
|
||||||
|
const convo = `||>User:
|
||||||
|
"${truncateText(text)}"
|
||||||
|
||>Response:
|
||||||
|
"${JSON.stringify(truncateText(responseText))}"`;
|
||||||
|
|
||||||
|
let { prompt: payload } = await this.buildMessages([
|
||||||
|
{
|
||||||
|
text: `Please generate ${titleInstruction}
|
||||||
|
|
||||||
|
${convo}
|
||||||
|
|
||||||
|
||>Title:`,
|
||||||
|
isCreatedByUser: true,
|
||||||
|
author: this.userLabel,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
|
||||||
|
try {
|
||||||
|
this.initializeClient();
|
||||||
|
title = await this.titleChatCompletion(payload, {
|
||||||
|
abortController: new AbortController(),
|
||||||
|
onProgress: () => {},
|
||||||
|
});
|
||||||
|
} catch (e) {
|
||||||
|
logger.error('[GoogleClient] There was an issue generating the title', e);
|
||||||
|
}
|
||||||
|
logger.debug(`Title response: ${title}`);
|
||||||
|
return title;
|
||||||
|
}
|
||||||
|
|
||||||
|
getSaveOptions() {
|
||||||
|
return {
|
||||||
|
endpointType: null,
|
||||||
|
artifacts: this.options.artifacts,
|
||||||
|
promptPrefix: this.options.promptPrefix,
|
||||||
|
maxContextTokens: this.options.maxContextTokens,
|
||||||
|
modelLabel: this.options.modelLabel,
|
||||||
|
iconURL: this.options.iconURL,
|
||||||
|
greeting: this.options.greeting,
|
||||||
|
spec: this.options.spec,
|
||||||
|
...this.modelOptions,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
getBuildMessagesOptions() {
|
||||||
|
// logger.debug('GoogleClient doesn\'t use getBuildMessagesOptions');
|
||||||
|
}
|
||||||
|
|
||||||
|
async sendCompletion(payload, opts = {}) {
|
||||||
|
let reply = '';
|
||||||
|
reply = await this.getCompletion(payload, opts);
|
||||||
|
return reply.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
getEncoding() {
|
||||||
|
return 'cl100k_base';
|
||||||
|
}
|
||||||
|
|
||||||
|
async getVertexTokenCount(text) {
|
||||||
|
/** @type {ChatVertexAI} */
|
||||||
|
const client = this.client ?? this.initializeClient();
|
||||||
|
const connection = client.connection;
|
||||||
|
const gAuthClient = connection.client;
|
||||||
|
const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`;
|
||||||
|
const result = await gAuthClient.request({
|
||||||
|
url: tokenEndpoint,
|
||||||
|
method: 'POST',
|
||||||
|
data: {
|
||||||
|
contents: [{ role: 'user', parts: [{ text }] }],
|
||||||
|
},
|
||||||
|
});
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
|
||||||
|
* @param {string} text - The text to get the token count for.
|
||||||
|
* @returns {number} The token count of the given text.
|
||||||
|
*/
|
||||||
|
getTokenCount(text) {
|
||||||
|
const encoding = this.getEncoding();
|
||||||
|
return Tokenizer.getTokenCount(text, encoding);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = GoogleClient;
|
||||||
|
|
@ -2,9 +2,10 @@ const { z } = require('zod');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const { Ollama } = require('ollama');
|
const { Ollama } = require('ollama');
|
||||||
const { sleep } = require('@librechat/agents');
|
const { sleep } = require('@librechat/agents');
|
||||||
|
const { resolveHeaders } = require('@librechat/api');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { Constants } = require('librechat-data-provider');
|
const { Constants } = require('librechat-data-provider');
|
||||||
const { resolveHeaders, deriveBaseURL } = require('@librechat/api');
|
const { deriveBaseURL } = require('~/utils');
|
||||||
|
|
||||||
const ollamaPayloadSchema = z.object({
|
const ollamaPayloadSchema = z.object({
|
||||||
mirostat: z.number().optional(),
|
mirostat: z.number().optional(),
|
||||||
|
|
|
||||||
1207
api/app/clients/OpenAIClient.js
Normal file
1207
api/app/clients/OpenAIClient.js
Normal file
File diff suppressed because it is too large
Load diff
5
api/app/clients/document/index.js
Normal file
5
api/app/clients/document/index.js
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
const tokenSplit = require('./tokenSplit');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
tokenSplit,
|
||||||
|
};
|
||||||
51
api/app/clients/document/tokenSplit.js
Normal file
51
api/app/clients/document/tokenSplit.js
Normal file
|
|
@ -0,0 +1,51 @@
|
||||||
|
const { TokenTextSplitter } = require('@langchain/textsplitters');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Splits a given text by token chunks, based on the provided parameters for the TokenTextSplitter.
|
||||||
|
* Note: limit or memoize use of this function as its calculation is expensive.
|
||||||
|
*
|
||||||
|
* @param {Object} obj - Configuration object for the text splitting operation.
|
||||||
|
* @param {string} obj.text - The text to be split.
|
||||||
|
* @param {string} [obj.encodingName='cl100k_base'] - Encoding name. Defaults to 'cl100k_base'.
|
||||||
|
* @param {number} [obj.chunkSize=1] - The token size of each chunk. Defaults to 1.
|
||||||
|
* @param {number} [obj.chunkOverlap=0] - The number of chunk elements to be overlapped between adjacent chunks. Defaults to 0.
|
||||||
|
* @param {number} [obj.returnSize] - If specified and not 0, slices the return array from the end by this amount.
|
||||||
|
*
|
||||||
|
* @returns {Promise<Array>} Returns a promise that resolves to an array of text chunks.
|
||||||
|
* If no text is provided, an empty array is returned.
|
||||||
|
* If returnSize is specified and not 0, slices the return array from the end by returnSize.
|
||||||
|
*
|
||||||
|
* @async
|
||||||
|
* @function tokenSplit
|
||||||
|
*/
|
||||||
|
async function tokenSplit({
|
||||||
|
text,
|
||||||
|
encodingName = 'cl100k_base',
|
||||||
|
chunkSize = 1,
|
||||||
|
chunkOverlap = 0,
|
||||||
|
returnSize,
|
||||||
|
}) {
|
||||||
|
if (!text) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const splitter = new TokenTextSplitter({
|
||||||
|
encodingName,
|
||||||
|
chunkSize,
|
||||||
|
chunkOverlap,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!returnSize) {
|
||||||
|
return await splitter.splitText(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
const splitText = await splitter.splitText(text);
|
||||||
|
|
||||||
|
if (returnSize && returnSize > 0 && splitText.length > 0) {
|
||||||
|
return splitText.slice(-Math.abs(returnSize));
|
||||||
|
}
|
||||||
|
|
||||||
|
return splitText;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = tokenSplit;
|
||||||
56
api/app/clients/document/tokenSplit.spec.js
Normal file
56
api/app/clients/document/tokenSplit.spec.js
Normal file
|
|
@ -0,0 +1,56 @@
|
||||||
|
const tokenSplit = require('./tokenSplit');

// Jest suite pinning tokenSplit's chunking behavior for a fixed Latin sentence.
describe('tokenSplit', () => {
  const text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam id.';

  // Custom encoding + overlap, trimmed to the last 5 chunks via returnSize.
  it('returns correct text chunks with provided parameters', async () => {
    const result = await tokenSplit({
      text: text,
      encodingName: 'gpt2',
      chunkSize: 2,
      chunkOverlap: 1,
      returnSize: 5,
    });

    expect(result).toEqual(['it.', '. Null', ' Nullam', 'am id', ' id.']);
  });

  // Defaults (cl100k_base, chunkSize 1, no overlap) yield one token per chunk.
  it('returns correct text chunks with default parameters', async () => {
    const result = await tokenSplit({ text });
    expect(result).toEqual([
      'Lorem',
      ' ipsum',
      ' dolor',
      ' sit',
      ' amet',
      ',',
      ' consectetur',
      ' adipiscing',
      ' elit',
      '.',
      ' Null',
      'am',
      ' id',
      '.',
    ]);
  });

  // returnSize slices from the END of the chunk list.
  it('returns correct text chunks with specific return size', async () => {
    const result = await tokenSplit({ text, returnSize: 2 });
    expect(result.length).toEqual(2);
    expect(result).toEqual([' id', '.']);
  });

  it('returns correct text chunks with specified chunk size', async () => {
    const result = await tokenSplit({ text, chunkSize: 10 });
    expect(result).toEqual([
      'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
      ' Nullam id.',
    ]);
  });

  // Empty input short-circuits to an empty array.
  it('returns empty array with no text', async () => {
    const result = await tokenSplit({ text: '' });
    expect(result).toEqual([]);
  });
});
|
||||||
|
|
@ -1,7 +1,13 @@
|
||||||
|
// Barrel file aggregating the API client classes and tool utilities.
const OpenAIClient = require('./OpenAIClient');
const GoogleClient = require('./GoogleClient');
const TextStream = require('./TextStream');
const AnthropicClient = require('./AnthropicClient');
const toolUtils = require('./tools/util');

module.exports = {
  OpenAIClient,
  GoogleClient,
  TextStream,
  AnthropicClient,
  // Spread so each tool utility is exported as a top-level named export.
  ...toolUtils,
};
|
||||||
|
|
|
||||||
85
api/app/clients/llm/createCoherePayload.js
Normal file
85
api/app/clients/llm/createCoherePayload.js
Normal file
|
|
@ -0,0 +1,85 @@
|
||||||
|
const { CohereConstants } = require('librechat-data-provider');
const { titleInstruction } = require('../prompts/titlePrompts');

// Lookup table translating OpenAI chat roles into Cohere's role constants,
// including an explicit mapping for the "system" role.
const roleMap = {
  user: CohereConstants.ROLE_USER,
  assistant: CohereConstants.ROLE_CHATBOT,
  system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly
};

/**
 * Converts an OpenAI-style ChatCompletion payload into the request shape
 * expected by Cohere's chat API, with explicit handling of "system" messages.
 *
 * @param {Object} options - Object containing the model options.
 * @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options.
 * @returns {CohereChatStreamRequest} Cohere-compatible chat API payload.
 */
function createCoherePayload({ modelOptions }) {
  const {
    stream,
    stop,
    top_p,
    temperature,
    frequency_penalty,
    presence_penalty,
    max_tokens,
    messages,
    model,
    ...rest
  } = modelOptions;

  /** @type {string | undefined} */
  let preamble;
  let latestUserMessageContent = '';

  // Everything except a trailing user message becomes Cohere chat_history;
  // a trailing user message becomes the top-level `message` field instead.
  const chatHistory = [];
  messages.forEach((message, index) => {
    // Multi-part content is flattened to the text parts joined by spaces.
    const content =
      typeof message.content === 'string'
        ? message.content
        : message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' ');

    const isFinalUserMessage = index === messages.length - 1 && message.role === 'user';
    if (isFinalUserMessage) {
      latestUserMessageContent = content;
      return;
    }

    chatHistory.push({
      role: roleMap[message.role] || CohereConstants.ROLE_USER,
      message: content,
    });
  });

  // Special case: a lone system message with no user message is promoted to
  // the preamble, and `message` becomes either the title trigger or a stub.
  const isLoneSystemMessage =
    chatHistory.length === 1 &&
    chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
    !latestUserMessageContent.length;

  if (isLoneSystemMessage) {
    const systemMessage = chatHistory[0].message;
    latestUserMessageContent = systemMessage.includes(titleInstruction)
      ? CohereConstants.TITLE_MESSAGE
      : '.';
    preamble = systemMessage;
  }

  return {
    message: latestUserMessageContent,
    model: model,
    chatHistory,
    stream: stream ?? false,
    temperature: temperature,
    frequencyPenalty: frequency_penalty,
    presencePenalty: presence_penalty,
    maxTokens: max_tokens,
    stopSequences: stop,
    preamble,
    p: top_p,
    ...rest,
  };
}

module.exports = createCoherePayload;
|
||||||
5
api/app/clients/llm/index.js
Normal file
5
api/app/clients/llm/index.js
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
const createCoherePayload = require('./createCoherePayload');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
createCoherePayload,
|
||||||
|
};
|
||||||
90
api/app/clients/output_parsers/addImages.js
Normal file
90
api/app/clients/output_parsers/addImages.js
Normal file
|
|
@ -0,0 +1,90 @@
|
||||||
|
const { getBasePath } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');

/**
 * Repairs image URLs inside `responseMessage.text` and appends the first image
 * markdown (`![alt](url)`) found in each intermediate step's observation when
 * it is not already present in the response text. Mutates `responseMessage`
 * in place.
 *
 * @function
 * @module addImages
 *
 * @param {Array.<Object>} intermediateSteps - Agent steps; each may carry an
 *   `observation` string containing image markdown.
 * @param {Object} responseMessage - Message object whose `text` property is
 *   corrected and/or appended to.
 *
 * @property {string} intermediateSteps[].observation - Observation text which might contain image markdown.
 * @property {string} responseMessage.text - Response text which might contain image URLs.
 *
 * @returns {void}
 */
function addImages(intermediateSteps, responseMessage) {
  // Nothing to do without both a step list and a message to mutate.
  if (!intermediateSteps || !responseMessage) {
    return;
  }

  // Deployment base path (e.g. '/librechat' for sub-path installs, '' at root).
  const basePath = getBasePath();

  // Correct any erroneous URLs in the responseMessage.text first
  intermediateSteps.forEach((step) => {
    const { observation } = step;
    // Only observations containing image markdown are considered.
    if (!observation || !observation.includes('![')) {
      return;
    }

    // Extract the canonical '/images/...<ext>' path from the observation.
    const match = observation.match(/\/images\/.*\.\w*/);
    if (!match) {
      return;
    }
    const essentialImagePath = match[0];
    const fullImagePath = `${basePath}${essentialImagePath}`;

    // Rewrite every image URL in the response text that does not already
    // start with `${basePath}/images/` to the observation's full path.
    // NOTE(review): `String.replace` with a string pattern replaces only the
    // first occurrence, and `responseMessage.text` is reassigned while the
    // stateful /g regex's lastIndex advances against the new string — the
    // current behavior is pinned by the spec file; confirm before restructuring.
    const regex = /!\[.*?\]\((.*?)\)/g;
    let matchErroneous;
    while ((matchErroneous = regex.exec(responseMessage.text)) !== null) {
      if (matchErroneous[1] && !matchErroneous[1].startsWith(`${basePath}/images/`)) {
        // Replace with the full path including base path
        responseMessage.text = responseMessage.text.replace(matchErroneous[1], fullImagePath);
      }
    }
  });

  // Now, check if the responseMessage already includes the correct image file path and append if not
  intermediateSteps.forEach((step) => {
    const { observation } = step;
    if (!observation || !observation.includes('![')) {
      return;
    }
    // Only the FIRST image markdown in the observation is appended.
    const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
    if (observedImagePath) {
      // Fix the image path to include base path if it doesn't already
      let imageMarkdown = observedImagePath[0];
      const urlMatch = imageMarkdown.match(/\(([^)]+)\)/);
      // Prepend the base path only for relative '/images/...' URLs that do not
      // already carry it; absolute and data URLs are left untouched.
      if (
        urlMatch &&
        urlMatch[1] &&
        !urlMatch[1].startsWith(`${basePath}/images/`) &&
        urlMatch[1].startsWith('/images/')
      ) {
        imageMarkdown = imageMarkdown.replace(urlMatch[1], `${basePath}${urlMatch[1]}`);
      }

      // Append only when the exact markdown is not already present.
      if (!responseMessage.text.includes(imageMarkdown)) {
        responseMessage.text += '\n' + imageMarkdown;
        logger.debug('[addImages] added image from intermediateSteps:', imageMarkdown);
      }
    }
  });
}

module.exports = addImages;
|
||||||
246
api/app/clients/output_parsers/addImages.spec.js
Normal file
246
api/app/clients/output_parsers/addImages.spec.js
Normal file
|
|
@ -0,0 +1,246 @@
|
||||||
|
let addImages = require('./addImages');
|
||||||
|
|
||||||
|
describe('addImages', () => {
|
||||||
|
let intermediateSteps;
|
||||||
|
let responseMessage;
|
||||||
|
let options;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
intermediateSteps = [];
|
||||||
|
responseMessage = { text: '' };
|
||||||
|
options = { debug: false };
|
||||||
|
this.options = options;
|
||||||
|
addImages = addImages.bind(this);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle null or undefined parameters', () => {
|
||||||
|
addImages(null, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
|
||||||
|
addImages(intermediateSteps, null);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
|
||||||
|
addImages(null, null);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append correct image markdown if not present in responseMessage', () => {
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append image markdown if already present in responseMessage', () => {
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct and append image markdown with erroneous URL', () => {
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct multiple erroneous URLs in responseMessage', () => {
|
||||||
|
responseMessage.text =
|
||||||
|
' ';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(' ');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append non-image markdown observations', () => {
|
||||||
|
intermediateSteps.push({ observation: '[desc](/images/test.png)' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple observations', () => {
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append if observation does not contain image markdown', () => {
|
||||||
|
intermediateSteps.push({ observation: 'This is a test observation without image markdown.' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append correctly from a real scenario', () => {
|
||||||
|
responseMessage.text =
|
||||||
|
"Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there's a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?";
|
||||||
|
const originalText = responseMessage.text;
|
||||||
|
const imageMarkdown = '';
|
||||||
|
intermediateSteps.push({ observation: imageMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract only image markdowns when there is text between them', () => {
|
||||||
|
const markdownWithTextBetweenImages = `
|
||||||
|

|
||||||
|
Some text between images that should not be included.
|
||||||
|

|
||||||
|
More text that should be ignored.
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithTextBetweenImages });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only return the first image when multiple images are present', () => {
|
||||||
|
const markdownWithMultipleImages = `
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithMultipleImages });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include any text or metadata surrounding the image markdown', () => {
|
||||||
|
const markdownWithMetadata = `
|
||||||
|
Title: Test Document
|
||||||
|
Author: John Doe
|
||||||
|

|
||||||
|
Some content after the image.
|
||||||
|
Vector values: [0.1, 0.2, 0.3]
|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithMetadata });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle complex markdown with multiple images and only return the first one', () => {
|
||||||
|
const complexMarkdown = `
|
||||||
|
# Document Title
|
||||||
|
|
||||||
|
## Section 1
|
||||||
|
Here's some text with an embedded image:
|
||||||
|

|
||||||
|
|
||||||
|
## Section 2
|
||||||
|
More text here...
|
||||||
|

|
||||||
|
|
||||||
|
### Subsection
|
||||||
|
Even more content
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: complexMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('basePath functionality', () => {
|
||||||
|
let originalDomainClient;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
originalDomainClient = process.env.DOMAIN_CLIENT;
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
process.env.DOMAIN_CLIENT = originalDomainClient;
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should prepend base path to image URLs when DOMAIN_CLIENT is set', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not prepend base path when image URL already has base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct erroneous URLs with base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty base path (root deployment)', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle missing DOMAIN_CLIENT', () => {
|
||||||
|
delete process.env.DOMAIN_CLIENT;
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle observation without image path match', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle nested subdirectories in base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/apps/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple observations with mixed base path scenarios', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(
|
||||||
|
'\n\n',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle complex markdown with base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
const complexMarkdown = `
|
||||||
|
# Document Title
|
||||||
|

|
||||||
|
Some text between images
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: complexMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle URLs that are already absolute', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle data URLs', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({
|
||||||
|
observation:
|
||||||
|
'',
|
||||||
|
});
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(
|
||||||
|
'\n',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
88
api/app/clients/output_parsers/handleOutputs.js
Normal file
88
api/app/clients/output_parsers/handleOutputs.js
Normal file
|
|
@ -0,0 +1,88 @@
|
||||||
|
const { instructions, imageInstructions, errorInstructions } = require('../prompts');
|
||||||
|
|
||||||
|
/**
 * Renders an agent's intermediate steps as a quoted "internal thoughts" block.
 * Steps carrying an `action` are first normalized into `{ log }` entries:
 * functions-agent steps are formatted as Action/Input/Observation, other
 * action steps append their observation to the step's own log.
 *
 * @param {Array<Object>} [actions=[]] - Intermediate steps (or `{ log }` entries).
 * @param {boolean} [functionsAgent=false] - Whether the steps come from a functions agent.
 * @returns {string} A header line followed by the newline-joined logs in quotes.
 */
function getActions(actions = [], functionsAgent = false) {
  let steps = actions;

  if (steps[0]?.action && functionsAgent) {
    steps = steps.map((step) => ({
      log: `Action: ${step.action?.tool || ''}\nInput: ${
        JSON.stringify(step.action?.toolInput) || ''
      }\nObservation: ${step.observation}`,
    }));
  } else if (steps[0]?.action) {
    steps = steps.map((step) => ({
      log: `${step.action.log}\nObservation: ${step.observation}`,
    }));
  }

  // Template interpolation (not bare join) so a missing `log` renders as
  // "undefined", matching the original accumulation behavior.
  const joined = steps.map((step) => `${step.log}`).join('\n');
  return `Internal thoughts & actions taken:\n"${joined}"`;
}
|
||||||
|
|
||||||
|
/**
 * Builds the retry prompt shown to the model after an agent error, combining
 * an error-specific instruction, the internal actions taken, and the human's
 * last message.
 *
 * @param {Object} params
 * @param {string} params.message - The human's last message.
 * @param {string} params.errorMessage - The raw error message encountered.
 * @param {Array<Object>} params.actions - Intermediate steps taken so far.
 * @param {boolean} params.functionsAgent - Whether steps come from a functions agent.
 * @returns {string} The assembled retry prompt.
 */
function buildErrorInput({ message, errorMessage, actions, functionsAgent }) {
  // Parse failures get a formatting reminder; all other errors get a generic
  // retry instruction that includes the error text.
  const isFormattingError = errorMessage.includes('Could not parse LLM output:');

  const log = isFormattingError
    ? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
    : `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

  return `
${log}

${getActions(actions, functionsAgent)}

Human's last message: ${message}
`;
}
|
||||||
|
|
||||||
|
/**
 * Builds the prompt prefix used to have the model produce its final
 * conversational reply from an agent run's result. Returns `null` when the
 * agent produced no usable output (undefined output, an output containing
 * 'N/A', or a single step whose toolInput was 'N/A'), signalling the caller
 * to skip the refinement pass.
 *
 * @param {Object} params
 * @param {Object} params.result - Agent result with `output`, optional
 *   `intermediateSteps`, and optional `errorMessage`.
 * @param {string} params.message - The user's message being answered.
 * @param {boolean} params.functionsAgent - Whether steps come from a functions agent.
 * @returns {string|null} The assembled prefix, or `null` when refinement should be skipped.
 */
function buildPromptPrefix({ result, message, functionsAgent }) {
  if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
    return null;
  }

  if (
    result?.intermediateSteps?.length === 1 &&
    result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
  ) {
    return null;
  }

  const internalActions =
    result?.intermediateSteps?.length > 0
      ? getActions(result.intermediateSteps, functionsAgent)
      : 'Internal Actions Taken: None';

  // Heuristic: any mention of 'image' in the actions triggers the markdown
  // image-path instructions.
  const toolBasedInstructions = internalActions.toLowerCase().includes('image')
    ? imageInstructions
    : '';

  const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';

  const preliminaryAnswer =
    result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
  // With a preliminary answer the model reviews/improves it; otherwise it
  // answers fresh from the recorded thoughts & actions.
  const prefix = preliminaryAnswer
    ? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
    : 'respond to the User Message below based on your preliminary thoughts & actions.';

  return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
  preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
  preliminaryAnswer
    ? ''
    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
}You must cite sources if you are using any web links. ${toolBasedInstructions}
Only respond with your conversational reply to the following User Message:
"${message}"`;
}

module.exports = {
  buildErrorInput,
  buildPromptPrefix,
};
|
||||||
7
api/app/clients/output_parsers/index.js
Normal file
7
api/app/clients/output_parsers/index.js
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
// Barrel file aggregating output-parser helpers.
const addImages = require('./addImages');
const handleOutputs = require('./handleOutputs');

module.exports = {
  addImages,
  // Spread so each handleOutputs helper is a top-level named export.
  ...handleOutputs,
};
|
||||||
38
api/app/clients/prompts/handleInputs.js
Normal file
38
api/app/clients/prompts/handleInputs.js
Normal file
|
|
@ -0,0 +1,38 @@
|
||||||
|
// Escaping curly braces is necessary for LangChain to correctly process the prompt

/**
 * Escapes curly braces so LangChain does not treat them as template variables.
 * Runs of two or more identical braces are first collapsed to a single brace,
 * then every remaining brace is doubled (`{` -> `{{`, `}` -> `}}`).
 *
 * @param {string} str - Raw prompt text.
 * @returns {string} Text with every brace run normalized to a doubled brace.
 */
function escapeBraces(str) {
  return str
    .replace(/({{2,})|(}{2,})/g, (match) => `${match[0]}`)
    .replace(/{|}/g, (match) => `${match}${match}`);
}

/**
 * Produces a short, brace-escaped snippet of `text`: at most ~50 characters
 * and at most 10 space-separated words. A single token longer than the limit
 * is hard-truncated to the character limit.
 *
 * @param {string} text - Source text to summarize.
 * @returns {string} The trimmed snippet.
 */
function getSnippet(text) {
  const limit = 50;
  const words = escapeBraces(text).split(' ');

  if (words.length === 1 && words[0].length > limit) {
    return words[0].substring(0, limit);
  }

  let result = '';
  let wordCount = 0;

  for (let i = 0; i < words.length; i++) {
    // Stop as soon as the next word would exceed the character budget.
    if (result.length + words[i].length <= limit) {
      result += words[i] + ' ';
      wordCount++;
    } else {
      break;
    }

    // Cap the snippet at 10 words regardless of remaining budget.
    if (wordCount === 10) {
      break;
    }
  }

  return result.trim();
}
|
||||||
|
|
||||||
|
// Prompt-input helpers: brace escaping for LangChain and snippet extraction.
module.exports = {
  escapeBraces,
  getSnippet,
};
|
||||||
|
|
@ -1,5 +1,7 @@
|
||||||
const formatMessages = require('./formatMessages');
|
const formatMessages = require('./formatMessages');
|
||||||
const summaryPrompts = require('./summaryPrompts');
|
const summaryPrompts = require('./summaryPrompts');
|
||||||
|
const handleInputs = require('./handleInputs');
|
||||||
|
const instructions = require('./instructions');
|
||||||
const truncate = require('./truncate');
|
const truncate = require('./truncate');
|
||||||
const createVisionPrompt = require('./createVisionPrompt');
|
const createVisionPrompt = require('./createVisionPrompt');
|
||||||
const createContextHandlers = require('./createContextHandlers');
|
const createContextHandlers = require('./createContextHandlers');
|
||||||
|
|
@ -7,6 +9,8 @@ const createContextHandlers = require('./createContextHandlers');
|
||||||
module.exports = {
|
module.exports = {
|
||||||
...formatMessages,
|
...formatMessages,
|
||||||
...summaryPrompts,
|
...summaryPrompts,
|
||||||
|
...handleInputs,
|
||||||
|
...instructions,
|
||||||
...truncate,
|
...truncate,
|
||||||
createVisionPrompt,
|
createVisionPrompt,
|
||||||
createContextHandlers,
|
createContextHandlers,
|
||||||
|
|
|
||||||
10
api/app/clients/prompts/instructions.js
Normal file
10
api/app/clients/prompts/instructions.js
Normal file
|
|
@ -0,0 +1,10 @@
|
||||||
|
// Shared instruction strings appended to agent prompts. These are runtime
// prompt text — do not edit casually; downstream behavior depends on exact wording.
module.exports = {
  instructions:
    'Remember, all your responses MUST be in the format described. Do not respond unless it\'s in the format described, using the structure of Action, Action Input, etc.',
  errorInstructions:
    '\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn\'t mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:',
  // NOTE(review): this string appears truncated by the source extraction — it
  // likely ended with an example markdown image tag; confirm against upstream.
  imageInstructions:
    'You must include the exact image paths from above, formatted in Markdown syntax: ',
  completionInstructions:
    'Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:',
};
|
||||||
1043
api/app/clients/specs/AnthropicClient.test.js
Normal file
1043
api/app/clients/specs/AnthropicClient.test.js
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -41,9 +41,9 @@ jest.mock('~/models', () => ({
|
||||||
const { getConvo, saveConvo } = require('~/models');
|
const { getConvo, saveConvo } = require('~/models');
|
||||||
|
|
||||||
jest.mock('@librechat/agents', () => {
|
jest.mock('@librechat/agents', () => {
|
||||||
const actual = jest.requireActual('@librechat/agents');
|
const { Providers } = jest.requireActual('@librechat/agents');
|
||||||
return {
|
return {
|
||||||
...actual,
|
Providers,
|
||||||
ChatOpenAI: jest.fn().mockImplementation(() => {
|
ChatOpenAI: jest.fn().mockImplementation(() => {
|
||||||
return {};
|
return {};
|
||||||
}),
|
}),
|
||||||
|
|
@ -821,56 +821,6 @@ describe('BaseClient', () => {
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('recordTokenUsage model assignment', () => {
|
|
||||||
test('should pass this.model to recordTokenUsage, not the agent ID from responseMessage.model', async () => {
|
|
||||||
const actualModel = 'claude-opus-4-5';
|
|
||||||
const agentId = 'agent_p5Z_IU6EIxBoqn1BoqLBp';
|
|
||||||
|
|
||||||
TestClient.model = actualModel;
|
|
||||||
TestClient.options.endpoint = 'agents';
|
|
||||||
TestClient.options.agent = { id: agentId };
|
|
||||||
|
|
||||||
TestClient.getTokenCountForResponse = jest.fn().mockReturnValue(50);
|
|
||||||
TestClient.recordTokenUsage = jest.fn().mockResolvedValue(undefined);
|
|
||||||
TestClient.buildMessages.mockReturnValue({
|
|
||||||
prompt: [],
|
|
||||||
tokenCountMap: { res: 50 },
|
|
||||||
});
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello', {});
|
|
||||||
|
|
||||||
expect(TestClient.recordTokenUsage).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
model: actualModel,
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
|
|
||||||
const callArgs = TestClient.recordTokenUsage.mock.calls[0][0];
|
|
||||||
expect(callArgs.model).not.toBe(agentId);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should pass this.model even when this.model differs from modelOptions.model', async () => {
|
|
||||||
const instanceModel = 'gpt-4o';
|
|
||||||
TestClient.model = instanceModel;
|
|
||||||
TestClient.modelOptions = { model: 'gpt-4o-mini' };
|
|
||||||
|
|
||||||
TestClient.getTokenCountForResponse = jest.fn().mockReturnValue(50);
|
|
||||||
TestClient.recordTokenUsage = jest.fn().mockResolvedValue(undefined);
|
|
||||||
TestClient.buildMessages.mockReturnValue({
|
|
||||||
prompt: [],
|
|
||||||
tokenCountMap: { res: 50 },
|
|
||||||
});
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello', {});
|
|
||||||
|
|
||||||
expect(TestClient.recordTokenUsage).toHaveBeenCalledWith(
|
|
||||||
expect.objectContaining({
|
|
||||||
model: instanceModel,
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('getMessagesWithinTokenLimit with instructions', () => {
|
describe('getMessagesWithinTokenLimit with instructions', () => {
|
||||||
test('should always include instructions when present', async () => {
|
test('should always include instructions when present', async () => {
|
||||||
TestClient.maxContextTokens = 50;
|
TestClient.maxContextTokens = 50;
|
||||||
|
|
@ -978,123 +928,4 @@ describe('BaseClient', () => {
|
||||||
expect(result.remainingContextTokens).toBe(2); // 25 - 20 - 3(assistant label)
|
expect(result.remainingContextTokens).toBe(2); // 25 - 20 - 3(assistant label)
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('sendMessage file population', () => {
|
|
||||||
const attachment = {
|
|
||||||
file_id: 'file-abc',
|
|
||||||
filename: 'image.png',
|
|
||||||
filepath: '/uploads/image.png',
|
|
||||||
type: 'image/png',
|
|
||||||
bytes: 1024,
|
|
||||||
object: 'file',
|
|
||||||
user: 'user-1',
|
|
||||||
embedded: false,
|
|
||||||
usage: 0,
|
|
||||||
text: 'large ocr blob that should be stripped',
|
|
||||||
_id: 'mongo-id-1',
|
|
||||||
};
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
TestClient.options.req = { body: { files: [{ file_id: 'file-abc' }] } };
|
|
||||||
TestClient.options.attachments = [attachment];
|
|
||||||
});
|
|
||||||
|
|
||||||
test('populates userMessage.files before saveMessageToDatabase is called', async () => {
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockImplementation((msg) => {
|
|
||||||
return Promise.resolve({ message: msg });
|
|
||||||
});
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave).toBeDefined();
|
|
||||||
expect(userSave[0].files).toBeDefined();
|
|
||||||
expect(userSave[0].files).toHaveLength(1);
|
|
||||||
expect(userSave[0].files[0].file_id).toBe('file-abc');
|
|
||||||
});
|
|
||||||
|
|
||||||
test('strips text and _id from files before saving', async () => {
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave[0].files[0].text).toBeUndefined();
|
|
||||||
expect(userSave[0].files[0]._id).toBeUndefined();
|
|
||||||
expect(userSave[0].files[0].filename).toBe('image.png');
|
|
||||||
});
|
|
||||||
|
|
||||||
test('deletes image_urls from userMessage when files are present', async () => {
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
TestClient.options.attachments = [
|
|
||||||
{ ...attachment, image_urls: ['data:image/png;base64,...'] },
|
|
||||||
];
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave[0].image_urls).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('does not set files when no attachments match request file IDs', async () => {
|
|
||||||
TestClient.options.req = { body: { files: [{ file_id: 'file-nomatch' }] } };
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave[0].files).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('skips file population when attachments is not an array (Promise case)', async () => {
|
|
||||||
TestClient.options.attachments = Promise.resolve([attachment]);
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave[0].files).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('skips file population when skipSaveUserMessage is true', async () => {
|
|
||||||
TestClient.skipSaveUserMessage = true;
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg?.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('ignores file_id: undefined entries in req.body.files (no set poisoning)', async () => {
|
|
||||||
TestClient.options.req = {
|
|
||||||
body: { files: [{ file_id: undefined }, { file_id: 'file-abc' }] },
|
|
||||||
};
|
|
||||||
TestClient.options.attachments = [
|
|
||||||
{ ...attachment, file_id: undefined },
|
|
||||||
{ ...attachment, file_id: 'file-abc' },
|
|
||||||
];
|
|
||||||
TestClient.saveMessageToDatabase = jest.fn().mockResolvedValue({ message: {} });
|
|
||||||
|
|
||||||
await TestClient.sendMessage('Hello');
|
|
||||||
|
|
||||||
const userSave = TestClient.saveMessageToDatabase.mock.calls.find(
|
|
||||||
([msg]) => msg.isCreatedByUser,
|
|
||||||
);
|
|
||||||
expect(userSave[0].files).toHaveLength(1);
|
|
||||||
expect(userSave[0].files[0].file_id).toBe('file-abc');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
||||||
630
api/app/clients/specs/OpenAIClient.test.js
Normal file
630
api/app/clients/specs/OpenAIClient.test.js
Normal file
|
|
@ -0,0 +1,630 @@
|
||||||
|
jest.mock('~/cache/getLogStores');
|
||||||
|
require('dotenv').config();
|
||||||
|
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
|
||||||
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
|
const OpenAIClient = require('../OpenAIClient');
|
||||||
|
jest.mock('meilisearch');
|
||||||
|
|
||||||
|
jest.mock('~/db/connect');
|
||||||
|
jest.mock('~/models', () => ({
|
||||||
|
User: jest.fn(),
|
||||||
|
Key: jest.fn(),
|
||||||
|
Session: jest.fn(),
|
||||||
|
Balance: jest.fn(),
|
||||||
|
Transaction: jest.fn(),
|
||||||
|
getMessages: jest.fn().mockResolvedValue([]),
|
||||||
|
saveMessage: jest.fn(),
|
||||||
|
updateMessage: jest.fn(),
|
||||||
|
deleteMessagesSince: jest.fn(),
|
||||||
|
deleteMessages: jest.fn(),
|
||||||
|
getConvoTitle: jest.fn(),
|
||||||
|
getConvo: jest.fn(),
|
||||||
|
saveConvo: jest.fn(),
|
||||||
|
deleteConvos: jest.fn(),
|
||||||
|
getPreset: jest.fn(),
|
||||||
|
getPresets: jest.fn(),
|
||||||
|
savePreset: jest.fn(),
|
||||||
|
deletePresets: jest.fn(),
|
||||||
|
findFileById: jest.fn(),
|
||||||
|
createFile: jest.fn(),
|
||||||
|
updateFile: jest.fn(),
|
||||||
|
deleteFile: jest.fn(),
|
||||||
|
deleteFiles: jest.fn(),
|
||||||
|
getFiles: jest.fn(),
|
||||||
|
updateFileUsage: jest.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Import the actual module but mock specific parts
|
||||||
|
const agents = jest.requireActual('@librechat/agents');
|
||||||
|
const { CustomOpenAIClient } = agents;
|
||||||
|
|
||||||
|
// Also mock ChatOpenAI to prevent real API calls
|
||||||
|
agents.ChatOpenAI = jest.fn().mockImplementation(() => {
|
||||||
|
return {};
|
||||||
|
});
|
||||||
|
agents.AzureChatOpenAI = jest.fn().mockImplementation(() => {
|
||||||
|
return {};
|
||||||
|
});
|
||||||
|
|
||||||
|
// Mock only the CustomOpenAIClient constructor
|
||||||
|
jest.spyOn(CustomOpenAIClient, 'constructor').mockImplementation(function (...options) {
|
||||||
|
return new CustomOpenAIClient(...options);
|
||||||
|
});
|
||||||
|
|
||||||
|
const finalChatCompletion = jest.fn().mockResolvedValue({
|
||||||
|
choices: [
|
||||||
|
{
|
||||||
|
message: { role: 'assistant', content: 'Mock message content' },
|
||||||
|
finish_reason: 'Mock finish reason',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
const stream = jest.fn().mockImplementation(() => {
|
||||||
|
let isDone = false;
|
||||||
|
let isError = false;
|
||||||
|
let errorCallback = null;
|
||||||
|
|
||||||
|
const onEventHandlers = {
|
||||||
|
abort: () => {
|
||||||
|
// Mock abort behavior
|
||||||
|
},
|
||||||
|
error: (callback) => {
|
||||||
|
errorCallback = callback; // Save the error callback for later use
|
||||||
|
},
|
||||||
|
finalMessage: (callback) => {
|
||||||
|
callback({ role: 'assistant', content: 'Mock Response' });
|
||||||
|
isDone = true; // Set stream to done
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const mockStream = {
|
||||||
|
on: jest.fn((event, callback) => {
|
||||||
|
if (onEventHandlers[event]) {
|
||||||
|
onEventHandlers[event](callback);
|
||||||
|
}
|
||||||
|
return mockStream;
|
||||||
|
}),
|
||||||
|
finalChatCompletion,
|
||||||
|
controller: { abort: jest.fn() },
|
||||||
|
triggerError: () => {
|
||||||
|
isError = true;
|
||||||
|
if (errorCallback) {
|
||||||
|
errorCallback(new Error('Mock error'));
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[Symbol.asyncIterator]: () => {
|
||||||
|
return {
|
||||||
|
next: () => {
|
||||||
|
if (isError) {
|
||||||
|
return Promise.reject(new Error('Mock error'));
|
||||||
|
}
|
||||||
|
if (isDone) {
|
||||||
|
return Promise.resolve({ done: true });
|
||||||
|
}
|
||||||
|
const chunk = { choices: [{ delta: { content: 'Mock chunk' } }] };
|
||||||
|
return Promise.resolve({ value: chunk, done: false });
|
||||||
|
},
|
||||||
|
};
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return mockStream;
|
||||||
|
});
|
||||||
|
|
||||||
|
const create = jest.fn().mockResolvedValue({
|
||||||
|
choices: [
|
||||||
|
{
|
||||||
|
message: { content: 'Mock message content' },
|
||||||
|
finish_reason: 'Mock finish reason',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
// Mock the implementation of CustomOpenAIClient instances
|
||||||
|
jest.spyOn(CustomOpenAIClient.prototype, 'constructor').mockImplementation(function () {
|
||||||
|
return this;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create a mock for the CustomOpenAIClient class
|
||||||
|
const mockCustomOpenAIClient = jest.fn().mockImplementation(() => ({
|
||||||
|
beta: {
|
||||||
|
chat: {
|
||||||
|
completions: {
|
||||||
|
stream,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
chat: {
|
||||||
|
completions: {
|
||||||
|
create,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
CustomOpenAIClient.mockImplementation = mockCustomOpenAIClient;
|
||||||
|
|
||||||
|
describe('OpenAIClient', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockCache = {
|
||||||
|
get: jest.fn().mockResolvedValue({}),
|
||||||
|
set: jest.fn(),
|
||||||
|
};
|
||||||
|
getLogStores.mockReturnValue(mockCache);
|
||||||
|
});
|
||||||
|
let client;
|
||||||
|
const model = 'gpt-4';
|
||||||
|
const parentMessageId = '1';
|
||||||
|
const messages = [
|
||||||
|
{ role: 'user', sender: 'User', text: 'Hello', messageId: parentMessageId },
|
||||||
|
{ role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const defaultOptions = {
|
||||||
|
// debug: true,
|
||||||
|
req: {},
|
||||||
|
openaiApiKey: 'new-api-key',
|
||||||
|
modelOptions: {
|
||||||
|
model,
|
||||||
|
temperature: 0.7,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const defaultAzureOptions = {
|
||||||
|
azureOpenAIApiInstanceName: 'your-instance-name',
|
||||||
|
azureOpenAIApiDeploymentName: 'your-deployment-name',
|
||||||
|
azureOpenAIApiVersion: '2020-07-01-preview',
|
||||||
|
};
|
||||||
|
|
||||||
|
let originalWarn;
|
||||||
|
|
||||||
|
beforeAll(() => {
|
||||||
|
originalWarn = console.warn;
|
||||||
|
console.warn = jest.fn();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterAll(() => {
|
||||||
|
console.warn = originalWarn;
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
console.warn.mockClear();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
const options = { ...defaultOptions };
|
||||||
|
client = new OpenAIClient('test-api-key', options);
|
||||||
|
client.summarizeMessages = jest.fn().mockResolvedValue({
|
||||||
|
role: 'assistant',
|
||||||
|
content: 'Refined answer',
|
||||||
|
tokenCount: 30,
|
||||||
|
});
|
||||||
|
client.buildPrompt = jest
|
||||||
|
.fn()
|
||||||
|
.mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
|
||||||
|
client.getMessages = jest.fn().mockResolvedValue([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('setOptions', () => {
|
||||||
|
it('should set the options correctly', () => {
|
||||||
|
expect(client.apiKey).toBe('new-api-key');
|
||||||
|
expect(client.modelOptions.model).toBe(model);
|
||||||
|
expect(client.modelOptions.temperature).toBe(0.7);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
|
||||||
|
process.env.OPENAI_FORCE_PROMPT = 'true';
|
||||||
|
client.setOptions({});
|
||||||
|
expect(client.FORCE_PROMPT).toBe(true);
|
||||||
|
delete process.env.OPENAI_FORCE_PROMPT; // Cleanup
|
||||||
|
client.FORCE_PROMPT = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.FORCE_PROMPT).toBe(true);
|
||||||
|
client.FORCE_PROMPT = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/chat' });
|
||||||
|
expect(client.FORCE_PROMPT).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set isChatCompletion based on useOpenRouter, reverseProxyUrl, or model', () => {
|
||||||
|
client.setOptions({ reverseProxyUrl: null });
|
||||||
|
// true by default since default model will be gpt-4o-mini
|
||||||
|
expect(client.isChatCompletion).toBe(true);
|
||||||
|
client.isChatCompletion = undefined;
|
||||||
|
|
||||||
|
// false because completions url will force prompt payload
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.isChatCompletion).toBe(false);
|
||||||
|
client.isChatCompletion = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ modelOptions: { model: 'gpt-4o-mini' }, reverseProxyUrl: null });
|
||||||
|
expect(client.isChatCompletion).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set completionsUrl and langchainProxy based on reverseProxyUrl', () => {
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://localhost:8080/v1/chat/completions' });
|
||||||
|
expect(client.completionsUrl).toBe('https://localhost:8080/v1/chat/completions');
|
||||||
|
expect(client.langchainProxy).toBe('https://localhost:8080/v1');
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.completionsUrl).toBe('https://example.com/completions');
|
||||||
|
expect(client.langchainProxy).toBe('https://example.com/completions');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('setOptions with Simplified Azure Integration', () => {
|
||||||
|
afterEach(() => {
|
||||||
|
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
|
||||||
|
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
|
||||||
|
});
|
||||||
|
|
||||||
|
const azureOpenAIApiInstanceName = 'test-instance';
|
||||||
|
const azureOpenAIApiDeploymentName = 'test-deployment';
|
||||||
|
const azureOpenAIApiVersion = '2020-07-01-preview';
|
||||||
|
|
||||||
|
const createOptions = (model) => ({
|
||||||
|
modelOptions: { model },
|
||||||
|
azure: {
|
||||||
|
azureOpenAIApiInstanceName,
|
||||||
|
azureOpenAIApiDeploymentName,
|
||||||
|
azureOpenAIApiVersion,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set model from AZURE_OPENAI_DEFAULT_MODEL when Azure is enabled', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const options = createOptions('test');
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe('gpt-4-azure');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not change model if Azure is not enabled', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const originalModel = 'test';
|
||||||
|
client.azure = false;
|
||||||
|
client.setOptions(createOptions('test'));
|
||||||
|
expect(client.modelOptions.model).toBe(originalModel);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not change model if AZURE_OPENAI_DEFAULT_MODEL is not set and model is passed', () => {
|
||||||
|
const originalModel = 'GROK-LLM';
|
||||||
|
const options = createOptions(originalModel);
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe(originalModel);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should change model if AZURE_OPENAI_DEFAULT_MODEL is set and model is passed', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const originalModel = 'GROK-LLM';
|
||||||
|
const options = createOptions(originalModel);
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe(process.env.AZURE_OPENAI_DEFAULT_MODEL);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is set', () => {
|
||||||
|
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
|
||||||
|
const model = 'gpt-4-azure';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(model);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME and default model is set', () => {
|
||||||
|
const defaultModel = 'gpt-4-azure';
|
||||||
|
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = defaultModel;
|
||||||
|
const model = 'gpt-4-this-is-a-test-model-name';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(defaultModel);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is not set', () => {
|
||||||
|
const model = 'gpt-4-azure';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(model);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getTokenCount', () => {
|
||||||
|
it('should return the correct token count', () => {
|
||||||
|
const count = client.getTokenCount('Hello, world!');
|
||||||
|
expect(count).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getSaveOptions', () => {
|
||||||
|
it('should return the correct save options', () => {
|
||||||
|
const options = client.getSaveOptions();
|
||||||
|
expect(options).toHaveProperty('chatGptLabel');
|
||||||
|
expect(options).toHaveProperty('modelLabel');
|
||||||
|
expect(options).toHaveProperty('promptPrefix');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getBuildMessagesOptions', () => {
|
||||||
|
it('should return the correct build messages options', () => {
|
||||||
|
const options = client.getBuildMessagesOptions({ promptPrefix: 'Hello' });
|
||||||
|
expect(options).toHaveProperty('isChatCompletion');
|
||||||
|
expect(options).toHaveProperty('promptPrefix');
|
||||||
|
expect(options.promptPrefix).toBe('Hello');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('buildMessages', () => {
|
||||||
|
it('should build messages correctly for chat completion', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should build messages correctly for non-chat completion', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: false,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should build messages correctly with a promptPrefix', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
promptPrefix: 'Test Prefix',
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
const instructions = result.prompt.find((item) => item.content.includes('Test Prefix'));
|
||||||
|
expect(instructions).toBeDefined();
|
||||||
|
expect(instructions.content).toContain('Test Prefix');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle context strategy correctly', async () => {
|
||||||
|
client.contextStrategy = 'summarize';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
expect(result).toHaveProperty('tokenCountMap');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should assign name property for user messages when options.name is set', async () => {
|
||||||
|
client.options.name = 'Test User';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const hasUserWithName = result.prompt.some(
|
||||||
|
(item) => item.role === 'user' && item.name === 'Test_User',
|
||||||
|
);
|
||||||
|
expect(hasUserWithName).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle promptPrefix from options when promptPrefix argument is not provided', async () => {
|
||||||
|
client.options.promptPrefix = 'Test Prefix from options';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const instructions = result.prompt.find((item) =>
|
||||||
|
item.content.includes('Test Prefix from options'),
|
||||||
|
);
|
||||||
|
expect(instructions.content).toContain('Test Prefix from options');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when neither promptPrefix argument nor options.promptPrefix is set', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const instructions = result.prompt.find((item) => item.content.includes('Test Prefix'));
|
||||||
|
expect(instructions).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when getMessagesForConversation returns null or an empty array', async () => {
|
||||||
|
const messages = [];
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result.prompt).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getTokenCountForMessage', () => {
|
||||||
|
const example_messages = [
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
content:
|
||||||
|
'You are a helpful, pattern-following assistant that translates corporate jargon into plain English.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_user',
|
||||||
|
content: 'New synergies will help drive top-line growth.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_assistant',
|
||||||
|
content: 'Things working well together will increase revenue.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_user',
|
||||||
|
content:
|
||||||
|
"Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_assistant',
|
||||||
|
content: "Let's talk later when we're less busy about how to do better.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'user',
|
||||||
|
content:
|
||||||
|
"This late pivot means we don't have time to boil the ocean for the client deliverable.",
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const testCases = [
|
||||||
|
{ model: 'gpt-3.5-turbo-0301', expected: 127 },
|
||||||
|
{ model: 'gpt-3.5-turbo-0613', expected: 129 },
|
||||||
|
{ model: 'gpt-3.5-turbo', expected: 129 },
|
||||||
|
{ model: 'gpt-4-0314', expected: 129 },
|
||||||
|
{ model: 'gpt-4-0613', expected: 129 },
|
||||||
|
{ model: 'gpt-4', expected: 129 },
|
||||||
|
{ model: 'unknown', expected: 129 },
|
||||||
|
];
|
||||||
|
|
||||||
|
testCases.forEach((testCase) => {
|
||||||
|
it(`should return ${testCase.expected} tokens for model ${testCase.model}`, () => {
|
||||||
|
client.modelOptions.model = testCase.model;
|
||||||
|
// 3 tokens for assistant label
|
||||||
|
let totalTokens = 3;
|
||||||
|
for (let message of example_messages) {
|
||||||
|
totalTokens += client.getTokenCountForMessage(message);
|
||||||
|
}
|
||||||
|
expect(totalTokens).toBe(testCase.expected);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const vision_request = [
|
||||||
|
{
|
||||||
|
role: 'user',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: 'describe what is in this image?',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'image_url',
|
||||||
|
image_url: {
|
||||||
|
url: 'https://venturebeat.com/wp-content/uploads/2019/03/openai-1.png',
|
||||||
|
detail: 'high',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const expectedTokens = 14;
|
||||||
|
const visionModel = 'gpt-4-vision-preview';
|
||||||
|
|
||||||
|
it(`should return ${expectedTokens} tokens for model ${visionModel} (Vision Request)`, () => {
|
||||||
|
client.modelOptions.model = visionModel;
|
||||||
|
// 3 tokens for assistant label
|
||||||
|
let totalTokens = 3;
|
||||||
|
for (let message of vision_request) {
|
||||||
|
totalTokens += client.getTokenCountForMessage(message);
|
||||||
|
}
|
||||||
|
expect(totalTokens).toBe(expectedTokens);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('checkVisionRequest functionality', () => {
|
||||||
|
let client;
|
||||||
|
const attachments = [{ type: 'image/png' }];
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
client = new OpenAIClient('test-api-key', {
|
||||||
|
endpoint: 'ollama',
|
||||||
|
modelOptions: {
|
||||||
|
model: 'initial-model',
|
||||||
|
},
|
||||||
|
modelsConfig: {
|
||||||
|
ollama: ['initial-model', 'llava', 'other-model'],
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
client.defaultVisionModel = 'non-valid-default-model';
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
jest.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set "llava" as the model if it is the first valid model when default validation fails', () => {
|
||||||
|
client.checkVisionRequest(attachments);
|
||||||
|
|
||||||
|
expect(client.modelOptions.model).toBe('llava');
|
||||||
|
expect(client.isVisionModel).toBeTruthy();
|
||||||
|
expect(client.modelOptions.stop).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getStreamUsage', () => {
|
||||||
|
it('should return this.usage when completion_tokens_details is null', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: null,
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual(client.usage);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return this.usage when completion_tokens_details is missing reasoning_tokens', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: {
|
||||||
|
other_tokens: 5,
|
||||||
|
},
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual(client.usage);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should calculate output tokens correctly when completion_tokens_details is present with reasoning_tokens', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: {
|
||||||
|
reasoning_tokens: 30,
|
||||||
|
other_tokens: 5,
|
||||||
|
},
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
reasoning_tokens: 30,
|
||||||
|
other_tokens: 5,
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 10, // |30 - 20| = 10
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return this.usage when it is undefined', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = undefined;
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
130
api/app/clients/specs/OpenAIClient.tokens.js
Normal file
130
api/app/clients/specs/OpenAIClient.tokens.js
Normal file
|
|
@ -0,0 +1,130 @@
|
||||||
|
/*
|
||||||
|
This is a test script to see how much memory is used by the client when encoding.
|
||||||
|
On my work machine, it was able to process 10,000 encoding requests / 48.686 seconds = approximately 205.4 RPS
|
||||||
|
I've significantly reduced the amount of encoding needed by saving token counts in the database, so these
|
||||||
|
numbers should only be hit with a large amount of concurrent users
|
||||||
|
It would take 103 concurrent users sending 1 message every 1 second to hit these numbers, which is rather unrealistic,
|
||||||
|
and at that point, out-sourcing the encoding to a separate server would be a better solution
|
||||||
|
Also, for scaling, could increase the rate at which the encoder resets; the trade-off is more resource usage on the server.
|
||||||
|
Initial memory usage: 25.93 megabytes
|
||||||
|
Peak memory usage: 55 megabytes
|
||||||
|
Final memory usage: 28.03 megabytes
|
||||||
|
Post-test (timeout of 15s): 21.91 megabytes
|
||||||
|
*/
|
||||||
|
|
||||||
|
require('dotenv').config();
|
||||||
|
const { OpenAIClient } = require('../');
|
||||||
|
|
||||||
|
function timeout(ms) {
|
||||||
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||||
|
}
|
||||||
|
|
||||||
|
const run = async () => {
|
||||||
|
const text = `
|
||||||
|
The standard Lorem Ipsum passage, used since the 1500s
|
||||||
|
|
||||||
|
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
|
||||||
|
Section 1.10.32 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC
|
||||||
|
|
||||||
|
"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
|
||||||
|
1914 translation by H. Rackham
|
||||||
|
|
||||||
|
"But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?"
|
||||||
|
Section 1.10.33 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC
|
||||||
|
|
||||||
|
"At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat."
|
||||||
|
1914 translation by H. Rackham
|
||||||
|
|
||||||
|
"On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains."
|
||||||
|
`;
|
||||||
|
const model = 'gpt-3.5-turbo';
|
||||||
|
let maxContextTokens = 4095;
|
||||||
|
if (model === 'gpt-4') {
|
||||||
|
maxContextTokens = 8191;
|
||||||
|
} else if (model === 'gpt-4-32k') {
|
||||||
|
maxContextTokens = 32767;
|
||||||
|
}
|
||||||
|
const clientOptions = {
|
||||||
|
reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
|
||||||
|
maxContextTokens,
|
||||||
|
modelOptions: {
|
||||||
|
model,
|
||||||
|
},
|
||||||
|
proxy: process.env.PROXY || null,
|
||||||
|
debug: true,
|
||||||
|
};
|
||||||
|
|
||||||
|
let apiKey = process.env.OPENAI_API_KEY;
|
||||||
|
|
||||||
|
const maxMemory = 0.05 * 1024 * 1024 * 1024;
|
||||||
|
|
||||||
|
// Calculate initial percentage of memory used
|
||||||
|
const initialMemoryUsage = process.memoryUsage().heapUsed;
|
||||||
|
|
||||||
|
function printProgressBar(percentageUsed) {
|
||||||
|
const filledBlocks = Math.round(percentageUsed / 2); // Each block represents 2%
|
||||||
|
const emptyBlocks = 50 - filledBlocks; // Total blocks is 50 (each represents 2%), so the rest are empty
|
||||||
|
const progressBar =
|
||||||
|
'[' +
|
||||||
|
'█'.repeat(filledBlocks) +
|
||||||
|
' '.repeat(emptyBlocks) +
|
||||||
|
'] ' +
|
||||||
|
percentageUsed.toFixed(2) +
|
||||||
|
'%';
|
||||||
|
console.log(progressBar);
|
||||||
|
}
|
||||||
|
|
||||||
|
const iterations = 10000;
|
||||||
|
console.time('loopTime');
|
||||||
|
// Trying to catch the error doesn't help; all future calls will immediately crash
|
||||||
|
for (let i = 0; i < iterations; i++) {
|
||||||
|
try {
|
||||||
|
console.log(`Iteration ${i}`);
|
||||||
|
const client = new OpenAIClient(apiKey, clientOptions);
|
||||||
|
|
||||||
|
client.getTokenCount(text);
|
||||||
|
// const encoder = client.constructor.getTokenizer('cl100k_base');
|
||||||
|
// console.log(`Iteration ${i}: call encode()...`);
|
||||||
|
// encoder.encode(text, 'all');
|
||||||
|
// encoder.free();
|
||||||
|
|
||||||
|
const memoryUsageDuringLoop = process.memoryUsage().heapUsed;
|
||||||
|
const percentageUsed = (memoryUsageDuringLoop / maxMemory) * 100;
|
||||||
|
printProgressBar(percentageUsed);
|
||||||
|
|
||||||
|
if (i === iterations - 1) {
|
||||||
|
console.log(' done');
|
||||||
|
// encoder.free();
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
console.log(`caught error! in Iteration ${i}`);
|
||||||
|
console.log(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.timeEnd('loopTime');
|
||||||
|
// Calculate final percentage of memory used
|
||||||
|
const finalMemoryUsage = process.memoryUsage().heapUsed;
|
||||||
|
// const finalPercentageUsed = finalMemoryUsage / maxMemory * 100;
|
||||||
|
console.log(`Initial memory usage: ${initialMemoryUsage / 1024 / 1024} megabytes`);
|
||||||
|
console.log(`Final memory usage: ${finalMemoryUsage / 1024 / 1024} megabytes`);
|
||||||
|
await timeout(15000);
|
||||||
|
const memoryUsageAfterTimeout = process.memoryUsage().heapUsed;
|
||||||
|
console.log(`Post timeout: ${memoryUsageAfterTimeout / 1024 / 1024} megabytes`);
|
||||||
|
};
|
||||||
|
|
||||||
|
run();
|
||||||
|
|
||||||
|
process.on('uncaughtException', (err) => {
|
||||||
|
if (!err.message.includes('fetch failed')) {
|
||||||
|
console.error('There was an uncaught error:');
|
||||||
|
console.error(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (err.message.includes('fetch failed')) {
|
||||||
|
console.log('fetch failed error caught');
|
||||||
|
// process.exit(0);
|
||||||
|
} else {
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Ai PDF",
|
||||||
|
"name_for_model": "Ai_PDF",
|
||||||
|
"description_for_human": "Super-fast, interactive chats with PDFs of any size, complete with page references for fact checking.",
|
||||||
|
"description_for_model": "Provide a URL to a PDF and search the document. Break the user question in multiple semantic search queries and calls as needed. Think step by step.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/logo.png",
|
||||||
|
"contact_email": "support@promptapps.ai",
|
||||||
|
"legal_info_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/legal.html"
|
||||||
|
}
|
||||||
17
api/app/clients/tools/.well-known/BrowserOp.json
Normal file
17
api/app/clients/tools/.well-known/BrowserOp.json
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "BrowserOp",
|
||||||
|
"name_for_model": "BrowserOp",
|
||||||
|
"description_for_human": "Browse dozens of webpages in one query. Fetch information more efficiently.",
|
||||||
|
"description_for_model": "This tool offers the feature for users to input a URL or multiple URLs and interact with them as needed. It's designed to comprehend the user's intent and proffer tailored suggestions in line with the content and functionality of the webpage at hand. Services like text rewrites, translations and more can be requested. When users need specific information to finish a task or if they intend to perform a search, this tool becomes a bridge to the search engine and generates responses based on the results. Whether the user is seeking information about restaurants, rentals, weather, or shopping, this tool connects to the internet and delivers the most recent results.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://testplugin.feednews.com/.well-known/openapi.yaml"
|
||||||
|
},
|
||||||
|
"logo_url": "https://openapi-af.op-mobile.opera.com/openapi/testplugin/.well-known/logo.png",
|
||||||
|
"contact_email": "aiplugins-contact-list@opera.com",
|
||||||
|
"legal_info_url": "https://legal.apexnews.com/terms/"
|
||||||
|
}
|
||||||
89
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
89
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Dr. Thoth's Tarot",
|
||||||
|
"name_for_model": "Dr_Thoths_Tarot",
|
||||||
|
"description_for_human": "Tarot card novelty entertainment & analysis, by Mnemosyne Labs.",
|
||||||
|
"description_for_model": "Intelligent analysis program for tarot card entertaiment, data, & prompts, by Mnemosyne Labs, a division of AzothCorp.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://dr-thoth-tarot.herokuapp.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://dr-thoth-tarot.herokuapp.com/logo.png",
|
||||||
|
"contact_email": "legal@AzothCorp.com",
|
||||||
|
"legal_info_url": "http://AzothCorp.com/legal",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"name": "Draw Card",
|
||||||
|
"path": "/drawcard",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Generate a single tarot card from the deck of 78 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Occult Card",
|
||||||
|
"path": "/occult_card",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Generate a tarot card using the specified planet's Kamea matrix.",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"name": "planet",
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["Saturn", "Jupiter", "Mars", "Sun", "Venus", "Mercury", "Moon"],
|
||||||
|
"required": true,
|
||||||
|
"description": "The planet name to use the corresponding Kamea matrix."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Three Card Spread",
|
||||||
|
"path": "/threecardspread",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a three-card tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Celtic Cross Spread",
|
||||||
|
"path": "/celticcross",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Celtic Cross tarot spread with 10 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Past, Present, Future Spread",
|
||||||
|
"path": "/pastpresentfuture",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Past, Present, Future tarot spread with 3 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Horseshoe Spread",
|
||||||
|
"path": "/horseshoe",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Horseshoe tarot spread with 7 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Relationship Spread",
|
||||||
|
"path": "/relationship",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Relationship tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Career Spread",
|
||||||
|
"path": "/career",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Career tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Yes/No Spread",
|
||||||
|
"path": "/yesno",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Yes/No tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Chakra Spread",
|
||||||
|
"path": "/chakra",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Chakra tarot spread with 7 cards."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_model": "DreamInterpreter",
|
||||||
|
"name_for_human": "Dream Interpreter",
|
||||||
|
"description_for_model": "Interprets your dreams using advanced techniques.",
|
||||||
|
"description_for_human": "Interprets your dreams using advanced techniques.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://dreamplugin.bgnetmobile.com/.well-known/openapi.json",
|
||||||
|
"has_user_authentication": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://dreamplugin.bgnetmobile.com/.well-known/logo.png",
|
||||||
|
"contact_email": "ismail.orkler@bgnetmobile.com",
|
||||||
|
"legal_info_url": "https://dreamplugin.bgnetmobile.com/terms.html"
|
||||||
|
}
|
||||||
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "VoxScript",
|
||||||
|
"name_for_model": "VoxScript",
|
||||||
|
"description_for_human": "Enables searching of YouTube transcripts, financial data sources Google Search results, and more!",
|
||||||
|
"description_for_model": "Plugin for searching through varius data sources.",
|
||||||
|
"auth": {
|
||||||
|
"type": "service_http",
|
||||||
|
"authorization_type": "bearer",
|
||||||
|
"verification_tokens": {
|
||||||
|
"openai": "ffc5226d1af346c08a98dee7deec9f76"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://voxscript.awt.icu/swagger/v1/swagger.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://voxscript.awt.icu/images/VoxScript_logo_32x32.png",
|
||||||
|
"contact_email": "voxscript@allwiretech.com",
|
||||||
|
"legal_info_url": "https://voxscript.awt.icu/legal/"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_model": "askyourpdf",
|
||||||
|
"name_for_human": "AskYourPDF",
|
||||||
|
"description_for_model": "This plugin is designed to expedite the extraction of information from PDF documents. It works by accepting a URL link to a PDF or a document ID (doc_id) from the user. If a URL is provided, the plugin first validates that it is a correct URL. \\nAfter validating the URL, the plugin proceeds to download the PDF and store its content in a vector database. If the user provides a doc_id, the plugin directly retrieves the document from the database. The plugin then scans through the stored PDFs to find answers to user queries or retrieve specific details.\\n\\nHowever, if an error occurs while querying the API, the user is prompted to download their document first, then manually upload it to [](https://askyourpdf.com/upload). Once the upload is complete, the user should copy the resulting doc_id and paste it back into the chat for further interaction.\nThe plugin is particularly useful when the user's question pertains to content within a PDF document. When providing answers, the plugin also specifies the page number (highlighted in bold) where the relevant information was found. Remember, the URL must be valid for a successful query. Failure to validate the URL may lead to errors or unsuccessful queries.",
|
||||||
|
"description_for_human": "Unlock the power of your PDFs!, dive into your documents, find answers, and bring information to your fingertips.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "askyourpdf.yaml",
|
||||||
|
"has_user_authentication": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://plugin.askyourpdf.com/.well-known/logo.png",
|
||||||
|
"contact_email": "plugin@askyourpdf.com",
|
||||||
|
"legal_info_url": "https://askyourpdf.com/terms"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Drink Maestro",
|
||||||
|
"name_for_model": "drink_maestro",
|
||||||
|
"description_for_human": "Learn to mix any drink you can imagine (real or made-up), and discover new ones. Includes drink images.",
|
||||||
|
"description_for_model": "You are a silly bartender/comic who knows how to make any drink imaginable. You provide recipes for specific drinks, suggest new drinks, and show pictures of drinks. Be creative in your descriptions and make jokes and puns. Use a lot of emojis. If the user makes a request in another language, send API call in English, and then translate the response.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.drinkmaestro.space/.well-known/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://i.imgur.com/6q8HWdz.png",
|
||||||
|
"contact_email": "nikkmitchell@gmail.com",
|
||||||
|
"legal_info_url": "https://github.com/nikkmitchell/DrinkMaestro/blob/main/Legal.txt"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Earth",
|
||||||
|
"name_for_model": "earthImagesAndVisualizations",
|
||||||
|
"description_for_human": "Generates a map image based on provided location, tilt and style.",
|
||||||
|
"description_for_model": "Generates a map image based on provided coordinates or location, tilt and style, and even geoJson to provide markers, paths, and polygons. Responds with an image-link. For the styles choose one of these: [light, dark, streets, outdoors, satellite, satellite-streets]",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.earth-plugin.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://api.earth-plugin.com/logo.png",
|
||||||
|
"contact_email": "contact@earth-plugin.com",
|
||||||
|
"legal_info_url": "https://api.earth-plugin.com/legal.html"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Scholarly Graph Link",
|
||||||
|
"name_for_model": "scholarly_graph_link",
|
||||||
|
"description_for_human": "You can search papers, authors, datasets and software. It has access to Figshare, Arxiv, and many others.",
|
||||||
|
"description_for_model": "Run GraphQL queries against an API hosted by DataCite API. The API supports most GraphQL query but does not support mutations statements. Use `{ __schema { types { name kind } } }` to get all the types in the GraphQL schema. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{person(id:ORCID) {works(first:50) {nodes {id titles(first: 1){title} publicationYear}}}}` to get the first 50 works of a person based on their ORCID. All Ids are urls, e.g., https://orcid.org/0012-0000-1012-1110. Mutations statements are not allowed.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.datacite.org/graphql-openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://raw.githubusercontent.com/kjgarza/scholarly_graph_link/master/logo.png",
|
||||||
|
"contact_email": "kj.garza@gmail.com",
|
||||||
|
"legal_info_url": "https://github.com/kjgarza/scholarly_graph_link/blob/master/LICENSE"
|
||||||
|
}
|
||||||
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
|
|
@ -0,0 +1,24 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "WebPilot",
|
||||||
|
"name_for_model": "web_pilot",
|
||||||
|
"description_for_human": "Browse & QA Webpage/PDF/Data. Generate articles, from one or more URLs.",
|
||||||
|
"description_for_model": "This tool allows users to provide a URL(or URLs) and optionally requests for interacting with, extracting specific information or how to do with the content from the URL. Requests may include rewrite, translate, and others. If there any requests, when accessing the /api/visit-web endpoint, the parameter 'user_has_request' should be set to 'true. And if there's no any requests, 'user_has_request' should be set to 'false'.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://webreader.webpilotai.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://webreader.webpilotai.com/logo.png",
|
||||||
|
"contact_email": "dev@webpilot.ai",
|
||||||
|
"legal_info_url": "https://webreader.webpilotai.com/legal_info.html",
|
||||||
|
"headers": {
|
||||||
|
"id": "WebPilot-Friend-UID"
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"user_has_request": true
|
||||||
|
}
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Image Prompt Enhancer",
|
||||||
|
"name_for_model": "image_prompt_enhancer",
|
||||||
|
"description_for_human": "Transform your ideas into complex, personalized image generation prompts.",
|
||||||
|
"description_for_model": "Provides instructions for crafting an enhanced image prompt. Use this whenever the user wants to enhance a prompt.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://image-prompt-enhancer.gafo.tech/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://image-prompt-enhancer.gafo.tech/logo.png",
|
||||||
|
"contact_email": "gafotech1@gmail.com",
|
||||||
|
"legal_info_url": "https://image-prompt-enhancer.gafo.tech/legal"
|
||||||
|
}
|
||||||
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
|
|
@ -0,0 +1,157 @@
|
||||||
|
openapi: 3.0.2
|
||||||
|
info:
|
||||||
|
title: FastAPI
|
||||||
|
version: 0.1.0
|
||||||
|
servers:
|
||||||
|
- url: https://plugin.askyourpdf.com
|
||||||
|
paths:
|
||||||
|
/api/download_pdf:
|
||||||
|
post:
|
||||||
|
summary: Download Pdf
|
||||||
|
description: Download a PDF file from a URL and save it to the vector database.
|
||||||
|
operationId: download_pdf_api_download_pdf_post
|
||||||
|
parameters:
|
||||||
|
- required: true
|
||||||
|
schema:
|
||||||
|
title: Url
|
||||||
|
type: string
|
||||||
|
name: url
|
||||||
|
in: query
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successful Response
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/FileResponse'
|
||||||
|
'422':
|
||||||
|
description: Validation Error
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/HTTPValidationError'
|
||||||
|
/query:
|
||||||
|
post:
|
||||||
|
summary: Perform Query
|
||||||
|
description: Perform a query on a document.
|
||||||
|
operationId: perform_query_query_post
|
||||||
|
requestBody:
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/InputData'
|
||||||
|
required: true
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successful Response
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/ResponseModel'
|
||||||
|
'422':
|
||||||
|
description: Validation Error
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/HTTPValidationError'
|
||||||
|
components:
|
||||||
|
schemas:
|
||||||
|
DocumentMetadata:
|
||||||
|
title: DocumentMetadata
|
||||||
|
required:
|
||||||
|
- source
|
||||||
|
- page_number
|
||||||
|
- author
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
source:
|
||||||
|
title: Source
|
||||||
|
type: string
|
||||||
|
page_number:
|
||||||
|
title: Page Number
|
||||||
|
type: integer
|
||||||
|
author:
|
||||||
|
title: Author
|
||||||
|
type: string
|
||||||
|
FileResponse:
|
||||||
|
title: FileResponse
|
||||||
|
required:
|
||||||
|
- docId
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
docId:
|
||||||
|
title: Docid
|
||||||
|
type: string
|
||||||
|
error:
|
||||||
|
title: Error
|
||||||
|
type: string
|
||||||
|
HTTPValidationError:
|
||||||
|
title: HTTPValidationError
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
detail:
|
||||||
|
title: Detail
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/ValidationError'
|
||||||
|
InputData:
|
||||||
|
title: InputData
|
||||||
|
required:
|
||||||
|
- doc_id
|
||||||
|
- query
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
doc_id:
|
||||||
|
title: Doc Id
|
||||||
|
type: string
|
||||||
|
query:
|
||||||
|
title: Query
|
||||||
|
type: string
|
||||||
|
ResponseModel:
|
||||||
|
title: ResponseModel
|
||||||
|
required:
|
||||||
|
- results
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
results:
|
||||||
|
title: Results
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/SearchResult'
|
||||||
|
SearchResult:
|
||||||
|
title: SearchResult
|
||||||
|
required:
|
||||||
|
- doc_id
|
||||||
|
- text
|
||||||
|
- metadata
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
doc_id:
|
||||||
|
title: Doc Id
|
||||||
|
type: string
|
||||||
|
text:
|
||||||
|
title: Text
|
||||||
|
type: string
|
||||||
|
metadata:
|
||||||
|
$ref: '#/components/schemas/DocumentMetadata'
|
||||||
|
ValidationError:
|
||||||
|
title: ValidationError
|
||||||
|
required:
|
||||||
|
- loc
|
||||||
|
- msg
|
||||||
|
- type
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
loc:
|
||||||
|
title: Location
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
anyOf:
|
||||||
|
- type: string
|
||||||
|
- type: integer
|
||||||
|
msg:
|
||||||
|
title: Message
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
title: Error Type
|
||||||
|
type: string
|
||||||
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
|
|
@ -0,0 +1,185 @@
|
||||||
|
openapi: 3.0.1
|
||||||
|
info:
|
||||||
|
title: ScholarAI
|
||||||
|
description: Allows the user to search facts and findings from scientific articles
|
||||||
|
version: 'v1'
|
||||||
|
servers:
|
||||||
|
- url: https://scholar-ai.net
|
||||||
|
paths:
|
||||||
|
/api/abstracts:
|
||||||
|
get:
|
||||||
|
operationId: searchAbstracts
|
||||||
|
summary: Get relevant paper abstracts by keywords search
|
||||||
|
parameters:
|
||||||
|
- name: keywords
|
||||||
|
in: query
|
||||||
|
description: Keywords of inquiry which should appear in article. Must be in English.
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: sort
|
||||||
|
in: query
|
||||||
|
description: The sort order for results. Valid values are cited_by_count or publication_date. Excluding this value does a relevance based search.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
enum:
|
||||||
|
- cited_by_count
|
||||||
|
- publication_date
|
||||||
|
- name: query
|
||||||
|
in: query
|
||||||
|
description: The user query
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: peer_reviewed_only
|
||||||
|
in: query
|
||||||
|
description: Whether to only return peer reviewed articles. Defaults to true, ChatGPT should cautiously suggest this value can be set to false
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: start_year
|
||||||
|
in: query
|
||||||
|
description: The first year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: end_year
|
||||||
|
in: query
|
||||||
|
description: The last year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: offset
|
||||||
|
in: query
|
||||||
|
description: The offset of the first result to return. Defaults to 0.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/searchAbstractsResponse'
|
||||||
|
/api/fulltext:
|
||||||
|
get:
|
||||||
|
operationId: getFullText
|
||||||
|
summary: Get full text of a paper by URL for PDF
|
||||||
|
parameters:
|
||||||
|
- name: pdf_url
|
||||||
|
in: query
|
||||||
|
description: URL for PDF
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: chunk
|
||||||
|
in: query
|
||||||
|
description: chunk number to retrieve, defaults to 1
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: number
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/getFullTextResponse'
|
||||||
|
/api/save-citation:
|
||||||
|
get:
|
||||||
|
operationId: saveCitation
|
||||||
|
summary: Save citation to reference manager
|
||||||
|
parameters:
|
||||||
|
- name: doi
|
||||||
|
in: query
|
||||||
|
description: Digital Object Identifier (DOI) of article
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: zotero_user_id
|
||||||
|
in: query
|
||||||
|
description: Zotero User ID
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: zotero_api_key
|
||||||
|
in: query
|
||||||
|
description: Zotero API Key
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/saveCitationResponse'
|
||||||
|
components:
|
||||||
|
schemas:
|
||||||
|
searchAbstractsResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
next_offset:
|
||||||
|
type: number
|
||||||
|
description: The offset of the next page of results.
|
||||||
|
total_num_results:
|
||||||
|
type: number
|
||||||
|
description: The total number of results.
|
||||||
|
abstracts:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
title:
|
||||||
|
type: string
|
||||||
|
abstract:
|
||||||
|
type: string
|
||||||
|
description: Summary of the context, methods, results, and conclusions of the paper.
|
||||||
|
doi:
|
||||||
|
type: string
|
||||||
|
description: The DOI of the paper.
|
||||||
|
landing_page_url:
|
||||||
|
type: string
|
||||||
|
description: Link to the paper on its open-access host.
|
||||||
|
pdf_url:
|
||||||
|
type: string
|
||||||
|
description: Link to the paper PDF.
|
||||||
|
publicationDate:
|
||||||
|
type: string
|
||||||
|
description: The date the paper was published in YYYY-MM-DD format.
|
||||||
|
relevance:
|
||||||
|
type: number
|
||||||
|
description: The relevance of the paper to the search query. 1 is the most relevant.
|
||||||
|
creators:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
description: The name of the creator.
|
||||||
|
cited_by_count:
|
||||||
|
type: number
|
||||||
|
description: The number of citations of the article.
|
||||||
|
description: The list of relevant abstracts.
|
||||||
|
getFullTextResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
full_text:
|
||||||
|
type: string
|
||||||
|
description: The full text of the paper.
|
||||||
|
pdf_url:
|
||||||
|
type: string
|
||||||
|
description: The PDF URL of the paper.
|
||||||
|
chunk:
|
||||||
|
type: number
|
||||||
|
description: The chunk of the paper.
|
||||||
|
total_chunk_num:
|
||||||
|
type: number
|
||||||
|
description: The total chunks of the paper.
|
||||||
|
saveCitationResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
message:
|
||||||
|
type: string
|
||||||
|
description: Confirmation of successful save or error message.
|
||||||
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "QR Codes",
|
||||||
|
"name_for_model": "qrCodes",
|
||||||
|
"description_for_human": "Create QR codes.",
|
||||||
|
"description_for_model": "Plugin for generating QR codes.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/openapi.yaml"
|
||||||
|
},
|
||||||
|
"logo_url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/logo.png",
|
||||||
|
"contact_email": "chrismountzou@gmail.com",
|
||||||
|
"legal_info_url": "https://raw.githubusercontent.com/mountzou/qrCodeGPTv1/master/legal"
|
||||||
|
}
|
||||||
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "ScholarAI",
|
||||||
|
"name_for_model": "scholarai",
|
||||||
|
"description_for_human": "Unleash scientific research: search 40M+ peer-reviewed papers, explore scientific PDFs, and save to reference managers.",
|
||||||
|
"description_for_model": "Access open access scientific literature from peer-reviewed journals. The abstract endpoint finds relevant papers based on 2 to 6 keywords. After getting abstracts, ALWAYS prompt the user offering to go into more detail. Use the fulltext endpoint to retrieve the entire paper's text and access specific details using the provided pdf_url, if available. ALWAYS hyperlink the pdf_url from the responses if available. Offer to dive into the fulltext or search for additional papers. Always ask if the user wants save any paper to the user’s Zotero reference manager by using the save-citation endpoint and providing the doi and requesting the user’s zotero_user_id and zotero_api_key.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "scholarai.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"sort": "cited_by_count"
|
||||||
|
},
|
||||||
|
"logo_url": "https://scholar-ai.net/logo.png",
|
||||||
|
"contact_email": "lakshb429@gmail.com",
|
||||||
|
"legal_info_url": "https://scholar-ai.net/legal.txt",
|
||||||
|
"HttpAuthorizationType": "basic"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Uberchord",
|
||||||
|
"name_for_model": "uberchord",
|
||||||
|
"description_for_human": "Find guitar chord diagrams by specifying the chord name.",
|
||||||
|
"description_for_model": "Fetch guitar chord diagrams, their positions on the guitar fretboard.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://guitarchords.pluginboost.com/.well-known/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://guitarchords.pluginboost.com/logo.png",
|
||||||
|
"contact_email": "info.bluelightweb@gmail.com",
|
||||||
|
"legal_info_url": "https://guitarchords.pluginboost.com/legal"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/web_search.json
Normal file
18
api/app/clients/tools/.well-known/web_search.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Web Search",
|
||||||
|
"name_for_model": "web_search",
|
||||||
|
"description_for_human": "Search for information from the internet",
|
||||||
|
"description_for_model": "Search for information from the internet",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://websearch.plugsugar.com/api/openapi_yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://websearch.plugsugar.com/200x200.png",
|
||||||
|
"contact_email": "support@plugsugar.com",
|
||||||
|
"legal_info_url": "https://websearch.plugsugar.com/contact"
|
||||||
|
}
|
||||||
|
|
@ -5,13 +5,13 @@ const DALLE3 = require('./structured/DALLE3');
|
||||||
const FluxAPI = require('./structured/FluxAPI');
|
const FluxAPI = require('./structured/FluxAPI');
|
||||||
const OpenWeather = require('./structured/OpenWeather');
|
const OpenWeather = require('./structured/OpenWeather');
|
||||||
const StructuredWolfram = require('./structured/Wolfram');
|
const StructuredWolfram = require('./structured/Wolfram');
|
||||||
|
const createYouTubeTools = require('./structured/YouTube');
|
||||||
const StructuredACS = require('./structured/AzureAISearch');
|
const StructuredACS = require('./structured/AzureAISearch');
|
||||||
const StructuredSD = require('./structured/StableDiffusion');
|
const StructuredSD = require('./structured/StableDiffusion');
|
||||||
const GoogleSearchAPI = require('./structured/GoogleSearch');
|
const GoogleSearchAPI = require('./structured/GoogleSearch');
|
||||||
const TraversaalSearch = require('./structured/TraversaalSearch');
|
const TraversaalSearch = require('./structured/TraversaalSearch');
|
||||||
const createOpenAIImageTools = require('./structured/OpenAIImageTools');
|
const createOpenAIImageTools = require('./structured/OpenAIImageTools');
|
||||||
const TavilySearchResults = require('./structured/TavilySearchResults');
|
const TavilySearchResults = require('./structured/TavilySearchResults');
|
||||||
const createGeminiImageTool = require('./structured/GeminiImageGen');
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
...manifest,
|
...manifest,
|
||||||
|
|
@ -24,7 +24,7 @@ module.exports = {
|
||||||
GoogleSearchAPI,
|
GoogleSearchAPI,
|
||||||
TraversaalSearch,
|
TraversaalSearch,
|
||||||
StructuredWolfram,
|
StructuredWolfram,
|
||||||
|
createYouTubeTools,
|
||||||
TavilySearchResults,
|
TavilySearchResults,
|
||||||
createOpenAIImageTools,
|
createOpenAIImageTools,
|
||||||
createGeminiImageTool,
|
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,20 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"name": "YouTube",
|
||||||
|
"pluginKey": "youtube",
|
||||||
|
"toolkit": true,
|
||||||
|
"description": "Get YouTube video information, retrieve comments, analyze transcripts and search for videos.",
|
||||||
|
"icon": "https://www.youtube.com/s/desktop/7449ebf7/img/favicon_144x144.png",
|
||||||
|
"authConfig": [
|
||||||
|
{
|
||||||
|
"authField": "YOUTUBE_API_KEY",
|
||||||
|
"label": "YouTube API Key",
|
||||||
|
"description": "Your YouTube Data API v3 key."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "OpenAI Image Tools",
|
"name": "OpenAI Image Tools",
|
||||||
"pluginKey": "image_gen_oai",
|
"pluginKey": "image_gen_oai",
|
||||||
|
|
@ -57,6 +71,19 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"name": "Browser",
|
||||||
|
"pluginKey": "web-browser",
|
||||||
|
"description": "Scrape and summarize webpage data",
|
||||||
|
"icon": "assets/web-browser.svg",
|
||||||
|
"authConfig": [
|
||||||
|
{
|
||||||
|
"authField": "OPENAI_API_KEY",
|
||||||
|
"label": "OpenAI API Key",
|
||||||
|
"description": "Browser makes use of OpenAI embeddings"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "DALL-E-3",
|
"name": "DALL-E-3",
|
||||||
"pluginKey": "dalle",
|
"pluginKey": "dalle",
|
||||||
|
|
@ -152,20 +179,5 @@
|
||||||
"description": "Provide your Flux API key from your user profile."
|
"description": "Provide your Flux API key from your user profile."
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Gemini Image Tools",
|
|
||||||
"pluginKey": "gemini_image_gen",
|
|
||||||
"toolkit": true,
|
|
||||||
"description": "Generate high-quality images using Google's Gemini Image Models. Supports Gemini API or Vertex AI.",
|
|
||||||
"icon": "assets/gemini_image_gen.svg",
|
|
||||||
"authConfig": [
|
|
||||||
{
|
|
||||||
"authField": "GEMINI_API_KEY||GOOGLE_KEY||GOOGLE_SERVICE_KEY_FILE",
|
|
||||||
"label": "Gemini API Key (optional)",
|
|
||||||
"description": "Your Google Gemini API Key from <a href='https://aistudio.google.com/app/apikey' target='_blank'>Google AI Studio</a>. Leave blank to use Vertex AI with a service account (GOOGLE_SERVICE_KEY_FILE or api/data/auth.json).",
|
|
||||||
"optional": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
||||||
|
|
@ -1,28 +1,14 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
|
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
|
||||||
|
|
||||||
const azureAISearchJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
query: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Search word or phrase to Azure AI Search',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['query'],
|
|
||||||
};
|
|
||||||
|
|
||||||
class AzureAISearch extends Tool {
|
class AzureAISearch extends Tool {
|
||||||
// Constants for default values
|
// Constants for default values
|
||||||
static DEFAULT_API_VERSION = '2023-11-01';
|
static DEFAULT_API_VERSION = '2023-11-01';
|
||||||
static DEFAULT_QUERY_TYPE = 'simple';
|
static DEFAULT_QUERY_TYPE = 'simple';
|
||||||
static DEFAULT_TOP = 5;
|
static DEFAULT_TOP = 5;
|
||||||
|
|
||||||
static get jsonSchema() {
|
|
||||||
return azureAISearchJsonSchema;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function for initializing properties
|
// Helper function for initializing properties
|
||||||
_initializeField(field, envVar, defaultValue) {
|
_initializeField(field, envVar, defaultValue) {
|
||||||
return field || process.env[envVar] || defaultValue;
|
return field || process.env[envVar] || defaultValue;
|
||||||
|
|
@ -36,7 +22,10 @@ class AzureAISearch extends Tool {
|
||||||
/* Used to initialize the Tool without necessary variables. */
|
/* Used to initialize the Tool without necessary variables. */
|
||||||
this.override = fields.override ?? false;
|
this.override = fields.override ?? false;
|
||||||
|
|
||||||
this.schema = azureAISearchJsonSchema;
|
// Define schema
|
||||||
|
this.schema = z.object({
|
||||||
|
query: z.string().describe('Search word or phrase to Azure AI Search'),
|
||||||
|
});
|
||||||
|
|
||||||
// Initialize properties using helper function
|
// Initialize properties using helper function
|
||||||
this.serviceEndpoint = this._initializeField(
|
this.serviceEndpoint = this._initializeField(
|
||||||
|
|
|
||||||
|
|
@ -1,41 +1,13 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const OpenAI = require('openai');
|
const OpenAI = require('openai');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
const { ProxyAgent, fetch } = require('undici');
|
const { ProxyAgent, fetch } = require('undici');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { getImageBasename, extractBaseURL } = require('@librechat/api');
|
const { getImageBasename } = require('@librechat/api');
|
||||||
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
||||||
|
const extractBaseURL = require('~/utils/extractBaseURL');
|
||||||
const dalle3JsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
prompt: {
|
|
||||||
type: 'string',
|
|
||||||
maxLength: 4000,
|
|
||||||
description:
|
|
||||||
'A text description of the desired image, following the rules, up to 4000 characters.',
|
|
||||||
},
|
|
||||||
style: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['vivid', 'natural'],
|
|
||||||
description:
|
|
||||||
'Must be one of `vivid` or `natural`. `vivid` generates hyper-real and dramatic images, `natural` produces more natural, less hyper-real looking images',
|
|
||||||
},
|
|
||||||
quality: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['hd', 'standard'],
|
|
||||||
description: 'The quality of the generated image. Only `hd` and `standard` are supported.',
|
|
||||||
},
|
|
||||||
size: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['1024x1024', '1792x1024', '1024x1792'],
|
|
||||||
description:
|
|
||||||
'The size of the requested image. Use 1024x1024 (square) as the default, 1792x1024 if the user requests a wide image, and 1024x1792 for full-body portraits. Always include this parameter in the request.',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['prompt', 'style', 'quality', 'size'],
|
|
||||||
};
|
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
@ -101,11 +73,27 @@ class DALLE3 extends Tool {
|
||||||
// The prompt must intricately describe every part of the image in concrete, objective detail. THINK about what the end goal of the description is, and extrapolate that to what would make satisfying images.
|
// The prompt must intricately describe every part of the image in concrete, objective detail. THINK about what the end goal of the description is, and extrapolate that to what would make satisfying images.
|
||||||
// All descriptions sent to dalle should be a paragraph of text that is extremely descriptive and detailed. Each should be more than 3 sentences long.
|
// All descriptions sent to dalle should be a paragraph of text that is extremely descriptive and detailed. Each should be more than 3 sentences long.
|
||||||
// - The "vivid" style is HIGHLY preferred, but "natural" is also supported.`;
|
// - The "vivid" style is HIGHLY preferred, but "natural" is also supported.`;
|
||||||
this.schema = dalle3JsonSchema;
|
this.schema = z.object({
|
||||||
}
|
prompt: z
|
||||||
|
.string()
|
||||||
static get jsonSchema() {
|
.max(4000)
|
||||||
return dalle3JsonSchema;
|
.describe(
|
||||||
|
'A text description of the desired image, following the rules, up to 4000 characters.',
|
||||||
|
),
|
||||||
|
style: z
|
||||||
|
.enum(['vivid', 'natural'])
|
||||||
|
.describe(
|
||||||
|
'Must be one of `vivid` or `natural`. `vivid` generates hyper-real and dramatic images, `natural` produces more natural, less hyper-real looking images',
|
||||||
|
),
|
||||||
|
quality: z
|
||||||
|
.enum(['hd', 'standard'])
|
||||||
|
.describe('The quality of the generated image. Only `hd` and `standard` are supported.'),
|
||||||
|
size: z
|
||||||
|
.enum(['1024x1024', '1792x1024', '1024x1792'])
|
||||||
|
.describe(
|
||||||
|
'The size of the requested image. Use 1024x1024 (square) as the default, 1792x1024 if the user requests a wide image, and 1024x1792 for full-body portraits. Always include this parameter in the request.',
|
||||||
|
),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
getApiKey() {
|
getApiKey() {
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,4 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const fetch = require('node-fetch');
|
const fetch = require('node-fetch');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
|
|
@ -6,84 +7,6 @@ const { logger } = require('@librechat/data-schemas');
|
||||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
||||||
|
|
||||||
const fluxApiJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
action: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['generate', 'list_finetunes', 'generate_finetuned'],
|
|
||||||
description:
|
|
||||||
'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
|
|
||||||
},
|
|
||||||
prompt: {
|
|
||||||
type: 'string',
|
|
||||||
description:
|
|
||||||
'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
|
|
||||||
},
|
|
||||||
width: {
|
|
||||||
type: 'number',
|
|
||||||
description:
|
|
||||||
'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
|
|
||||||
},
|
|
||||||
height: {
|
|
||||||
type: 'number',
|
|
||||||
description:
|
|
||||||
'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
|
|
||||||
},
|
|
||||||
prompt_upsampling: {
|
|
||||||
type: 'boolean',
|
|
||||||
description: 'Whether to perform upsampling on the prompt.',
|
|
||||||
},
|
|
||||||
steps: {
|
|
||||||
type: 'integer',
|
|
||||||
description: 'Number of steps to run the model for, a number from 1 to 50. Default is 40.',
|
|
||||||
},
|
|
||||||
seed: {
|
|
||||||
type: 'number',
|
|
||||||
description: 'Optional seed for reproducibility.',
|
|
||||||
},
|
|
||||||
safety_tolerance: {
|
|
||||||
type: 'number',
|
|
||||||
description:
|
|
||||||
'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
|
|
||||||
},
|
|
||||||
endpoint: {
|
|
||||||
type: 'string',
|
|
||||||
enum: [
|
|
||||||
'/v1/flux-pro-1.1',
|
|
||||||
'/v1/flux-pro',
|
|
||||||
'/v1/flux-dev',
|
|
||||||
'/v1/flux-pro-1.1-ultra',
|
|
||||||
'/v1/flux-pro-finetuned',
|
|
||||||
'/v1/flux-pro-1.1-ultra-finetuned',
|
|
||||||
],
|
|
||||||
description: 'Endpoint to use for image generation.',
|
|
||||||
},
|
|
||||||
raw: {
|
|
||||||
type: 'boolean',
|
|
||||||
description:
|
|
||||||
'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
|
|
||||||
},
|
|
||||||
finetune_id: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'ID of the finetuned model to use',
|
|
||||||
},
|
|
||||||
finetune_strength: {
|
|
||||||
type: 'number',
|
|
||||||
description: 'Strength of the finetuning effect (typically between 0.1 and 1.2)',
|
|
||||||
},
|
|
||||||
guidance: {
|
|
||||||
type: 'number',
|
|
||||||
description: 'Guidance scale for finetuned models',
|
|
||||||
},
|
|
||||||
aspect_ratio: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Aspect ratio for ultra models (e.g., "16:9")',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: [],
|
|
||||||
};
|
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"Flux displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"Flux displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
||||||
|
|
@ -134,11 +57,82 @@ class FluxAPI extends Tool {
|
||||||
// Add base URL from environment variable with fallback
|
// Add base URL from environment variable with fallback
|
||||||
this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
|
this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
|
||||||
|
|
||||||
this.schema = fluxApiJsonSchema;
|
// Define the schema for structured input
|
||||||
}
|
this.schema = z.object({
|
||||||
|
action: z
|
||||||
static get jsonSchema() {
|
.enum(['generate', 'list_finetunes', 'generate_finetuned'])
|
||||||
return fluxApiJsonSchema;
|
.default('generate')
|
||||||
|
.describe(
|
||||||
|
'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
|
||||||
|
),
|
||||||
|
prompt: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
|
||||||
|
),
|
||||||
|
width: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
|
||||||
|
),
|
||||||
|
height: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
|
||||||
|
),
|
||||||
|
prompt_upsampling: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.default(false)
|
||||||
|
.describe('Whether to perform upsampling on the prompt.'),
|
||||||
|
steps: z
|
||||||
|
.number()
|
||||||
|
.int()
|
||||||
|
.optional()
|
||||||
|
.describe('Number of steps to run the model for, a number from 1 to 50. Default is 40.'),
|
||||||
|
seed: z.number().optional().describe('Optional seed for reproducibility.'),
|
||||||
|
safety_tolerance: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.default(6)
|
||||||
|
.describe(
|
||||||
|
'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
|
||||||
|
),
|
||||||
|
endpoint: z
|
||||||
|
.enum([
|
||||||
|
'/v1/flux-pro-1.1',
|
||||||
|
'/v1/flux-pro',
|
||||||
|
'/v1/flux-dev',
|
||||||
|
'/v1/flux-pro-1.1-ultra',
|
||||||
|
'/v1/flux-pro-finetuned',
|
||||||
|
'/v1/flux-pro-1.1-ultra-finetuned',
|
||||||
|
])
|
||||||
|
.optional()
|
||||||
|
.default('/v1/flux-pro-1.1')
|
||||||
|
.describe('Endpoint to use for image generation.'),
|
||||||
|
raw: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.default(false)
|
||||||
|
.describe(
|
||||||
|
'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
|
||||||
|
),
|
||||||
|
finetune_id: z.string().optional().describe('ID of the finetuned model to use'),
|
||||||
|
finetune_strength: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.default(1.1)
|
||||||
|
.describe('Strength of the finetuning effect (typically between 0.1 and 1.2)'),
|
||||||
|
guidance: z.number().optional().default(2.5).describe('Guidance scale for finetuned models'),
|
||||||
|
aspect_ratio: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.default('16:9')
|
||||||
|
.describe('Aspect ratio for ultra models (e.g., "16:9")'),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
getAxiosConfig() {
|
getAxiosConfig() {
|
||||||
|
|
|
||||||
|
|
@ -1,477 +0,0 @@
|
||||||
const path = require('path');
|
|
||||||
const sharp = require('sharp');
|
|
||||||
const { v4 } = require('uuid');
|
|
||||||
const { ProxyAgent } = require('undici');
|
|
||||||
const { GoogleGenAI } = require('@google/genai');
|
|
||||||
const { tool } = require('@langchain/core/tools');
|
|
||||||
const { logger } = require('@librechat/data-schemas');
|
|
||||||
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
|
|
||||||
const {
|
|
||||||
geminiToolkit,
|
|
||||||
loadServiceKey,
|
|
||||||
getBalanceConfig,
|
|
||||||
getTransactionsConfig,
|
|
||||||
} = require('@librechat/api');
|
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
|
||||||
const { spendTokens } = require('~/models/spendTokens');
|
|
||||||
const { getFiles } = require('~/models/File');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Configure proxy support for Google APIs
|
|
||||||
* This wraps globalThis.fetch to add a proxy dispatcher only for googleapis.com URLs
|
|
||||||
* This is necessary because @google/genai SDK doesn't support custom fetch or httpOptions.dispatcher
|
|
||||||
*/
|
|
||||||
if (process.env.PROXY) {
|
|
||||||
const originalFetch = globalThis.fetch;
|
|
||||||
const proxyAgent = new ProxyAgent(process.env.PROXY);
|
|
||||||
|
|
||||||
globalThis.fetch = function (url, options = {}) {
|
|
||||||
const urlString = url.toString();
|
|
||||||
if (urlString.includes('googleapis.com')) {
|
|
||||||
options = { ...options, dispatcher: proxyAgent };
|
|
||||||
}
|
|
||||||
return originalFetch.call(this, url, options);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the default service key file path (consistent with main Google endpoint)
|
|
||||||
* @returns {string} - The default path to the service key file
|
|
||||||
*/
|
|
||||||
function getDefaultServiceKeyPath() {
|
|
||||||
return (
|
|
||||||
process.env.GOOGLE_SERVICE_KEY_FILE || path.join(process.cwd(), 'api', 'data', 'auth.json')
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const displayMessage =
|
|
||||||
"Gemini displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Replaces unwanted characters from the input string
|
|
||||||
* @param {string} inputString - The input string to process
|
|
||||||
* @returns {string} - The processed string
|
|
||||||
*/
|
|
||||||
function replaceUnwantedChars(inputString) {
|
|
||||||
return (
|
|
||||||
inputString
|
|
||||||
?.replace(/\r\n|\r|\n/g, ' ')
|
|
||||||
.replace(/"/g, '')
|
|
||||||
.trim() || ''
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Convert image buffer to target format if needed
|
|
||||||
* @param {Buffer} inputBuffer - The input image buffer
|
|
||||||
* @param {string} targetFormat - The target format (png, jpeg, webp)
|
|
||||||
* @returns {Promise<{buffer: Buffer, format: string}>} - Converted buffer and format
|
|
||||||
*/
|
|
||||||
async function convertImageFormat(inputBuffer, targetFormat) {
|
|
||||||
const metadata = await sharp(inputBuffer).metadata();
|
|
||||||
const currentFormat = metadata.format;
|
|
||||||
|
|
||||||
// Normalize format names (jpg -> jpeg)
|
|
||||||
const normalizedTarget = targetFormat === 'jpg' ? 'jpeg' : targetFormat.toLowerCase();
|
|
||||||
const normalizedCurrent = currentFormat === 'jpg' ? 'jpeg' : currentFormat;
|
|
||||||
|
|
||||||
// If already in target format, return as-is
|
|
||||||
if (normalizedCurrent === normalizedTarget) {
|
|
||||||
return { buffer: inputBuffer, format: normalizedTarget };
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to target format
|
|
||||||
const convertedBuffer = await sharp(inputBuffer).toFormat(normalizedTarget).toBuffer();
|
|
||||||
return { buffer: convertedBuffer, format: normalizedTarget };
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Initialize Gemini client (supports both Gemini API and Vertex AI)
|
|
||||||
* Priority: API key (from options, resolved by loadAuthValues) > Vertex AI service account
|
|
||||||
* @param {Object} options - Initialization options
|
|
||||||
* @param {string} [options.GEMINI_API_KEY] - Gemini API key (resolved by loadAuthValues)
|
|
||||||
* @param {string} [options.GOOGLE_KEY] - Google API key (resolved by loadAuthValues)
|
|
||||||
* @returns {Promise<GoogleGenAI>} - The initialized client
|
|
||||||
*/
|
|
||||||
async function initializeGeminiClient(options = {}) {
|
|
||||||
const geminiKey = options.GEMINI_API_KEY;
|
|
||||||
if (geminiKey) {
|
|
||||||
logger.debug('[GeminiImageGen] Using Gemini API with GEMINI_API_KEY');
|
|
||||||
return new GoogleGenAI({ apiKey: geminiKey });
|
|
||||||
}
|
|
||||||
|
|
||||||
const googleKey = options.GOOGLE_KEY;
|
|
||||||
if (googleKey) {
|
|
||||||
logger.debug('[GeminiImageGen] Using Gemini API with GOOGLE_KEY');
|
|
||||||
return new GoogleGenAI({ apiKey: googleKey });
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug('[GeminiImageGen] Using Vertex AI with service account');
|
|
||||||
const credentialsPath = getDefaultServiceKeyPath();
|
|
||||||
const serviceKey = await loadServiceKey(credentialsPath);
|
|
||||||
|
|
||||||
if (!serviceKey || !serviceKey.project_id) {
|
|
||||||
throw new Error(
|
|
||||||
'Gemini Image Generation requires one of: user-provided API key, GEMINI_API_KEY or GOOGLE_KEY env var, or a valid Google service account. ' +
|
|
||||||
`Service account file not found or invalid at: ${credentialsPath}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return new GoogleGenAI({
|
|
||||||
vertexai: true,
|
|
||||||
project: serviceKey.project_id,
|
|
||||||
location: process.env.GOOGLE_LOC || process.env.GOOGLE_CLOUD_LOCATION || 'global',
|
|
||||||
googleAuthOptions: { credentials: serviceKey },
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Convert image files to Gemini inline data format
|
|
||||||
* @param {Object} params - Parameters
|
|
||||||
* @returns {Promise<Array>} - Array of inline data objects
|
|
||||||
*/
|
|
||||||
async function convertImagesToInlineData({ imageFiles, image_ids, req, fileStrategy }) {
|
|
||||||
if (!image_ids || image_ids.length === 0) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
const streamMethods = {};
|
|
||||||
const requestFilesMap = Object.fromEntries(imageFiles.map((f) => [f.file_id, { ...f }]));
|
|
||||||
const orderedFiles = new Array(image_ids.length);
|
|
||||||
const idsToFetch = [];
|
|
||||||
const indexOfMissing = Object.create(null);
|
|
||||||
|
|
||||||
for (let i = 0; i < image_ids.length; i++) {
|
|
||||||
const id = image_ids[i];
|
|
||||||
const file = requestFilesMap[id];
|
|
||||||
if (file) {
|
|
||||||
orderedFiles[i] = file;
|
|
||||||
} else {
|
|
||||||
idsToFetch.push(id);
|
|
||||||
indexOfMissing[id] = i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (idsToFetch.length && req?.user?.id) {
|
|
||||||
const fetchedFiles = await getFiles(
|
|
||||||
{
|
|
||||||
user: req.user.id,
|
|
||||||
file_id: { $in: idsToFetch },
|
|
||||||
height: { $exists: true },
|
|
||||||
width: { $exists: true },
|
|
||||||
},
|
|
||||||
{},
|
|
||||||
{},
|
|
||||||
);
|
|
||||||
|
|
||||||
for (const file of fetchedFiles) {
|
|
||||||
requestFilesMap[file.file_id] = file;
|
|
||||||
orderedFiles[indexOfMissing[file.file_id]] = file;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const inlineDataArray = [];
|
|
||||||
for (const imageFile of orderedFiles) {
|
|
||||||
if (!imageFile) continue;
|
|
||||||
|
|
||||||
try {
|
|
||||||
const source = imageFile.source || fileStrategy;
|
|
||||||
if (!source) continue;
|
|
||||||
|
|
||||||
let getDownloadStream = streamMethods[source];
|
|
||||||
if (!getDownloadStream) {
|
|
||||||
({ getDownloadStream } = getStrategyFunctions(source));
|
|
||||||
streamMethods[source] = getDownloadStream;
|
|
||||||
}
|
|
||||||
if (!getDownloadStream) continue;
|
|
||||||
|
|
||||||
const stream = await getDownloadStream(req, imageFile.filepath);
|
|
||||||
if (!stream) continue;
|
|
||||||
|
|
||||||
const chunks = [];
|
|
||||||
for await (const chunk of stream) {
|
|
||||||
chunks.push(chunk);
|
|
||||||
}
|
|
||||||
const buffer = Buffer.concat(chunks);
|
|
||||||
const base64Data = buffer.toString('base64');
|
|
||||||
const mimeType = imageFile.type || 'image/png';
|
|
||||||
|
|
||||||
inlineDataArray.push({
|
|
||||||
inlineData: { mimeType, data: base64Data },
|
|
||||||
});
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[GeminiImageGen] Error processing image:', imageFile.file_id, error);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return inlineDataArray;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check for safety blocks in API response
|
|
||||||
* @param {Object} response - The API response
|
|
||||||
* @returns {Object|null} - Safety block info or null
|
|
||||||
*/
|
|
||||||
function checkForSafetyBlock(response) {
|
|
||||||
if (!response?.candidates?.length) {
|
|
||||||
return { reason: 'NO_CANDIDATES', message: 'No candidates returned' };
|
|
||||||
}
|
|
||||||
|
|
||||||
const candidate = response.candidates[0];
|
|
||||||
const finishReason = candidate.finishReason;
|
|
||||||
|
|
||||||
if (finishReason === 'SAFETY' || finishReason === 'PROHIBITED_CONTENT') {
|
|
||||||
return { reason: finishReason, message: 'Content blocked by safety filters' };
|
|
||||||
}
|
|
||||||
|
|
||||||
if (finishReason === 'RECITATION') {
|
|
||||||
return { reason: finishReason, message: 'Content blocked due to recitation concerns' };
|
|
||||||
}
|
|
||||||
|
|
||||||
if (candidate.safetyRatings) {
|
|
||||||
for (const rating of candidate.safetyRatings) {
|
|
||||||
if (rating.probability === 'HIGH' || rating.blocked === true) {
|
|
||||||
return {
|
|
||||||
reason: 'SAFETY_RATING',
|
|
||||||
message: `Blocked due to ${rating.category}`,
|
|
||||||
category: rating.category,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Record token usage for balance tracking
|
|
||||||
* @param {Object} params - Parameters
|
|
||||||
* @param {Object} params.usageMetadata - The usage metadata from API response
|
|
||||||
* @param {Object} params.req - The request object
|
|
||||||
* @param {string} params.userId - The user ID
|
|
||||||
* @param {string} params.conversationId - The conversation ID
|
|
||||||
* @param {string} params.model - The model name
|
|
||||||
* @param {string} [params.messageId] - The response message ID for transaction correlation
|
|
||||||
*/
|
|
||||||
async function recordTokenUsage({ usageMetadata, req, userId, conversationId, model, messageId }) {
|
|
||||||
if (!usageMetadata) {
|
|
||||||
logger.debug('[GeminiImageGen] No usage metadata available for balance tracking');
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const appConfig = req?.config;
|
|
||||||
const balance = getBalanceConfig(appConfig);
|
|
||||||
const transactions = getTransactionsConfig(appConfig);
|
|
||||||
|
|
||||||
// Skip if neither balance nor transactions are enabled
|
|
||||||
if (!balance?.enabled && transactions?.enabled === false) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const promptTokens = usageMetadata.prompt_token_count || usageMetadata.promptTokenCount || 0;
|
|
||||||
const completionTokens =
|
|
||||||
usageMetadata.candidates_token_count || usageMetadata.candidatesTokenCount || 0;
|
|
||||||
|
|
||||||
if (promptTokens === 0 && completionTokens === 0) {
|
|
||||||
logger.debug('[GeminiImageGen] No tokens to record');
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug('[GeminiImageGen] Recording token usage:', {
|
|
||||||
promptTokens,
|
|
||||||
completionTokens,
|
|
||||||
model,
|
|
||||||
conversationId,
|
|
||||||
});
|
|
||||||
|
|
||||||
try {
|
|
||||||
await spendTokens(
|
|
||||||
{
|
|
||||||
user: userId,
|
|
||||||
model,
|
|
||||||
messageId,
|
|
||||||
conversationId,
|
|
||||||
context: 'image_generation',
|
|
||||||
balance,
|
|
||||||
transactions,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
promptTokens,
|
|
||||||
completionTokens,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[GeminiImageGen] Error recording token usage:', error);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates Gemini Image Generation tool
|
|
||||||
* @param {Object} fields - Configuration fields
|
|
||||||
* @returns {ReturnType<tool>} - The image generation tool
|
|
||||||
*/
|
|
||||||
function createGeminiImageTool(fields = {}) {
|
|
||||||
const override = fields.override ?? false;
|
|
||||||
|
|
||||||
if (!override && !fields.isAgent) {
|
|
||||||
throw new Error('This tool is only available for agents.');
|
|
||||||
}
|
|
||||||
|
|
||||||
const { req, imageFiles = [], userId, fileStrategy, GEMINI_API_KEY, GOOGLE_KEY } = fields;
|
|
||||||
|
|
||||||
const imageOutputType = fields.imageOutputType || EImageOutputType.PNG;
|
|
||||||
|
|
||||||
const geminiImageGenTool = tool(
|
|
||||||
async ({ prompt, image_ids, aspectRatio, imageSize }, runnableConfig) => {
|
|
||||||
if (!prompt) {
|
|
||||||
throw new Error('Missing required field: prompt');
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug('[GeminiImageGen] Generating image', { aspectRatio, imageSize });
|
|
||||||
|
|
||||||
let ai;
|
|
||||||
try {
|
|
||||||
ai = await initializeGeminiClient({
|
|
||||||
GEMINI_API_KEY,
|
|
||||||
GOOGLE_KEY,
|
|
||||||
});
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[GeminiImageGen] Failed to initialize client:', error);
|
|
||||||
return [
|
|
||||||
[{ type: ContentTypes.TEXT, text: `Failed to initialize Gemini: ${error.message}` }],
|
|
||||||
{ content: [], file_ids: [] },
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
const contents = [{ text: replaceUnwantedChars(prompt) }];
|
|
||||||
|
|
||||||
if (image_ids?.length > 0) {
|
|
||||||
const contextImages = await convertImagesToInlineData({
|
|
||||||
imageFiles,
|
|
||||||
image_ids,
|
|
||||||
req,
|
|
||||||
fileStrategy,
|
|
||||||
});
|
|
||||||
contents.push(...contextImages);
|
|
||||||
logger.debug('[GeminiImageGen] Added', contextImages.length, 'context images');
|
|
||||||
}
|
|
||||||
|
|
||||||
let apiResponse;
|
|
||||||
const geminiModel = process.env.GEMINI_IMAGE_MODEL || 'gemini-2.5-flash-image';
|
|
||||||
const config = {
|
|
||||||
responseModalities: ['TEXT', 'IMAGE'],
|
|
||||||
};
|
|
||||||
|
|
||||||
const supportsImageSize = !geminiModel.includes('gemini-2.5-flash-image');
|
|
||||||
if (aspectRatio || (imageSize && supportsImageSize)) {
|
|
||||||
config.imageConfig = {};
|
|
||||||
if (aspectRatio) {
|
|
||||||
config.imageConfig.aspectRatio = aspectRatio;
|
|
||||||
}
|
|
||||||
if (imageSize && supportsImageSize) {
|
|
||||||
config.imageConfig.imageSize = imageSize;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let derivedSignal = null;
|
|
||||||
let abortHandler = null;
|
|
||||||
|
|
||||||
if (runnableConfig?.signal) {
|
|
||||||
derivedSignal = AbortSignal.any([runnableConfig.signal]);
|
|
||||||
abortHandler = () => logger.debug('[GeminiImageGen] Image generation aborted');
|
|
||||||
derivedSignal.addEventListener('abort', abortHandler, { once: true });
|
|
||||||
config.abortSignal = derivedSignal;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
apiResponse = await ai.models.generateContent({
|
|
||||||
model: geminiModel,
|
|
||||||
contents,
|
|
||||||
config,
|
|
||||||
});
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[GeminiImageGen] API error:', error);
|
|
||||||
return [
|
|
||||||
[{ type: ContentTypes.TEXT, text: `Image generation failed: ${error.message}` }],
|
|
||||||
{ content: [], file_ids: [] },
|
|
||||||
];
|
|
||||||
} finally {
|
|
||||||
if (abortHandler && derivedSignal) {
|
|
||||||
derivedSignal.removeEventListener('abort', abortHandler);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const safetyBlock = checkForSafetyBlock(apiResponse);
|
|
||||||
if (safetyBlock) {
|
|
||||||
logger.warn('[GeminiImageGen] Safety block:', safetyBlock);
|
|
||||||
const errorMsg = 'Image blocked by content safety filters. Please try different content.';
|
|
||||||
return [[{ type: ContentTypes.TEXT, text: errorMsg }], { content: [], file_ids: [] }];
|
|
||||||
}
|
|
||||||
|
|
||||||
const rawImageData = apiResponse.candidates?.[0]?.content?.parts?.find((p) => p.inlineData)
|
|
||||||
?.inlineData?.data;
|
|
||||||
|
|
||||||
if (!rawImageData) {
|
|
||||||
logger.warn('[GeminiImageGen] No image data in response');
|
|
||||||
return [
|
|
||||||
[{ type: ContentTypes.TEXT, text: 'No image was generated. Please try again.' }],
|
|
||||||
{ content: [], file_ids: [] },
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
const rawBuffer = Buffer.from(rawImageData, 'base64');
|
|
||||||
const { buffer: convertedBuffer, format: outputFormat } = await convertImageFormat(
|
|
||||||
rawBuffer,
|
|
||||||
imageOutputType,
|
|
||||||
);
|
|
||||||
const imageData = convertedBuffer.toString('base64');
|
|
||||||
const mimeType = outputFormat === 'jpeg' ? 'image/jpeg' : `image/${outputFormat}`;
|
|
||||||
|
|
||||||
const dataUrl = `data:${mimeType};base64,${imageData}`;
|
|
||||||
const file_ids = [v4()];
|
|
||||||
const content = [
|
|
||||||
{
|
|
||||||
type: ContentTypes.IMAGE_URL,
|
|
||||||
image_url: { url: dataUrl },
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
const textResponse = [
|
|
||||||
{
|
|
||||||
type: ContentTypes.TEXT,
|
|
||||||
text:
|
|
||||||
displayMessage +
|
|
||||||
`\n\ngenerated_image_id: "${file_ids[0]}"` +
|
|
||||||
(image_ids?.length > 0 ? `\nreferenced_image_ids: ["${image_ids.join('", "')}"]` : ''),
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
const conversationId = runnableConfig?.configurable?.thread_id;
|
|
||||||
const messageId =
|
|
||||||
runnableConfig?.configurable?.run_id ??
|
|
||||||
runnableConfig?.configurable?.requestBody?.messageId;
|
|
||||||
recordTokenUsage({
|
|
||||||
usageMetadata: apiResponse.usageMetadata,
|
|
||||||
req,
|
|
||||||
userId,
|
|
||||||
messageId,
|
|
||||||
conversationId,
|
|
||||||
model: geminiModel,
|
|
||||||
}).catch((error) => {
|
|
||||||
logger.error('[GeminiImageGen] Failed to record token usage:', error);
|
|
||||||
});
|
|
||||||
|
|
||||||
return [textResponse, { content, file_ids }];
|
|
||||||
},
|
|
||||||
{
|
|
||||||
...geminiToolkit.gemini_image_gen,
|
|
||||||
responseFormat: 'content_and_artifact',
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
return geminiImageGenTool;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export both for compatibility
|
|
||||||
module.exports = createGeminiImageTool;
|
|
||||||
module.exports.createGeminiImageTool = createGeminiImageTool;
|
|
||||||
|
|
@ -1,33 +1,12 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
||||||
|
|
||||||
const googleSearchJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
query: {
|
|
||||||
type: 'string',
|
|
||||||
minLength: 1,
|
|
||||||
description: 'The search query string.',
|
|
||||||
},
|
|
||||||
max_results: {
|
|
||||||
type: 'integer',
|
|
||||||
minimum: 1,
|
|
||||||
maximum: 10,
|
|
||||||
description: 'The maximum number of search results to return. Defaults to 5.',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['query'],
|
|
||||||
};
|
|
||||||
|
|
||||||
class GoogleSearchResults extends Tool {
|
class GoogleSearchResults extends Tool {
|
||||||
static lc_name() {
|
static lc_name() {
|
||||||
return 'google';
|
return 'google';
|
||||||
}
|
}
|
||||||
|
|
||||||
static get jsonSchema() {
|
|
||||||
return googleSearchJsonSchema;
|
|
||||||
}
|
|
||||||
|
|
||||||
constructor(fields = {}) {
|
constructor(fields = {}) {
|
||||||
super(fields);
|
super(fields);
|
||||||
this.name = 'google';
|
this.name = 'google';
|
||||||
|
|
@ -49,11 +28,25 @@ class GoogleSearchResults extends Tool {
|
||||||
this.description =
|
this.description =
|
||||||
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
|
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
|
||||||
|
|
||||||
this.schema = googleSearchJsonSchema;
|
this.schema = z.object({
|
||||||
|
query: z.string().min(1).describe('The search query string.'),
|
||||||
|
max_results: z
|
||||||
|
.number()
|
||||||
|
.min(1)
|
||||||
|
.max(10)
|
||||||
|
.optional()
|
||||||
|
.describe('The maximum number of search results to return. Defaults to 10.'),
|
||||||
|
// Note: Google API has its own parameters for search customization, adjust as needed.
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
async _call(input) {
|
async _call(input) {
|
||||||
const { query, max_results = 5 } = input;
|
const validationResult = this.schema.safeParse(input);
|
||||||
|
if (!validationResult.success) {
|
||||||
|
throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const { query, max_results = 5 } = validationResult.data;
|
||||||
|
|
||||||
const response = await fetch(
|
const response = await fetch(
|
||||||
`https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
|
`https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
|
||||||
|
|
|
||||||
|
|
@ -6,10 +6,11 @@ const { ProxyAgent } = require('undici');
|
||||||
const { tool } = require('@langchain/core/tools');
|
const { tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
|
const { logAxiosError, oaiToolkit } = require('@librechat/api');
|
||||||
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
|
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
|
||||||
const { logAxiosError, oaiToolkit, extractBaseURL } = require('@librechat/api');
|
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { getFiles } = require('~/models');
|
const extractBaseURL = require('~/utils/extractBaseURL');
|
||||||
|
const { getFiles } = require('~/models/File');
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
@ -78,8 +79,6 @@ function createOpenAIImageTools(fields = {}) {
|
||||||
let apiKey = fields.IMAGE_GEN_OAI_API_KEY ?? getApiKey();
|
let apiKey = fields.IMAGE_GEN_OAI_API_KEY ?? getApiKey();
|
||||||
const closureConfig = { apiKey };
|
const closureConfig = { apiKey };
|
||||||
|
|
||||||
const imageModel = process.env.IMAGE_GEN_OAI_MODEL || 'gpt-image-1';
|
|
||||||
|
|
||||||
let baseURL = 'https://api.openai.com/v1/';
|
let baseURL = 'https://api.openai.com/v1/';
|
||||||
if (!override && process.env.IMAGE_GEN_OAI_BASEURL) {
|
if (!override && process.env.IMAGE_GEN_OAI_BASEURL) {
|
||||||
baseURL = extractBaseURL(process.env.IMAGE_GEN_OAI_BASEURL);
|
baseURL = extractBaseURL(process.env.IMAGE_GEN_OAI_BASEURL);
|
||||||
|
|
@ -159,7 +158,7 @@ function createOpenAIImageTools(fields = {}) {
|
||||||
|
|
||||||
resp = await openai.images.generate(
|
resp = await openai.images.generate(
|
||||||
{
|
{
|
||||||
model: imageModel,
|
model: 'gpt-image-1',
|
||||||
prompt: replaceUnwantedChars(prompt),
|
prompt: replaceUnwantedChars(prompt),
|
||||||
n: Math.min(Math.max(1, n), 10),
|
n: Math.min(Math.max(1, n), 10),
|
||||||
background,
|
background,
|
||||||
|
|
@ -241,7 +240,7 @@ Error Message: ${error.message}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
const formData = new FormData();
|
const formData = new FormData();
|
||||||
formData.append('model', imageModel);
|
formData.append('model', 'gpt-image-1');
|
||||||
formData.append('prompt', replaceUnwantedChars(prompt));
|
formData.append('prompt', replaceUnwantedChars(prompt));
|
||||||
// TODO: `mask` support
|
// TODO: `mask` support
|
||||||
// TODO: more than 1 image support
|
// TODO: more than 1 image support
|
||||||
|
|
|
||||||
|
|
@ -1,52 +1,8 @@
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
|
const { z } = require('zod');
|
||||||
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
||||||
const fetch = require('node-fetch');
|
const fetch = require('node-fetch');
|
||||||
|
|
||||||
const openWeatherJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
action: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['help', 'current_forecast', 'timestamp', 'daily_aggregation', 'overview'],
|
|
||||||
description: 'The action to perform',
|
|
||||||
},
|
|
||||||
city: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'City name for geocoding if lat/lon not provided',
|
|
||||||
},
|
|
||||||
lat: {
|
|
||||||
type: 'number',
|
|
||||||
description: 'Latitude coordinate',
|
|
||||||
},
|
|
||||||
lon: {
|
|
||||||
type: 'number',
|
|
||||||
description: 'Longitude coordinate',
|
|
||||||
},
|
|
||||||
exclude: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Parts to exclude from the response',
|
|
||||||
},
|
|
||||||
units: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['Celsius', 'Kelvin', 'Fahrenheit'],
|
|
||||||
description: 'Temperature units',
|
|
||||||
},
|
|
||||||
lang: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Language code',
|
|
||||||
},
|
|
||||||
date: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Date in YYYY-MM-DD format for timestamp and daily_aggregation',
|
|
||||||
},
|
|
||||||
tz: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Timezone',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['action'],
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Map user-friendly units to OpenWeather units.
|
* Map user-friendly units to OpenWeather units.
|
||||||
* Defaults to Celsius if not specified.
|
* Defaults to Celsius if not specified.
|
||||||
|
|
@ -110,11 +66,17 @@ class OpenWeather extends Tool {
|
||||||
'Units: "Celsius", "Kelvin", or "Fahrenheit" (default: Celsius). ' +
|
'Units: "Celsius", "Kelvin", or "Fahrenheit" (default: Celsius). ' +
|
||||||
'For timestamp action, use "date" in YYYY-MM-DD format.';
|
'For timestamp action, use "date" in YYYY-MM-DD format.';
|
||||||
|
|
||||||
schema = openWeatherJsonSchema;
|
schema = z.object({
|
||||||
|
action: z.enum(['help', 'current_forecast', 'timestamp', 'daily_aggregation', 'overview']),
|
||||||
static get jsonSchema() {
|
city: z.string().optional(),
|
||||||
return openWeatherJsonSchema;
|
lat: z.number().optional(),
|
||||||
}
|
lon: z.number().optional(),
|
||||||
|
exclude: z.string().optional(),
|
||||||
|
units: z.enum(['Celsius', 'Kelvin', 'Fahrenheit']).optional(),
|
||||||
|
lang: z.string().optional(),
|
||||||
|
date: z.string().optional(), // For timestamp and daily_aggregation
|
||||||
|
tz: z.string().optional(),
|
||||||
|
});
|
||||||
|
|
||||||
constructor(fields = {}) {
|
constructor(fields = {}) {
|
||||||
super();
|
super();
|
||||||
|
|
@ -270,7 +232,7 @@ class OpenWeather extends Tool {
|
||||||
|
|
||||||
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
|
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
|
||||||
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
|
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
|
||||||
return "Error: lat and lon are required and must be numbers for this action (or specify 'city').";
|
return 'Error: lat and lon are required and must be numbers for this action (or specify \'city\').';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -281,7 +243,7 @@ class OpenWeather extends Tool {
|
||||||
let dt;
|
let dt;
|
||||||
if (action === 'timestamp') {
|
if (action === 'timestamp') {
|
||||||
if (!date) {
|
if (!date) {
|
||||||
return "Error: For timestamp action, a 'date' in YYYY-MM-DD format is required.";
|
return 'Error: For timestamp action, a \'date\' in YYYY-MM-DD format is required.';
|
||||||
}
|
}
|
||||||
dt = this.convertDateToUnix(date);
|
dt = this.convertDateToUnix(date);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,6 @@
|
||||||
// Generates image using stable diffusion webui's api (automatic1111)
|
// Generates image using stable diffusion webui's api (automatic1111)
|
||||||
const fs = require('fs');
|
const fs = require('fs');
|
||||||
|
const { z } = require('zod');
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const sharp = require('sharp');
|
const sharp = require('sharp');
|
||||||
|
|
@ -10,23 +11,6 @@ const { FileContext, ContentTypes } = require('librechat-data-provider');
|
||||||
const { getBasePath } = require('@librechat/api');
|
const { getBasePath } = require('@librechat/api');
|
||||||
const paths = require('~/config/paths');
|
const paths = require('~/config/paths');
|
||||||
|
|
||||||
const stableDiffusionJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
prompt: {
|
|
||||||
type: 'string',
|
|
||||||
description:
|
|
||||||
'Detailed keywords to describe the subject, using at least 7 keywords to accurately describe the image, separated by comma',
|
|
||||||
},
|
|
||||||
negative_prompt: {
|
|
||||||
type: 'string',
|
|
||||||
description:
|
|
||||||
'Keywords we want to exclude from the final image, using at least 7 keywords to accurately describe the image, separated by comma',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['prompt', 'negative_prompt'],
|
|
||||||
};
|
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"Stable Diffusion displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"Stable Diffusion displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
||||||
|
|
@ -62,11 +46,18 @@ class StableDiffusionAPI extends Tool {
|
||||||
// - Generate images only once per human query unless explicitly requested by the user`;
|
// - Generate images only once per human query unless explicitly requested by the user`;
|
||||||
this.description =
|
this.description =
|
||||||
"You can generate images using text with 'stable-diffusion'. This tool is exclusively for visual content.";
|
"You can generate images using text with 'stable-diffusion'. This tool is exclusively for visual content.";
|
||||||
this.schema = stableDiffusionJsonSchema;
|
this.schema = z.object({
|
||||||
}
|
prompt: z
|
||||||
|
.string()
|
||||||
static get jsonSchema() {
|
.describe(
|
||||||
return stableDiffusionJsonSchema;
|
'Detailed keywords to describe the subject, using at least 7 keywords to accurately describe the image, separated by comma',
|
||||||
|
),
|
||||||
|
negative_prompt: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
'Keywords we want to exclude from the final image, using at least 7 keywords to accurately describe the image, separated by comma',
|
||||||
|
),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
replaceNewLinesWithSpaces(inputString) {
|
replaceNewLinesWithSpaces(inputString) {
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,4 @@
|
||||||
const { z } = require('zod');
|
const { z } = require('zod');
|
||||||
const { ProxyAgent, fetch } = require('undici');
|
|
||||||
const { tool } = require('@langchain/core/tools');
|
const { tool } = require('@langchain/core/tools');
|
||||||
const { getApiKey } = require('./credentials');
|
const { getApiKey } = require('./credentials');
|
||||||
|
|
||||||
|
|
@ -20,19 +19,13 @@ function createTavilySearchTool(fields = {}) {
|
||||||
...kwargs,
|
...kwargs,
|
||||||
};
|
};
|
||||||
|
|
||||||
const fetchOptions = {
|
const response = await fetch('https://api.tavily.com/search', {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: {
|
headers: {
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
},
|
},
|
||||||
body: JSON.stringify(requestBody),
|
body: JSON.stringify(requestBody),
|
||||||
};
|
});
|
||||||
|
|
||||||
if (process.env.PROXY) {
|
|
||||||
fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
|
|
||||||
}
|
|
||||||
|
|
||||||
const response = await fetch('https://api.tavily.com/search', fetchOptions);
|
|
||||||
|
|
||||||
const json = await response.json();
|
const json = await response.json();
|
||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
|
|
|
||||||
|
|
@ -1,75 +1,7 @@
|
||||||
const { ProxyAgent, fetch } = require('undici');
|
const { z } = require('zod');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
||||||
|
|
||||||
const tavilySearchJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
query: {
|
|
||||||
type: 'string',
|
|
||||||
minLength: 1,
|
|
||||||
description: 'The search query string.',
|
|
||||||
},
|
|
||||||
max_results: {
|
|
||||||
type: 'number',
|
|
||||||
minimum: 1,
|
|
||||||
maximum: 10,
|
|
||||||
description: 'The maximum number of search results to return. Defaults to 5.',
|
|
||||||
},
|
|
||||||
search_depth: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['basic', 'advanced'],
|
|
||||||
description:
|
|
||||||
'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.',
|
|
||||||
},
|
|
||||||
include_images: {
|
|
||||||
type: 'boolean',
|
|
||||||
description:
|
|
||||||
'Whether to include a list of query-related images in the response. Default is False.',
|
|
||||||
},
|
|
||||||
include_answer: {
|
|
||||||
type: 'boolean',
|
|
||||||
description: 'Whether to include answers in the search results. Default is False.',
|
|
||||||
},
|
|
||||||
include_raw_content: {
|
|
||||||
type: 'boolean',
|
|
||||||
description: 'Whether to include raw content in the search results. Default is False.',
|
|
||||||
},
|
|
||||||
include_domains: {
|
|
||||||
type: 'array',
|
|
||||||
items: { type: 'string' },
|
|
||||||
description: 'A list of domains to specifically include in the search results.',
|
|
||||||
},
|
|
||||||
exclude_domains: {
|
|
||||||
type: 'array',
|
|
||||||
items: { type: 'string' },
|
|
||||||
description: 'A list of domains to specifically exclude from the search results.',
|
|
||||||
},
|
|
||||||
topic: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['general', 'news', 'finance'],
|
|
||||||
description:
|
|
||||||
'The category of the search. Use news ONLY if query SPECIFCALLY mentions the word "news".',
|
|
||||||
},
|
|
||||||
time_range: {
|
|
||||||
type: 'string',
|
|
||||||
enum: ['day', 'week', 'month', 'year', 'd', 'w', 'm', 'y'],
|
|
||||||
description: 'The time range back from the current date to filter results.',
|
|
||||||
},
|
|
||||||
days: {
|
|
||||||
type: 'number',
|
|
||||||
minimum: 1,
|
|
||||||
description: 'Number of days back from the current date to include. Only if topic is news.',
|
|
||||||
},
|
|
||||||
include_image_descriptions: {
|
|
||||||
type: 'boolean',
|
|
||||||
description:
|
|
||||||
'When include_images is true, also add a descriptive text for each image. Default is false.',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['query'],
|
|
||||||
};
|
|
||||||
|
|
||||||
class TavilySearchResults extends Tool {
|
class TavilySearchResults extends Tool {
|
||||||
static lc_name() {
|
static lc_name() {
|
||||||
return 'TavilySearchResults';
|
return 'TavilySearchResults';
|
||||||
|
|
@ -87,11 +19,64 @@ class TavilySearchResults extends Tool {
|
||||||
this.description =
|
this.description =
|
||||||
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
|
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
|
||||||
|
|
||||||
this.schema = tavilySearchJsonSchema;
|
this.schema = z.object({
|
||||||
}
|
query: z.string().min(1).describe('The search query string.'),
|
||||||
|
max_results: z
|
||||||
static get jsonSchema() {
|
.number()
|
||||||
return tavilySearchJsonSchema;
|
.min(1)
|
||||||
|
.max(10)
|
||||||
|
.optional()
|
||||||
|
.describe('The maximum number of search results to return. Defaults to 5.'),
|
||||||
|
search_depth: z
|
||||||
|
.enum(['basic', 'advanced'])
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.',
|
||||||
|
),
|
||||||
|
include_images: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Whether to include a list of query-related images in the response. Default is False.',
|
||||||
|
),
|
||||||
|
include_answer: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.describe('Whether to include answers in the search results. Default is False.'),
|
||||||
|
include_raw_content: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.describe('Whether to include raw content in the search results. Default is False.'),
|
||||||
|
include_domains: z
|
||||||
|
.array(z.string())
|
||||||
|
.optional()
|
||||||
|
.describe('A list of domains to specifically include in the search results.'),
|
||||||
|
exclude_domains: z
|
||||||
|
.array(z.string())
|
||||||
|
.optional()
|
||||||
|
.describe('A list of domains to specifically exclude from the search results.'),
|
||||||
|
topic: z
|
||||||
|
.enum(['general', 'news', 'finance'])
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'The category of the search. Use news ONLY if query SPECIFCALLY mentions the word "news".',
|
||||||
|
),
|
||||||
|
time_range: z
|
||||||
|
.enum(['day', 'week', 'month', 'year', 'd', 'w', 'm', 'y'])
|
||||||
|
.optional()
|
||||||
|
.describe('The time range back from the current date to filter results.'),
|
||||||
|
days: z
|
||||||
|
.number()
|
||||||
|
.min(1)
|
||||||
|
.optional()
|
||||||
|
.describe('Number of days back from the current date to include. Only if topic is news.'),
|
||||||
|
include_image_descriptions: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'When include_images is true, also add a descriptive text for each image. Default is false.',
|
||||||
|
),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
getApiKey() {
|
getApiKey() {
|
||||||
|
|
@ -103,7 +88,12 @@ class TavilySearchResults extends Tool {
|
||||||
}
|
}
|
||||||
|
|
||||||
async _call(input) {
|
async _call(input) {
|
||||||
const { query, ...rest } = input;
|
const validationResult = this.schema.safeParse(input);
|
||||||
|
if (!validationResult.success) {
|
||||||
|
throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const { query, ...rest } = validationResult.data;
|
||||||
|
|
||||||
const requestBody = {
|
const requestBody = {
|
||||||
api_key: this.apiKey,
|
api_key: this.apiKey,
|
||||||
|
|
@ -112,19 +102,13 @@ class TavilySearchResults extends Tool {
|
||||||
...this.kwargs,
|
...this.kwargs,
|
||||||
};
|
};
|
||||||
|
|
||||||
const fetchOptions = {
|
const response = await fetch('https://api.tavily.com/search', {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: {
|
headers: {
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
},
|
},
|
||||||
body: JSON.stringify(requestBody),
|
body: JSON.stringify(requestBody),
|
||||||
};
|
});
|
||||||
|
|
||||||
if (process.env.PROXY) {
|
|
||||||
fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
|
|
||||||
}
|
|
||||||
|
|
||||||
const response = await fetch('https://api.tavily.com/search', fetchOptions);
|
|
||||||
|
|
||||||
const json = await response.json();
|
const json = await response.json();
|
||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
|
|
|
||||||
|
|
@ -1,19 +1,8 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
||||||
|
|
||||||
const traversaalSearchJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
query: {
|
|
||||||
type: 'string',
|
|
||||||
description:
|
|
||||||
"A properly written sentence to be interpreted by an AI to search the web according to the user's request.",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['query'],
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Tool for the Traversaal AI search API, Ares.
|
* Tool for the Traversaal AI search API, Ares.
|
||||||
*/
|
*/
|
||||||
|
|
@ -28,15 +17,17 @@ class TraversaalSearch extends Tool {
|
||||||
Useful for when you need to answer questions about current events. Input should be a search query.`;
|
Useful for when you need to answer questions about current events. Input should be a search query.`;
|
||||||
this.description_for_model =
|
this.description_for_model =
|
||||||
'\'Please create a specific sentence for the AI to understand and use as a query to search the web based on the user\'s request. For example, "Find information about the highest mountains in the world." or "Show me the latest news articles about climate change and its impact on polar ice caps."\'';
|
'\'Please create a specific sentence for the AI to understand and use as a query to search the web based on the user\'s request. For example, "Find information about the highest mountains in the world." or "Show me the latest news articles about climate change and its impact on polar ice caps."\'';
|
||||||
this.schema = traversaalSearchJsonSchema;
|
this.schema = z.object({
|
||||||
|
query: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
"A properly written sentence to be interpreted by an AI to search the web according to the user's request.",
|
||||||
|
),
|
||||||
|
});
|
||||||
|
|
||||||
this.apiKey = fields?.TRAVERSAAL_API_KEY ?? this.getApiKey();
|
this.apiKey = fields?.TRAVERSAAL_API_KEY ?? this.getApiKey();
|
||||||
}
|
}
|
||||||
|
|
||||||
static get jsonSchema() {
|
|
||||||
return traversaalSearchJsonSchema;
|
|
||||||
}
|
|
||||||
|
|
||||||
getApiKey() {
|
getApiKey() {
|
||||||
const apiKey = getEnvironmentVariable('TRAVERSAAL_API_KEY');
|
const apiKey = getEnvironmentVariable('TRAVERSAAL_API_KEY');
|
||||||
if (!apiKey && this.override) {
|
if (!apiKey && this.override) {
|
||||||
|
|
|
||||||
|
|
@ -1,19 +1,9 @@
|
||||||
/* eslint-disable no-useless-escape */
|
/* eslint-disable no-useless-escape */
|
||||||
|
const { z } = require('zod');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
|
||||||
const wolframJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
input: {
|
|
||||||
type: 'string',
|
|
||||||
description: 'Natural language query to WolframAlpha following the guidelines',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['input'],
|
|
||||||
};
|
|
||||||
|
|
||||||
class WolframAlphaAPI extends Tool {
|
class WolframAlphaAPI extends Tool {
|
||||||
constructor(fields) {
|
constructor(fields) {
|
||||||
super();
|
super();
|
||||||
|
|
@ -51,11 +41,9 @@ class WolframAlphaAPI extends Tool {
|
||||||
// -- Do not explain each step unless user input is needed. Proceed directly to making a better API call based on the available assumptions.`;
|
// -- Do not explain each step unless user input is needed. Proceed directly to making a better API call based on the available assumptions.`;
|
||||||
this.description = `WolframAlpha offers computation, math, curated knowledge, and real-time data. It handles natural language queries and performs complex calculations.
|
this.description = `WolframAlpha offers computation, math, curated knowledge, and real-time data. It handles natural language queries and performs complex calculations.
|
||||||
Follow the guidelines to get the best results.`;
|
Follow the guidelines to get the best results.`;
|
||||||
this.schema = wolframJsonSchema;
|
this.schema = z.object({
|
||||||
}
|
input: z.string().describe('Natural language query to WolframAlpha following the guidelines'),
|
||||||
|
});
|
||||||
static get jsonSchema() {
|
|
||||||
return wolframJsonSchema;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fetchRawText(url) {
|
async fetchRawText(url) {
|
||||||
|
|
|
||||||
137
api/app/clients/tools/structured/YouTube.js
Normal file
137
api/app/clients/tools/structured/YouTube.js
Normal file
|
|
@ -0,0 +1,137 @@
|
||||||
|
const { ytToolkit } = require('@librechat/api');
|
||||||
|
const { tool } = require('@langchain/core/tools');
|
||||||
|
const { youtube } = require('@googleapis/youtube');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { YoutubeTranscript } = require('youtube-transcript');
|
||||||
|
const { getApiKey } = require('./credentials');
|
||||||
|
|
||||||
|
function extractVideoId(url) {
|
||||||
|
const rawIdRegex = /^[a-zA-Z0-9_-]{11}$/;
|
||||||
|
if (rawIdRegex.test(url)) {
|
||||||
|
return url;
|
||||||
|
}
|
||||||
|
|
||||||
|
const regex = new RegExp(
|
||||||
|
'(?:youtu\\.be/|youtube(?:\\.com)?/(?:' +
|
||||||
|
'(?:watch\\?v=)|(?:embed/)|(?:shorts/)|(?:live/)|(?:v/)|(?:/))?)' +
|
||||||
|
'([a-zA-Z0-9_-]{11})(?:\\S+)?$',
|
||||||
|
);
|
||||||
|
const match = url.match(regex);
|
||||||
|
return match ? match[1] : null;
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseTranscript(transcriptResponse) {
|
||||||
|
if (!Array.isArray(transcriptResponse)) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
return transcriptResponse
|
||||||
|
.map((entry) => entry.text.trim())
|
||||||
|
.filter((text) => text)
|
||||||
|
.join(' ')
|
||||||
|
.replaceAll('&#39;', "'");
|
||||||
|
}
|
||||||
|
|
||||||
|
function createYouTubeTools(fields = {}) {
|
||||||
|
const envVar = 'YOUTUBE_API_KEY';
|
||||||
|
const override = fields.override ?? false;
|
||||||
|
const apiKey = fields.apiKey ?? fields[envVar] ?? getApiKey(envVar, override);
|
||||||
|
|
||||||
|
const youtubeClient = youtube({
|
||||||
|
version: 'v3',
|
||||||
|
auth: apiKey,
|
||||||
|
});
|
||||||
|
|
||||||
|
const searchTool = tool(async ({ query, maxResults = 5 }) => {
|
||||||
|
const response = await youtubeClient.search.list({
|
||||||
|
part: 'snippet',
|
||||||
|
q: query,
|
||||||
|
type: 'video',
|
||||||
|
maxResults: maxResults || 5,
|
||||||
|
});
|
||||||
|
const result = response.data.items.map((item) => ({
|
||||||
|
title: item.snippet.title,
|
||||||
|
description: item.snippet.description,
|
||||||
|
url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
|
||||||
|
}));
|
||||||
|
return JSON.stringify(result, null, 2);
|
||||||
|
}, ytToolkit.youtube_search);
|
||||||
|
|
||||||
|
const infoTool = tool(async ({ url }) => {
|
||||||
|
const videoId = extractVideoId(url);
|
||||||
|
if (!videoId) {
|
||||||
|
throw new Error('Invalid YouTube URL or video ID');
|
||||||
|
}
|
||||||
|
|
||||||
|
const response = await youtubeClient.videos.list({
|
||||||
|
part: 'snippet,statistics',
|
||||||
|
id: videoId,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!response.data.items?.length) {
|
||||||
|
throw new Error('Video not found');
|
||||||
|
}
|
||||||
|
const video = response.data.items[0];
|
||||||
|
|
||||||
|
const result = {
|
||||||
|
title: video.snippet.title,
|
||||||
|
description: video.snippet.description,
|
||||||
|
views: video.statistics.viewCount,
|
||||||
|
likes: video.statistics.likeCount,
|
||||||
|
comments: video.statistics.commentCount,
|
||||||
|
};
|
||||||
|
return JSON.stringify(result, null, 2);
|
||||||
|
}, ytToolkit.youtube_info);
|
||||||
|
|
||||||
|
const commentsTool = tool(async ({ url, maxResults = 10 }) => {
|
||||||
|
const videoId = extractVideoId(url);
|
||||||
|
if (!videoId) {
|
||||||
|
throw new Error('Invalid YouTube URL or video ID');
|
||||||
|
}
|
||||||
|
|
||||||
|
const response = await youtubeClient.commentThreads.list({
|
||||||
|
part: 'snippet',
|
||||||
|
videoId,
|
||||||
|
maxResults: maxResults || 10,
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = response.data.items.map((item) => ({
|
||||||
|
author: item.snippet.topLevelComment.snippet.authorDisplayName,
|
||||||
|
text: item.snippet.topLevelComment.snippet.textDisplay,
|
||||||
|
likes: item.snippet.topLevelComment.snippet.likeCount,
|
||||||
|
}));
|
||||||
|
return JSON.stringify(result, null, 2);
|
||||||
|
}, ytToolkit.youtube_comments);
|
||||||
|
|
||||||
|
const transcriptTool = tool(async ({ url }) => {
|
||||||
|
const videoId = extractVideoId(url);
|
||||||
|
if (!videoId) {
|
||||||
|
throw new Error('Invalid YouTube URL or video ID');
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
try {
|
||||||
|
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'en' });
|
||||||
|
return parseTranscript(transcript);
|
||||||
|
} catch (e) {
|
||||||
|
logger.error(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'de' });
|
||||||
|
return parseTranscript(transcript);
|
||||||
|
} catch (e) {
|
||||||
|
logger.error(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
const transcript = await YoutubeTranscript.fetchTranscript(videoId);
|
||||||
|
return parseTranscript(transcript);
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error(`Failed to fetch transcript: ${error.message}`);
|
||||||
|
}
|
||||||
|
}, ytToolkit.youtube_transcript);
|
||||||
|
|
||||||
|
return [searchTool, infoTool, commentsTool, transcriptTool];
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = createYouTubeTools;
|
||||||
|
|
@ -1,125 +0,0 @@
|
||||||
const { ProxyAgent } = require('undici');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* These tests verify the proxy wrapper behavior for GeminiImageGen.
|
|
||||||
* Instead of loading the full module (which has many dependencies),
|
|
||||||
* we directly test the wrapper logic that would be applied.
|
|
||||||
*/
|
|
||||||
describe('GeminiImageGen Proxy Configuration', () => {
|
|
||||||
let originalEnv;
|
|
||||||
let originalFetch;
|
|
||||||
|
|
||||||
beforeAll(() => {
|
|
||||||
originalEnv = { ...process.env };
|
|
||||||
originalFetch = globalThis.fetch;
|
|
||||||
});
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
process.env = { ...originalEnv };
|
|
||||||
globalThis.fetch = originalFetch;
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
process.env = originalEnv;
|
|
||||||
globalThis.fetch = originalFetch;
|
|
||||||
});
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Simulates the proxy wrapper that GeminiImageGen applies at module load.
|
|
||||||
* This is the same logic from GeminiImageGen.js lines 30-42.
|
|
||||||
*/
|
|
||||||
function applyProxyWrapper() {
|
|
||||||
if (process.env.PROXY) {
|
|
||||||
const _originalFetch = globalThis.fetch;
|
|
||||||
const proxyAgent = new ProxyAgent(process.env.PROXY);
|
|
||||||
|
|
||||||
globalThis.fetch = function (url, options = {}) {
|
|
||||||
const urlString = url.toString();
|
|
||||||
if (urlString.includes('googleapis.com')) {
|
|
||||||
options = { ...options, dispatcher: proxyAgent };
|
|
||||||
}
|
|
||||||
return _originalFetch.call(this, url, options);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
it('should wrap globalThis.fetch when PROXY env is set', () => {
|
|
||||||
process.env.PROXY = 'http://proxy.example.com:8080';
|
|
||||||
|
|
||||||
const fetchBeforeWrap = globalThis.fetch;
|
|
||||||
|
|
||||||
applyProxyWrapper();
|
|
||||||
|
|
||||||
expect(globalThis.fetch).not.toBe(fetchBeforeWrap);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not wrap globalThis.fetch when PROXY env is not set', () => {
|
|
||||||
delete process.env.PROXY;
|
|
||||||
|
|
||||||
const fetchBeforeWrap = globalThis.fetch;
|
|
||||||
|
|
||||||
applyProxyWrapper();
|
|
||||||
|
|
||||||
expect(globalThis.fetch).toBe(fetchBeforeWrap);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should add dispatcher to googleapis.com URLs', async () => {
|
|
||||||
process.env.PROXY = 'http://proxy.example.com:8080';
|
|
||||||
|
|
||||||
let capturedOptions = null;
|
|
||||||
const mockFetch = jest.fn((url, options) => {
|
|
||||||
capturedOptions = options;
|
|
||||||
return Promise.resolve({ ok: true });
|
|
||||||
});
|
|
||||||
globalThis.fetch = mockFetch;
|
|
||||||
|
|
||||||
applyProxyWrapper();
|
|
||||||
|
|
||||||
await globalThis.fetch('https://generativelanguage.googleapis.com/v1/models', {});
|
|
||||||
|
|
||||||
expect(capturedOptions).toBeDefined();
|
|
||||||
expect(capturedOptions.dispatcher).toBeInstanceOf(ProxyAgent);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not add dispatcher to non-googleapis.com URLs', async () => {
|
|
||||||
process.env.PROXY = 'http://proxy.example.com:8080';
|
|
||||||
|
|
||||||
let capturedOptions = null;
|
|
||||||
const mockFetch = jest.fn((url, options) => {
|
|
||||||
capturedOptions = options;
|
|
||||||
return Promise.resolve({ ok: true });
|
|
||||||
});
|
|
||||||
globalThis.fetch = mockFetch;
|
|
||||||
|
|
||||||
applyProxyWrapper();
|
|
||||||
|
|
||||||
await globalThis.fetch('https://api.openai.com/v1/images', {});
|
|
||||||
|
|
||||||
expect(capturedOptions).toBeDefined();
|
|
||||||
expect(capturedOptions.dispatcher).toBeUndefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should preserve existing options when adding dispatcher', async () => {
|
|
||||||
process.env.PROXY = 'http://proxy.example.com:8080';
|
|
||||||
|
|
||||||
let capturedOptions = null;
|
|
||||||
const mockFetch = jest.fn((url, options) => {
|
|
||||||
capturedOptions = options;
|
|
||||||
return Promise.resolve({ ok: true });
|
|
||||||
});
|
|
||||||
globalThis.fetch = mockFetch;
|
|
||||||
|
|
||||||
applyProxyWrapper();
|
|
||||||
|
|
||||||
const customHeaders = { 'X-Custom-Header': 'test' };
|
|
||||||
await globalThis.fetch('https://aiplatform.googleapis.com/v1/models', {
|
|
||||||
headers: customHeaders,
|
|
||||||
method: 'POST',
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(capturedOptions).toBeDefined();
|
|
||||||
expect(capturedOptions.dispatcher).toBeInstanceOf(ProxyAgent);
|
|
||||||
expect(capturedOptions.headers).toEqual(customHeaders);
|
|
||||||
expect(capturedOptions.method).toBe('POST');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
@ -1,7 +1,6 @@
|
||||||
const { fetch, ProxyAgent } = require('undici');
|
|
||||||
const TavilySearchResults = require('../TavilySearchResults');
|
const TavilySearchResults = require('../TavilySearchResults');
|
||||||
|
|
||||||
jest.mock('undici');
|
jest.mock('node-fetch');
|
||||||
jest.mock('@langchain/core/utils/env');
|
jest.mock('@langchain/core/utils/env');
|
||||||
|
|
||||||
describe('TavilySearchResults', () => {
|
describe('TavilySearchResults', () => {
|
||||||
|
|
@ -14,7 +13,6 @@ describe('TavilySearchResults', () => {
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
jest.resetModules();
|
jest.resetModules();
|
||||||
jest.clearAllMocks();
|
|
||||||
process.env = {
|
process.env = {
|
||||||
...originalEnv,
|
...originalEnv,
|
||||||
TAVILY_API_KEY: mockApiKey,
|
TAVILY_API_KEY: mockApiKey,
|
||||||
|
|
@ -22,6 +20,7 @@ describe('TavilySearchResults', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
afterEach(() => {
|
afterEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
process.env = originalEnv;
|
process.env = originalEnv;
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -36,49 +35,4 @@ describe('TavilySearchResults', () => {
|
||||||
});
|
});
|
||||||
expect(instance.apiKey).toBe(mockApiKey);
|
expect(instance.apiKey).toBe(mockApiKey);
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('proxy support', () => {
|
|
||||||
const mockResponse = {
|
|
||||||
ok: true,
|
|
||||||
json: jest.fn().mockResolvedValue({ results: [] }),
|
|
||||||
};
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
fetch.mockResolvedValue(mockResponse);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should use ProxyAgent when PROXY env var is set', async () => {
|
|
||||||
const proxyUrl = 'http://proxy.example.com:8080';
|
|
||||||
process.env.PROXY = proxyUrl;
|
|
||||||
|
|
||||||
const mockProxyAgent = { type: 'proxy-agent' };
|
|
||||||
ProxyAgent.mockImplementation(() => mockProxyAgent);
|
|
||||||
|
|
||||||
const instance = new TavilySearchResults({ TAVILY_API_KEY: mockApiKey });
|
|
||||||
await instance._call({ query: 'test query' });
|
|
||||||
|
|
||||||
expect(ProxyAgent).toHaveBeenCalledWith(proxyUrl);
|
|
||||||
expect(fetch).toHaveBeenCalledWith(
|
|
||||||
'https://api.tavily.com/search',
|
|
||||||
expect.objectContaining({
|
|
||||||
dispatcher: mockProxyAgent,
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not use ProxyAgent when PROXY env var is not set', async () => {
|
|
||||||
delete process.env.PROXY;
|
|
||||||
|
|
||||||
const instance = new TavilySearchResults({ TAVILY_API_KEY: mockApiKey });
|
|
||||||
await instance._call({ query: 'test query' });
|
|
||||||
|
|
||||||
expect(ProxyAgent).not.toHaveBeenCalled();
|
|
||||||
expect(fetch).toHaveBeenCalledWith(
|
|
||||||
'https://api.tavily.com/search',
|
|
||||||
expect.not.objectContaining({
|
|
||||||
dispatcher: expect.anything(),
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -1,22 +1,11 @@
|
||||||
|
const { z } = require('zod');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const { tool } = require('@langchain/core/tools');
|
const { tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { generateShortLivedToken } = require('@librechat/api');
|
const { generateShortLivedToken } = require('@librechat/api');
|
||||||
const { Tools, EToolResources } = require('librechat-data-provider');
|
const { Tools, EToolResources } = require('librechat-data-provider');
|
||||||
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
|
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
|
||||||
const { getFiles } = require('~/models');
|
const { getFiles } = require('~/models/File');
|
||||||
|
|
||||||
const fileSearchJsonSchema = {
|
|
||||||
type: 'object',
|
|
||||||
properties: {
|
|
||||||
query: {
|
|
||||||
type: 'string',
|
|
||||||
description:
|
|
||||||
"A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you're looking for. The query will be used for semantic similarity matching against the file contents.",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
required: ['query'],
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
|
|
@ -97,6 +86,7 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
*
|
||||||
* @param {import('librechat-data-provider').TFile} file
|
* @param {import('librechat-data-provider').TFile} file
|
||||||
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
|
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
|
||||||
*/
|
*/
|
||||||
|
|
@ -145,16 +135,11 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
|
||||||
page: docInfo.metadata.page || null,
|
page: docInfo.metadata.page || null,
|
||||||
})),
|
})),
|
||||||
)
|
)
|
||||||
|
// TODO: results should be sorted by relevance, not distance
|
||||||
.sort((a, b) => a.distance - b.distance)
|
.sort((a, b) => a.distance - b.distance)
|
||||||
|
// TODO: make this configurable
|
||||||
.slice(0, 10);
|
.slice(0, 10);
|
||||||
|
|
||||||
if (formattedResults.length === 0) {
|
|
||||||
return [
|
|
||||||
'No content found in the files. The files may not have been processed correctly or you may need to refine your query.',
|
|
||||||
undefined,
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
const formattedString = formattedResults
|
const formattedString = formattedResults
|
||||||
.map(
|
.map(
|
||||||
(result, index) =>
|
(result, index) =>
|
||||||
|
|
@ -184,18 +169,23 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
|
||||||
? `
|
? `
|
||||||
|
|
||||||
**CITE FILE SEARCH RESULTS:**
|
**CITE FILE SEARCH RESULTS:**
|
||||||
Use the EXACT anchor markers shown below (copy them verbatim) immediately after statements derived from file content. Reference the filename in your text:
|
Use anchor markers immediately after statements derived from file content. Reference the filename in your text:
|
||||||
- File citation: "The document.pdf states that... \\ue202turn0file0"
|
- File citation: "The document.pdf states that... \\ue202turn0file0"
|
||||||
- Page reference: "According to report.docx... \\ue202turn0file1"
|
- Page reference: "According to report.docx... \\ue202turn0file1"
|
||||||
- Multi-file: "Multiple sources confirm... \\ue200\\ue202turn0file0\\ue202turn0file1\\ue201"
|
- Multi-file: "Multiple sources confirm... \\ue200\\ue202turn0file0\\ue202turn0file1\\ue201"
|
||||||
|
|
||||||
**CRITICAL:** Output these escape sequences EXACTLY as shown (e.g., \\ue202turn0file0). Do NOT substitute with other characters like † or similar symbols.
|
|
||||||
**ALWAYS mention the filename in your text before the citation marker. NEVER use markdown links or footnotes.**`
|
**ALWAYS mention the filename in your text before the citation marker. NEVER use markdown links or footnotes.**`
|
||||||
: ''
|
: ''
|
||||||
}`,
|
}`,
|
||||||
schema: fileSearchJsonSchema,
|
schema: z.object({
|
||||||
|
query: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
"A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you're looking for. The query will be used for semantic similarity matching against the file contents.",
|
||||||
|
),
|
||||||
|
}),
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = { createFileSearchTool, primeFiles, fileSearchJsonSchema };
|
module.exports = { createFileSearchTool, primeFiles };
|
||||||
|
|
|
||||||
33
api/app/clients/tools/util/handleOpenAIErrors.js
Normal file
33
api/app/clients/tools/util/handleOpenAIErrors.js
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
const OpenAI = require('openai');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handles errors that may occur when making requests to OpenAI's API.
|
||||||
|
* It checks the instance of the error and prints a specific warning message
|
||||||
|
* to the console depending on the type of error encountered.
|
||||||
|
* It then calls an optional error callback function with the error object.
|
||||||
|
*
|
||||||
|
* @param {Error} err - The error object thrown by OpenAI API.
|
||||||
|
* @param {Function} errorCallback - A callback function that is called with the error object.
|
||||||
|
* @param {string} [context='stream'] - A string providing context where the error occurred, defaults to 'stream'.
|
||||||
|
*/
|
||||||
|
async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
|
||||||
|
if (err instanceof OpenAI.APIError && err?.message?.includes('abort')) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
|
||||||
|
}
|
||||||
|
if (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
|
||||||
|
} else if (err instanceof OpenAI.APIError) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] API error`);
|
||||||
|
} else {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.error(err);
|
||||||
|
|
||||||
|
if (errorCallback) {
|
||||||
|
errorCallback(err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = handleOpenAIErrors;
|
||||||
|
|
@ -10,16 +10,14 @@ const {
|
||||||
createSafeUser,
|
createSafeUser,
|
||||||
mcpToolPattern,
|
mcpToolPattern,
|
||||||
loadWebSearchAuth,
|
loadWebSearchAuth,
|
||||||
buildImageToolContext,
|
|
||||||
buildWebSearchContext,
|
|
||||||
} = require('@librechat/api');
|
} = require('@librechat/api');
|
||||||
const { getMCPServersRegistry } = require('~/config');
|
|
||||||
const {
|
const {
|
||||||
Tools,
|
Tools,
|
||||||
Constants,
|
Constants,
|
||||||
Permissions,
|
Permissions,
|
||||||
EToolResources,
|
EToolResources,
|
||||||
PermissionTypes,
|
PermissionTypes,
|
||||||
|
replaceSpecialVars,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const {
|
const {
|
||||||
availableTools,
|
availableTools,
|
||||||
|
|
@ -34,8 +32,8 @@ const {
|
||||||
StructuredACS,
|
StructuredACS,
|
||||||
TraversaalSearch,
|
TraversaalSearch,
|
||||||
StructuredWolfram,
|
StructuredWolfram,
|
||||||
|
createYouTubeTools,
|
||||||
TavilySearchResults,
|
TavilySearchResults,
|
||||||
createGeminiImageTool,
|
|
||||||
createOpenAIImageTools,
|
createOpenAIImageTools,
|
||||||
} = require('../');
|
} = require('../');
|
||||||
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
|
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
|
||||||
|
|
@ -184,15 +182,30 @@ const loadTools = async ({
|
||||||
};
|
};
|
||||||
|
|
||||||
const customConstructors = {
|
const customConstructors = {
|
||||||
|
youtube: async (_toolContextMap) => {
|
||||||
|
const authFields = getAuthFields('youtube');
|
||||||
|
const authValues = await loadAuthValues({ userId: user, authFields });
|
||||||
|
return createYouTubeTools(authValues);
|
||||||
|
},
|
||||||
image_gen_oai: async (toolContextMap) => {
|
image_gen_oai: async (toolContextMap) => {
|
||||||
const authFields = getAuthFields('image_gen_oai');
|
const authFields = getAuthFields('image_gen_oai');
|
||||||
const authValues = await loadAuthValues({ userId: user, authFields });
|
const authValues = await loadAuthValues({ userId: user, authFields });
|
||||||
const imageFiles = options.tool_resources?.[EToolResources.image_edit]?.files ?? [];
|
const imageFiles = options.tool_resources?.[EToolResources.image_edit]?.files ?? [];
|
||||||
const toolContext = buildImageToolContext({
|
let toolContext = '';
|
||||||
imageFiles,
|
for (let i = 0; i < imageFiles.length; i++) {
|
||||||
toolName: `${EToolResources.image_edit}_oai`,
|
const file = imageFiles[i];
|
||||||
contextDescription: 'image editing',
|
if (!file) {
|
||||||
});
|
continue;
|
||||||
|
}
|
||||||
|
if (i === 0) {
|
||||||
|
toolContext =
|
||||||
|
'Image files provided in this request (their image IDs listed in order of appearance) available for image editing:';
|
||||||
|
}
|
||||||
|
toolContext += `\n\t- ${file.file_id}`;
|
||||||
|
if (i === imageFiles.length - 1) {
|
||||||
|
toolContext += `\n\nInclude any you need in the \`image_ids\` array when calling \`${EToolResources.image_edit}_oai\`. You may also include previously referenced or generated image IDs.`;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (toolContext) {
|
if (toolContext) {
|
||||||
toolContextMap.image_edit_oai = toolContext;
|
toolContextMap.image_edit_oai = toolContext;
|
||||||
}
|
}
|
||||||
|
|
@ -205,27 +218,6 @@ const loadTools = async ({
|
||||||
imageFiles,
|
imageFiles,
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
gemini_image_gen: async (toolContextMap) => {
|
|
||||||
const authFields = getAuthFields('gemini_image_gen');
|
|
||||||
const authValues = await loadAuthValues({ userId: user, authFields, throwError: false });
|
|
||||||
const imageFiles = options.tool_resources?.[EToolResources.image_edit]?.files ?? [];
|
|
||||||
const toolContext = buildImageToolContext({
|
|
||||||
imageFiles,
|
|
||||||
toolName: 'gemini_image_gen',
|
|
||||||
contextDescription: 'image context',
|
|
||||||
});
|
|
||||||
if (toolContext) {
|
|
||||||
toolContextMap.gemini_image_gen = toolContext;
|
|
||||||
}
|
|
||||||
return createGeminiImageTool({
|
|
||||||
...authValues,
|
|
||||||
isAgent: !!agent,
|
|
||||||
req: options.req,
|
|
||||||
imageFiles,
|
|
||||||
userId: user,
|
|
||||||
fileStrategy,
|
|
||||||
});
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const requestedTools = {};
|
const requestedTools = {};
|
||||||
|
|
@ -248,7 +240,6 @@ const loadTools = async ({
|
||||||
flux: imageGenOptions,
|
flux: imageGenOptions,
|
||||||
dalle: imageGenOptions,
|
dalle: imageGenOptions,
|
||||||
'stable-diffusion': imageGenOptions,
|
'stable-diffusion': imageGenOptions,
|
||||||
gemini_image_gen: imageGenOptions,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/** @type {Record<string, string>} */
|
/** @type {Record<string, string>} */
|
||||||
|
|
@ -324,7 +315,16 @@ const loadTools = async ({
|
||||||
});
|
});
|
||||||
const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
|
const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
|
||||||
requestedTools[tool] = async () => {
|
requestedTools[tool] = async () => {
|
||||||
toolContextMap[tool] = buildWebSearchContext();
|
toolContextMap[tool] = `# \`${tool}\`:
|
||||||
|
Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
|
||||||
|
1. **Execute immediately without preface** when using \`${tool}\`.
|
||||||
|
2. **After the search, begin with a brief summary** that directly addresses the query without headers or explaining your process.
|
||||||
|
3. **Structure your response clearly** using Markdown formatting (Level 2 headers for sections, lists for multiple points, tables for comparisons).
|
||||||
|
4. **Cite sources properly** according to the citation anchor format, utilizing group anchors when appropriate.
|
||||||
|
5. **Tailor your approach to the query type** (academic, news, coding, etc.) while maintaining an expert, journalistic, unbiased tone.
|
||||||
|
6. **Provide comprehensive information** with specific details, examples, and as much relevant context as possible from search results.
|
||||||
|
7. **Avoid moralizing language.**
|
||||||
|
`.trim();
|
||||||
return createSearchTool({
|
return createSearchTool({
|
||||||
...result.authResult,
|
...result.authResult,
|
||||||
onSearchResults,
|
onSearchResults,
|
||||||
|
|
@ -339,10 +339,7 @@ const loadTools = async ({
|
||||||
/** Placeholder used for UI purposes */
|
/** Placeholder used for UI purposes */
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
const serverConfig = serverName
|
if (serverName && options.req?.config?.mcpConfig?.[serverName] == null) {
|
||||||
? await getMCPServersRegistry().getServerConfig(serverName, user)
|
|
||||||
: null;
|
|
||||||
if (!serverConfig) {
|
|
||||||
logger.warn(
|
logger.warn(
|
||||||
`MCP server "${serverName}" for "${toolName}" tool is not configured${agent?.id != null && agent.id ? ` but attached to "${agent.id}"` : ''}`,
|
`MCP server "${serverName}" for "${toolName}" tool is not configured${agent?.id != null && agent.id ? ` but attached to "${agent.id}"` : ''}`,
|
||||||
);
|
);
|
||||||
|
|
@ -353,7 +350,6 @@ const loadTools = async ({
|
||||||
{
|
{
|
||||||
type: 'all',
|
type: 'all',
|
||||||
serverName,
|
serverName,
|
||||||
config: serverConfig,
|
|
||||||
},
|
},
|
||||||
];
|
];
|
||||||
continue;
|
continue;
|
||||||
|
|
@ -364,7 +360,6 @@ const loadTools = async ({
|
||||||
type: 'single',
|
type: 'single',
|
||||||
toolKey: tool,
|
toolKey: tool,
|
||||||
serverName,
|
serverName,
|
||||||
config: serverConfig,
|
|
||||||
});
|
});
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
@ -425,11 +420,9 @@ const loadTools = async ({
|
||||||
user: safeUser,
|
user: safeUser,
|
||||||
userMCPAuthMap,
|
userMCPAuthMap,
|
||||||
res: options.res,
|
res: options.res,
|
||||||
streamId: options.req?._resumableStreamId || null,
|
|
||||||
model: agent?.model ?? model,
|
model: agent?.model ?? model,
|
||||||
serverName: config.serverName,
|
serverName: config.serverName,
|
||||||
provider: agent?.provider ?? endpoint,
|
provider: agent?.provider ?? endpoint,
|
||||||
config: config.config,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
if (config.type === 'all' && toolConfigs.length === 1) {
|
if (config.type === 'all' && toolConfigs.length === 1) {
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,8 @@
|
||||||
const { validateTools, loadTools } = require('./handleTools');
|
const { validateTools, loadTools } = require('./handleTools');
|
||||||
|
const handleOpenAIErrors = require('./handleOpenAIErrors');
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
handleOpenAIErrors,
|
||||||
validateTools,
|
validateTools,
|
||||||
loadTools,
|
loadTools,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
10
api/cache/banViolation.js
vendored
10
api/cache/banViolation.js
vendored
|
|
@ -47,17 +47,7 @@ const banViolation = async (req, res, errorMessage) => {
|
||||||
}
|
}
|
||||||
|
|
||||||
await deleteAllUserSessions({ userId: user_id });
|
await deleteAllUserSessions({ userId: user_id });
|
||||||
|
|
||||||
/** Clear OpenID session tokens if present */
|
|
||||||
if (req.session?.openidTokens) {
|
|
||||||
delete req.session.openidTokens;
|
|
||||||
}
|
|
||||||
|
|
||||||
res.clearCookie('refreshToken');
|
res.clearCookie('refreshToken');
|
||||||
res.clearCookie('openid_access_token');
|
|
||||||
res.clearCookie('openid_id_token');
|
|
||||||
res.clearCookie('openid_user_id');
|
|
||||||
res.clearCookie('token_provider');
|
|
||||||
|
|
||||||
const banLogs = getLogStores(ViolationTypes.BAN);
|
const banLogs = getLogStores(ViolationTypes.BAN);
|
||||||
const duration = errorMessage.duration || banLogs.opts.ttl;
|
const duration = errorMessage.duration || banLogs.opts.ttl;
|
||||||
|
|
|
||||||
7
api/cache/getLogStores.js
vendored
7
api/cache/getLogStores.js
vendored
|
|
@ -37,7 +37,6 @@ const namespaces = {
|
||||||
[CacheKeys.ROLES]: standardCache(CacheKeys.ROLES),
|
[CacheKeys.ROLES]: standardCache(CacheKeys.ROLES),
|
||||||
[CacheKeys.APP_CONFIG]: standardCache(CacheKeys.APP_CONFIG),
|
[CacheKeys.APP_CONFIG]: standardCache(CacheKeys.APP_CONFIG),
|
||||||
[CacheKeys.CONFIG_STORE]: standardCache(CacheKeys.CONFIG_STORE),
|
[CacheKeys.CONFIG_STORE]: standardCache(CacheKeys.CONFIG_STORE),
|
||||||
[CacheKeys.TOOL_CACHE]: standardCache(CacheKeys.TOOL_CACHE),
|
|
||||||
[CacheKeys.PENDING_REQ]: standardCache(CacheKeys.PENDING_REQ),
|
[CacheKeys.PENDING_REQ]: standardCache(CacheKeys.PENDING_REQ),
|
||||||
[CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, namespace: CacheKeys.ENCODED_DOMAINS }),
|
[CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, namespace: CacheKeys.ENCODED_DOMAINS }),
|
||||||
[CacheKeys.ABORT_KEYS]: standardCache(CacheKeys.ABORT_KEYS, Time.TEN_MINUTES),
|
[CacheKeys.ABORT_KEYS]: standardCache(CacheKeys.ABORT_KEYS, Time.TEN_MINUTES),
|
||||||
|
|
@ -47,15 +46,11 @@ const namespaces = {
|
||||||
[CacheKeys.MODEL_QUERIES]: standardCache(CacheKeys.MODEL_QUERIES),
|
[CacheKeys.MODEL_QUERIES]: standardCache(CacheKeys.MODEL_QUERIES),
|
||||||
[CacheKeys.AUDIO_RUNS]: standardCache(CacheKeys.AUDIO_RUNS, Time.TEN_MINUTES),
|
[CacheKeys.AUDIO_RUNS]: standardCache(CacheKeys.AUDIO_RUNS, Time.TEN_MINUTES),
|
||||||
[CacheKeys.MESSAGES]: standardCache(CacheKeys.MESSAGES, Time.ONE_MINUTE),
|
[CacheKeys.MESSAGES]: standardCache(CacheKeys.MESSAGES, Time.ONE_MINUTE),
|
||||||
[CacheKeys.FLOWS]: standardCache(CacheKeys.FLOWS, Time.ONE_MINUTE * 10),
|
[CacheKeys.FLOWS]: standardCache(CacheKeys.FLOWS, Time.ONE_MINUTE * 3),
|
||||||
[CacheKeys.OPENID_EXCHANGED_TOKENS]: standardCache(
|
[CacheKeys.OPENID_EXCHANGED_TOKENS]: standardCache(
|
||||||
CacheKeys.OPENID_EXCHANGED_TOKENS,
|
CacheKeys.OPENID_EXCHANGED_TOKENS,
|
||||||
Time.TEN_MINUTES,
|
Time.TEN_MINUTES,
|
||||||
),
|
),
|
||||||
[CacheKeys.ADMIN_OAUTH_EXCHANGE]: standardCache(
|
|
||||||
CacheKeys.ADMIN_OAUTH_EXCHANGE,
|
|
||||||
Time.THIRTY_SECONDS,
|
|
||||||
),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,6 @@
|
||||||
const { EventSource } = require('eventsource');
|
const { EventSource } = require('eventsource');
|
||||||
const { Time } = require('librechat-data-provider');
|
const { Time } = require('librechat-data-provider');
|
||||||
const {
|
const { MCPManager, FlowStateManager, OAuthReconnectionManager } = require('@librechat/api');
|
||||||
MCPManager,
|
|
||||||
FlowStateManager,
|
|
||||||
MCPServersRegistry,
|
|
||||||
OAuthReconnectionManager,
|
|
||||||
} = require('@librechat/api');
|
|
||||||
const logger = require('./winston');
|
const logger = require('./winston');
|
||||||
|
|
||||||
global.EventSource = EventSource;
|
global.EventSource = EventSource;
|
||||||
|
|
@ -28,8 +23,6 @@ function getFlowStateManager(flowsCache) {
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
logger,
|
logger,
|
||||||
createMCPServersRegistry: MCPServersRegistry.createInstance,
|
|
||||||
getMCPServersRegistry: MCPServersRegistry.getInstance,
|
|
||||||
createMCPManager: MCPManager.createInstance,
|
createMCPManager: MCPManager.createInstance,
|
||||||
getMCPManager: MCPManager.getInstance,
|
getMCPManager: MCPManager.getInstance,
|
||||||
getFlowStateManager,
|
getFlowStateManager,
|
||||||
|
|
|
||||||
|
|
@ -1,35 +1,8 @@
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const fs = require('fs');
|
|
||||||
const winston = require('winston');
|
const winston = require('winston');
|
||||||
require('winston-daily-rotate-file');
|
require('winston-daily-rotate-file');
|
||||||
|
|
||||||
/**
|
const logDir = path.join(__dirname, '..', 'logs');
|
||||||
* Determine the log directory.
|
|
||||||
* Priority:
|
|
||||||
* 1. LIBRECHAT_LOG_DIR environment variable (allows user override)
|
|
||||||
* 2. /app/logs if running in Docker (bind-mounted with correct permissions)
|
|
||||||
* 3. api/logs relative to this file (local development)
|
|
||||||
*/
|
|
||||||
const getLogDir = () => {
|
|
||||||
if (process.env.LIBRECHAT_LOG_DIR) {
|
|
||||||
return process.env.LIBRECHAT_LOG_DIR;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if running in Docker container (cwd is /app)
|
|
||||||
if (process.cwd() === '/app') {
|
|
||||||
const dockerLogDir = '/app/logs';
|
|
||||||
// Ensure the directory exists
|
|
||||||
if (!fs.existsSync(dockerLogDir)) {
|
|
||||||
fs.mkdirSync(dockerLogDir, { recursive: true });
|
|
||||||
}
|
|
||||||
return dockerLogDir;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Local development: use api/logs relative to this file
|
|
||||||
return path.join(__dirname, '..', 'logs');
|
|
||||||
};
|
|
||||||
|
|
||||||
const logDir = getLogDir();
|
|
||||||
|
|
||||||
const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
|
const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,36 +1,9 @@
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const fs = require('fs');
|
|
||||||
const winston = require('winston');
|
const winston = require('winston');
|
||||||
require('winston-daily-rotate-file');
|
require('winston-daily-rotate-file');
|
||||||
const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = require('./parsers');
|
const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = require('./parsers');
|
||||||
|
|
||||||
/**
|
const logDir = path.join(__dirname, '..', 'logs');
|
||||||
* Determine the log directory.
|
|
||||||
* Priority:
|
|
||||||
* 1. LIBRECHAT_LOG_DIR environment variable (allows user override)
|
|
||||||
* 2. /app/logs if running in Docker (bind-mounted with correct permissions)
|
|
||||||
* 3. api/logs relative to this file (local development)
|
|
||||||
*/
|
|
||||||
const getLogDir = () => {
|
|
||||||
if (process.env.LIBRECHAT_LOG_DIR) {
|
|
||||||
return process.env.LIBRECHAT_LOG_DIR;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if running in Docker container (cwd is /app)
|
|
||||||
if (process.cwd() === '/app') {
|
|
||||||
const dockerLogDir = '/app/logs';
|
|
||||||
// Ensure the directory exists
|
|
||||||
if (!fs.existsSync(dockerLogDir)) {
|
|
||||||
fs.mkdirSync(dockerLogDir, { recursive: true });
|
|
||||||
}
|
|
||||||
return dockerLogDir;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Local development: use api/logs relative to this file
|
|
||||||
return path.join(__dirname, '..', 'logs');
|
|
||||||
};
|
|
||||||
|
|
||||||
const logDir = getLogDir();
|
|
||||||
|
|
||||||
const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
|
const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -40,10 +40,6 @@ if (!cached) {
|
||||||
cached = global.mongoose = { conn: null, promise: null };
|
cached = global.mongoose = { conn: null, promise: null };
|
||||||
}
|
}
|
||||||
|
|
||||||
mongoose.connection.on('error', (err) => {
|
|
||||||
logger.error('[connectDb] MongoDB connection error:', err);
|
|
||||||
});
|
|
||||||
|
|
||||||
async function connectDb() {
|
async function connectDb() {
|
||||||
if (cached.conn && cached.conn?._readyState === 1) {
|
if (cached.conn && cached.conn?._readyState === 1) {
|
||||||
return cached.conn;
|
return cached.conn;
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ const { logger } = require('@librechat/data-schemas');
|
||||||
const { CacheKeys } = require('librechat-data-provider');
|
const { CacheKeys } = require('librechat-data-provider');
|
||||||
const { isEnabled, FlowStateManager } = require('@librechat/api');
|
const { isEnabled, FlowStateManager } = require('@librechat/api');
|
||||||
const { getLogStores } = require('~/cache');
|
const { getLogStores } = require('~/cache');
|
||||||
const { batchResetMeiliFlags } = require('./utils');
|
|
||||||
|
|
||||||
const Conversation = mongoose.models.Conversation;
|
const Conversation = mongoose.models.Conversation;
|
||||||
const Message = mongoose.models.Message;
|
const Message = mongoose.models.Message;
|
||||||
|
|
@ -13,11 +12,6 @@ const searchEnabled = isEnabled(process.env.SEARCH);
|
||||||
const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
|
const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
|
||||||
let currentTimeout = null;
|
let currentTimeout = null;
|
||||||
|
|
||||||
const defaultSyncThreshold = 1000;
|
|
||||||
const syncThreshold = process.env.MEILI_SYNC_THRESHOLD
|
|
||||||
? parseInt(process.env.MEILI_SYNC_THRESHOLD, 10)
|
|
||||||
: defaultSyncThreshold;
|
|
||||||
|
|
||||||
class MeiliSearchClient {
|
class MeiliSearchClient {
|
||||||
static instance = null;
|
static instance = null;
|
||||||
|
|
||||||
|
|
@ -195,11 +189,6 @@ async function ensureFilterableAttributes(client) {
|
||||||
*/
|
*/
|
||||||
async function performSync(flowManager, flowId, flowType) {
|
async function performSync(flowManager, flowId, flowType) {
|
||||||
try {
|
try {
|
||||||
if (indexingDisabled === true) {
|
|
||||||
logger.info('[indexSync] Indexing is disabled, skipping...');
|
|
||||||
return { messagesSync: false, convosSync: false };
|
|
||||||
}
|
|
||||||
|
|
||||||
const client = MeiliSearchClient.getInstance();
|
const client = MeiliSearchClient.getInstance();
|
||||||
|
|
||||||
const { status } = await client.health();
|
const { status } = await client.health();
|
||||||
|
|
@ -207,6 +196,11 @@ async function performSync(flowManager, flowId, flowType) {
|
||||||
throw new Error('Meilisearch not available');
|
throw new Error('Meilisearch not available');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (indexingDisabled === true) {
|
||||||
|
logger.info('[indexSync] Indexing is disabled, skipping...');
|
||||||
|
return { messagesSync: false, convosSync: false };
|
||||||
|
}
|
||||||
|
|
||||||
/** Ensures indexes have proper filterable attributes configured */
|
/** Ensures indexes have proper filterable attributes configured */
|
||||||
const { settingsUpdated, orphanedDocsFound: _orphanedDocsFound } =
|
const { settingsUpdated, orphanedDocsFound: _orphanedDocsFound } =
|
||||||
await ensureFilterableAttributes(client);
|
await ensureFilterableAttributes(client);
|
||||||
|
|
@ -221,30 +215,33 @@ async function performSync(flowManager, flowId, flowType) {
|
||||||
);
|
);
|
||||||
|
|
||||||
// Reset sync flags to force full re-sync
|
// Reset sync flags to force full re-sync
|
||||||
await batchResetMeiliFlags(Message.collection);
|
await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
|
||||||
await batchResetMeiliFlags(Conversation.collection);
|
await Conversation.collection.updateMany(
|
||||||
|
{ _meiliIndex: true },
|
||||||
|
{ $set: { _meiliIndex: false } },
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we need to sync messages
|
// Check if we need to sync messages
|
||||||
logger.info('[indexSync] Requesting message sync progress...');
|
|
||||||
const messageProgress = await Message.getSyncProgress();
|
const messageProgress = await Message.getSyncProgress();
|
||||||
if (!messageProgress.isComplete || settingsUpdated) {
|
if (!messageProgress.isComplete || settingsUpdated) {
|
||||||
logger.info(
|
logger.info(
|
||||||
`[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
|
`[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
|
||||||
);
|
);
|
||||||
|
|
||||||
const messageCount = messageProgress.totalDocuments;
|
// Check if we should do a full sync or incremental
|
||||||
|
const messageCount = await Message.countDocuments();
|
||||||
const messagesIndexed = messageProgress.totalProcessed;
|
const messagesIndexed = messageProgress.totalProcessed;
|
||||||
const unindexedMessages = messageCount - messagesIndexed;
|
const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);
|
||||||
|
|
||||||
if (settingsUpdated || unindexedMessages > syncThreshold) {
|
if (messageCount - messagesIndexed > syncThreshold) {
|
||||||
logger.info(`[indexSync] Starting message sync (${unindexedMessages} unindexed)`);
|
logger.info('[indexSync] Starting full message sync due to large difference');
|
||||||
|
await Message.syncWithMeili();
|
||||||
|
messagesSync = true;
|
||||||
|
} else if (messageCount !== messagesIndexed) {
|
||||||
|
logger.warn('[indexSync] Messages out of sync, performing incremental sync');
|
||||||
await Message.syncWithMeili();
|
await Message.syncWithMeili();
|
||||||
messagesSync = true;
|
messagesSync = true;
|
||||||
} else if (unindexedMessages > 0) {
|
|
||||||
logger.info(
|
|
||||||
`[indexSync] ${unindexedMessages} messages unindexed (below threshold: ${syncThreshold}, skipping)`,
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
logger.info(
|
logger.info(
|
||||||
|
|
@ -259,18 +256,18 @@ async function performSync(flowManager, flowId, flowType) {
|
||||||
`[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
|
`[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
|
||||||
);
|
);
|
||||||
|
|
||||||
const convoCount = convoProgress.totalDocuments;
|
const convoCount = await Conversation.countDocuments();
|
||||||
const convosIndexed = convoProgress.totalProcessed;
|
const convosIndexed = convoProgress.totalProcessed;
|
||||||
|
const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);
|
||||||
|
|
||||||
const unindexedConvos = convoCount - convosIndexed;
|
if (convoCount - convosIndexed > syncThreshold) {
|
||||||
if (settingsUpdated || unindexedConvos > syncThreshold) {
|
logger.info('[indexSync] Starting full conversation sync due to large difference');
|
||||||
logger.info(`[indexSync] Starting convos sync (${unindexedConvos} unindexed)`);
|
await Conversation.syncWithMeili();
|
||||||
|
convosSync = true;
|
||||||
|
} else if (convoCount !== convosIndexed) {
|
||||||
|
logger.warn('[indexSync] Convos out of sync, performing incremental sync');
|
||||||
await Conversation.syncWithMeili();
|
await Conversation.syncWithMeili();
|
||||||
convosSync = true;
|
convosSync = true;
|
||||||
} else if (unindexedConvos > 0) {
|
|
||||||
logger.info(
|
|
||||||
`[indexSync] ${unindexedConvos} convos unindexed (below threshold: ${syncThreshold}, skipping)`,
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
logger.info(
|
logger.info(
|
||||||
|
|
|
||||||
|
|
@ -1,465 +0,0 @@
|
||||||
/**
|
|
||||||
* Unit tests for performSync() function in indexSync.js
|
|
||||||
*
|
|
||||||
* Tests use real mongoose with mocked model methods, only mocking external calls.
|
|
||||||
*/
|
|
||||||
|
|
||||||
const mongoose = require('mongoose');
|
|
||||||
|
|
||||||
// Mock only external dependencies (not internal classes/models)
|
|
||||||
const mockLogger = {
|
|
||||||
info: jest.fn(),
|
|
||||||
warn: jest.fn(),
|
|
||||||
error: jest.fn(),
|
|
||||||
debug: jest.fn(),
|
|
||||||
};
|
|
||||||
|
|
||||||
const mockMeiliHealth = jest.fn();
|
|
||||||
const mockMeiliIndex = jest.fn();
|
|
||||||
const mockBatchResetMeiliFlags = jest.fn();
|
|
||||||
const mockIsEnabled = jest.fn();
|
|
||||||
const mockGetLogStores = jest.fn();
|
|
||||||
|
|
||||||
// Create mock models that will be reused
|
|
||||||
const createMockModel = (collectionName) => ({
|
|
||||||
collection: { name: collectionName },
|
|
||||||
getSyncProgress: jest.fn(),
|
|
||||||
syncWithMeili: jest.fn(),
|
|
||||||
countDocuments: jest.fn(),
|
|
||||||
});
|
|
||||||
|
|
||||||
const originalMessageModel = mongoose.models.Message;
|
|
||||||
const originalConversationModel = mongoose.models.Conversation;
|
|
||||||
|
|
||||||
// Mock external modules
|
|
||||||
jest.mock('@librechat/data-schemas', () => ({
|
|
||||||
logger: mockLogger,
|
|
||||||
}));
|
|
||||||
|
|
||||||
jest.mock('meilisearch', () => ({
|
|
||||||
MeiliSearch: jest.fn(() => ({
|
|
||||||
health: mockMeiliHealth,
|
|
||||||
index: mockMeiliIndex,
|
|
||||||
})),
|
|
||||||
}));
|
|
||||||
|
|
||||||
jest.mock('./utils', () => ({
|
|
||||||
batchResetMeiliFlags: mockBatchResetMeiliFlags,
|
|
||||||
}));
|
|
||||||
|
|
||||||
jest.mock('@librechat/api', () => ({
|
|
||||||
isEnabled: mockIsEnabled,
|
|
||||||
FlowStateManager: jest.fn(),
|
|
||||||
}));
|
|
||||||
|
|
||||||
jest.mock('~/cache', () => ({
|
|
||||||
getLogStores: mockGetLogStores,
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Set environment before module load
|
|
||||||
process.env.MEILI_HOST = 'http://localhost:7700';
|
|
||||||
process.env.MEILI_MASTER_KEY = 'test-key';
|
|
||||||
process.env.SEARCH = 'true';
|
|
||||||
process.env.MEILI_SYNC_THRESHOLD = '1000'; // Set threshold before module loads
|
|
||||||
|
|
||||||
describe('performSync() - syncThreshold logic', () => {
|
|
||||||
const ORIGINAL_ENV = process.env;
|
|
||||||
let Message;
|
|
||||||
let Conversation;
|
|
||||||
|
|
||||||
beforeAll(() => {
|
|
||||||
Message = createMockModel('messages');
|
|
||||||
Conversation = createMockModel('conversations');
|
|
||||||
|
|
||||||
mongoose.models.Message = Message;
|
|
||||||
mongoose.models.Conversation = Conversation;
|
|
||||||
});
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
// Reset all mocks
|
|
||||||
jest.clearAllMocks();
|
|
||||||
// Reset modules to ensure fresh load of indexSync.js and its top-level consts (like syncThreshold)
|
|
||||||
jest.resetModules();
|
|
||||||
|
|
||||||
// Set up environment
|
|
||||||
process.env = { ...ORIGINAL_ENV };
|
|
||||||
process.env.MEILI_HOST = 'http://localhost:7700';
|
|
||||||
process.env.MEILI_MASTER_KEY = 'test-key';
|
|
||||||
process.env.SEARCH = 'true';
|
|
||||||
delete process.env.MEILI_NO_SYNC;
|
|
||||||
|
|
||||||
// Re-ensure models are available in mongoose after resetModules
|
|
||||||
// We must require mongoose again to get the fresh instance that indexSync will use
|
|
||||||
const mongoose = require('mongoose');
|
|
||||||
mongoose.models.Message = Message;
|
|
||||||
mongoose.models.Conversation = Conversation;
|
|
||||||
|
|
||||||
// Mock isEnabled
|
|
||||||
mockIsEnabled.mockImplementation((val) => val === 'true' || val === true);
|
|
||||||
|
|
||||||
// Mock MeiliSearch client responses
|
|
||||||
mockMeiliHealth.mockResolvedValue({ status: 'available' });
|
|
||||||
mockMeiliIndex.mockReturnValue({
|
|
||||||
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: ['user'] }),
|
|
||||||
updateSettings: jest.fn().mockResolvedValue({}),
|
|
||||||
search: jest.fn().mockResolvedValue({ hits: [] }),
|
|
||||||
});
|
|
||||||
|
|
||||||
mockBatchResetMeiliFlags.mockResolvedValue(undefined);
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
process.env = ORIGINAL_ENV;
|
|
||||||
});
|
|
||||||
|
|
||||||
afterAll(() => {
|
|
||||||
mongoose.models.Message = originalMessageModel;
|
|
||||||
mongoose.models.Conversation = originalConversationModel;
|
|
||||||
});
|
|
||||||
|
|
||||||
test('triggers sync when unindexed messages exceed syncThreshold', async () => {
  // The threshold is read at module load time, so set it first.
  process.env.MEILI_SYNC_THRESHOLD = '1000';

  // 1150 total - 100 indexed = 1050 unindexed, above the 1000 threshold.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 1150,
    isComplete: false,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 50,
    isComplete: true,
  });
  Message.syncWithMeili.mockResolvedValue(undefined);

  const indexSync = require('./indexSync');
  await indexSync();

  // The point of the optimization: no countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();
  expect(Conversation.countDocuments).not.toHaveBeenCalled();

  // Message sync fires because 1050 > 1000.
  expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Messages need syncing: 100/1150 indexed',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Starting message sync (1050 unindexed)',
  );

  // Conversations are already complete, so no conversation sync.
  expect(Conversation.syncWithMeili).not.toHaveBeenCalled();
});

test('skips sync when unindexed messages are below syncThreshold', async () => {
  // 150 total - 100 indexed = 50 unindexed, below the 1000 threshold.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 150,
    isComplete: false,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 50,
    isComplete: true,
  });

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // No countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();
  expect(Conversation.countDocuments).not.toHaveBeenCalled();

  // 50 < 1000, so the message sync is skipped.
  expect(Message.syncWithMeili).not.toHaveBeenCalled();
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Messages need syncing: 100/150 indexed',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] 50 messages unindexed (below threshold: 1000, skipping)',
  );

  // Conversations are already complete, so no conversation sync.
  expect(Conversation.syncWithMeili).not.toHaveBeenCalled();
});
|
|
||||||
|
|
||||||
test('respects syncThreshold at boundary (exactly at threshold)', async () => {
  // 1100 total - 100 indexed = exactly 1000 unindexed — NOT greater than the threshold.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 1100,
    isComplete: false,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 0,
    totalDocuments: 0,
    isComplete: true,
  });

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // No countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();

  // The comparison is strict (>), so exactly 1000 unindexed does not trigger a sync.
  expect(Message.syncWithMeili).not.toHaveBeenCalled();
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Messages need syncing: 100/1100 indexed',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] 1000 messages unindexed (below threshold: 1000, skipping)',
  );
});

test('triggers sync when unindexed is threshold + 1', async () => {
  // 1101 total - 100 indexed = 1001 unindexed, one past the threshold.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 1101,
    isComplete: false,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 0,
    totalDocuments: 0,
    isComplete: true,
  });
  Message.syncWithMeili.mockResolvedValue(undefined);

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // No countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();

  // 1001 > 1000 triggers the message sync.
  expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Messages need syncing: 100/1101 indexed',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Starting message sync (1001 unindexed)',
  );
});
|
|
||||||
|
|
||||||
test('uses totalDocuments from convoProgress for conversation sync decisions', async () => {
  // Messages fully indexed; conversations have 1050 unindexed (> 1000 threshold).
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 100,
    isComplete: true,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 1100,
    isComplete: false,
  });
  Conversation.syncWithMeili.mockResolvedValue(undefined);

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // The optimization under test: no countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();
  expect(Conversation.countDocuments).not.toHaveBeenCalled();

  // Only the conversation sync should run.
  expect(Message.syncWithMeili).not.toHaveBeenCalled();
  expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Conversations need syncing: 50/1100 indexed',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Starting convos sync (1050 unindexed)',
  );
});

test('skips sync when collections are fully synced', async () => {
  // Both collections report isComplete, so nothing should be synced.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 100,
    isComplete: true,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 50,
    isComplete: true,
  });

  const indexSync = require('./indexSync');
  await indexSync();

  // No countDocuments round-trips.
  expect(Message.countDocuments).not.toHaveBeenCalled();
  expect(Conversation.countDocuments).not.toHaveBeenCalled();

  // No sync of either collection.
  expect(Message.syncWithMeili).not.toHaveBeenCalled();
  expect(Conversation.syncWithMeili).not.toHaveBeenCalled();

  // The "fully synced" paths were logged.
  expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Messages are fully synced: 100/100');
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Conversations are fully synced: 50/50',
  );
});
|
|
||||||
|
|
||||||
test('triggers message sync when settingsUpdated even if below syncThreshold', async () => {
  // Only 50 unindexed (< 1000), but a settings change forces a full re-sync.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 150,
    isComplete: false,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 50,
    isComplete: true,
  });
  Message.syncWithMeili.mockResolvedValue(undefined);

  // A missing 'user' filterable attribute simulates an outdated index configuration.
  mockMeiliIndex.mockReturnValue({
    getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }),
    updateSettings: jest.fn().mockResolvedValue({}),
    search: jest.fn().mockResolvedValue({ hits: [] }),
  });

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // A settings update resets the _meiliIndex flags on both collections.
  expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
  expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);

  // Sync runs even though 50 < 1000.
  expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
  );
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Starting message sync (50 unindexed)',
  );
});

test('triggers conversation sync when settingsUpdated even if below syncThreshold', async () => {
  // Messages complete; conversations have 50 unindexed (< 1000), but settings changed.
  Message.getSyncProgress.mockResolvedValue({
    totalProcessed: 100,
    totalDocuments: 100,
    isComplete: true,
  });
  Conversation.getSyncProgress.mockResolvedValue({
    totalProcessed: 50,
    totalDocuments: 100,
    isComplete: false,
  });
  Conversation.syncWithMeili.mockResolvedValue(undefined);

  // A missing 'user' filterable attribute simulates an outdated index configuration.
  mockMeiliIndex.mockReturnValue({
    getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }),
    updateSettings: jest.fn().mockResolvedValue({}),
    search: jest.fn().mockResolvedValue({ hits: [] }),
  });

  process.env.MEILI_SYNC_THRESHOLD = '1000';

  const indexSync = require('./indexSync');
  await indexSync();

  // A settings update resets the _meiliIndex flags on both collections.
  expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
  expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);

  // Sync runs even though 50 < 1000.
  expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
  expect(mockLogger.info).toHaveBeenCalledWith(
    '[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
  );
  expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Starting convos sync (50 unindexed)');
});
|
|
||||||
|
|
||||||
test('triggers both message and conversation sync when settingsUpdated even if both below syncThreshold', async () => {
|
|
||||||
// Arrange: Set threshold before module load
|
|
||||||
process.env.MEILI_SYNC_THRESHOLD = '1000';
|
|
||||||
|
|
||||||
// Arrange: Both have documents below threshold (50 each), but settings were updated
|
|
||||||
Message.getSyncProgress.mockResolvedValue({
|
|
||||||
totalProcessed: 100,
|
|
||||||
totalDocuments: 150, // 50 unindexed
|
|
||||||
isComplete: false,
|
|
||||||
});
|
|
||||||
|
|
||||||
Conversation.getSyncProgress.mockResolvedValue({
|
|
||||||
totalProcessed: 50,
|
|
||||||
totalDocuments: 100, // 50 unindexed
|
|
||||||
isComplete: false,
|
|
||||||
});
|
|
||||||
|
|
||||||
Message.syncWithMeili.mockResolvedValue(undefined);
|
|
||||||
Conversation.syncWithMeili.mockResolvedValue(undefined);
|
|
||||||
|
|
||||||
// Mock settings update scenario
|
|
||||||
mockMeiliIndex.mockReturnValue({
|
|
||||||
getSettings: jest.fn().mockResolvedValue({ filterableAttributes: [] }), // No user field
|
|
||||||
updateSettings: jest.fn().mockResolvedValue({}),
|
|
||||||
search: jest.fn().mockResolvedValue({ hits: [] }),
|
|
||||||
});
|
|
||||||
|
|
||||||
// Act
|
|
||||||
const indexSync = require('./indexSync');
|
|
||||||
await indexSync();
|
|
||||||
|
|
||||||
// Assert: Flags were reset due to settings update
|
|
||||||
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Message.collection);
|
|
||||||
expect(mockBatchResetMeiliFlags).toHaveBeenCalledWith(Conversation.collection);
|
|
||||||
|
|
||||||
// Assert: Both syncs triggered despite both being below threshold
|
|
||||||
expect(Message.syncWithMeili).toHaveBeenCalledTimes(1);
|
|
||||||
expect(Conversation.syncWithMeili).toHaveBeenCalledTimes(1);
|
|
||||||
expect(mockLogger.info).toHaveBeenCalledWith(
|
|
||||||
'[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
|
|
||||||
);
|
|
||||||
expect(mockLogger.info).toHaveBeenCalledWith(
|
|
||||||
'[indexSync] Starting message sync (50 unindexed)',
|
|
||||||
);
|
|
||||||
expect(mockLogger.info).toHaveBeenCalledWith('[indexSync] Starting convos sync (50 unindexed)');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
@ -1,90 +0,0 @@
|
||||||
const { logger } = require('@librechat/data-schemas');
|
|
||||||
|
|
||||||
/** Pause execution for the given number of milliseconds. */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
|
|
||||||
|
|
||||||
/**
 * Batch update documents in chunks to avoid timeouts on weak instances
 * @param {mongoose.Collection} collection - MongoDB collection
 * @returns {Promise<number>} - Total modified count
 * @throws {Error} - Throws if database operations fail (e.g. network issues, connection loss, permission problems)
 */
async function batchResetMeiliFlags(collection) {
  const DEFAULT_BATCH_SIZE = 1000;

  // A batch size of 0 would loop forever fetching empty pages; fall back to the default.
  let batchSize = parseEnvInt('MEILI_SYNC_BATCH_SIZE', DEFAULT_BATCH_SIZE);
  if (batchSize === 0) {
    logger.warn(
      `[batchResetMeiliFlags] MEILI_SYNC_BATCH_SIZE cannot be 0. Using default: ${DEFAULT_BATCH_SIZE}`,
    );
    batchSize = DEFAULT_BATCH_SIZE;
  }

  const delayMs = parseEnvInt('MEILI_SYNC_DELAY_MS', 100);
  let totalModified = 0;

  try {
    for (;;) {
      // Only fetch ids of unexpired documents whose flag is not already false.
      const batch = await collection
        .find({ expiredAt: null, _meiliIndex: { $ne: false } }, { projection: { _id: 1 } })
        .limit(batchSize)
        .toArray();

      if (batch.length === 0) {
        break;
      }

      const batchIds = batch.map(({ _id }) => _id);
      const { modifiedCount } = await collection.updateMany(
        { _id: { $in: batchIds } },
        { $set: { _meiliIndex: false } },
      );

      totalModified += modifiedCount;
      // Progress indicator on a single console line (\r rewrites in place).
      process.stdout.write(
        `\r Updating ${collection.collectionName}: ${totalModified} documents...`,
      );

      // A short batch means we have drained the collection; stop without sleeping.
      if (batch.length < batchSize) {
        break;
      }
      // Throttle between batches so weak instances are not overwhelmed.
      if (delayMs > 0) {
        await sleep(delayMs);
      }
    }

    return totalModified;
  } catch (error) {
    throw new Error(
      `Failed to batch reset Meili flags for collection '${collection.collectionName}' after processing ${totalModified} documents: ${error.message}`,
    );
  }
}
|
|
||||||
|
|
||||||
/**
 * Parse and validate an environment variable as a positive integer
 * @param {string} varName - Environment variable name
 * @param {number} defaultValue - Default value to use if invalid or missing
 * @returns {number} - Parsed value or default
 */
function parseEnvInt(varName, defaultValue) {
  const raw = process.env[varName];
  // Missing or empty values fall back to the default silently.
  if (!raw) {
    return defaultValue;
  }

  const parsed = Number.parseInt(raw, 10);
  // NOTE: 0 is accepted here; callers that cannot handle 0 guard separately.
  if (Number.isNaN(parsed) || parsed < 0) {
    logger.warn(
      `[batchResetMeiliFlags] Invalid value for ${varName}="${raw}". Expected a positive integer. Using default: ${defaultValue}`,
    );
    return defaultValue;
  }

  return parsed;
}
|
|
||||||
|
|
||||||
// Public API of this module.
module.exports = { batchResetMeiliFlags };
|
|
||||||
|
|
@ -1,523 +0,0 @@
|
||||||
const mongoose = require('mongoose');
|
|
||||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
|
||||||
const { batchResetMeiliFlags } = require('./utils');
|
|
||||||
|
|
||||||
describe('batchResetMeiliFlags', () => {
|
|
||||||
let mongoServer;
let testCollection;
// Capture the env vars before the suite mutates them, so afterAll can restore.
const ORIGINAL_BATCH_SIZE = process.env.MEILI_SYNC_BATCH_SIZE;
const ORIGINAL_BATCH_DELAY = process.env.MEILI_SYNC_DELAY_MS;

beforeAll(async () => {
  mongoServer = await MongoMemoryServer.create();
  await mongoose.connect(mongoServer.getUri());
});

afterAll(async () => {
  await mongoose.disconnect();
  await mongoServer.stop();

  // Restore the env vars captured before the suite ran.
  if (ORIGINAL_BATCH_SIZE !== undefined) {
    process.env.MEILI_SYNC_BATCH_SIZE = ORIGINAL_BATCH_SIZE;
  } else {
    delete process.env.MEILI_SYNC_BATCH_SIZE;
  }

  if (ORIGINAL_BATCH_DELAY !== undefined) {
    process.env.MEILI_SYNC_DELAY_MS = ORIGINAL_BATCH_DELAY;
  } else {
    delete process.env.MEILI_SYNC_DELAY_MS;
  }
});

beforeEach(async () => {
  // Fresh, empty collection and default env for every test.
  testCollection = mongoose.connection.db.collection('test_meili_batch');
  await testCollection.deleteMany({});

  delete process.env.MEILI_SYNC_BATCH_SIZE;
  delete process.env.MEILI_SYNC_DELAY_MS;
});

afterEach(async () => {
  if (testCollection) {
    await testCollection.deleteMany({});
  }
});
|
|
||||||
|
|
||||||
describe('basic functionality', () => {
  it('should reset _meiliIndex flag for documents with expiredAt: null and _meiliIndex: true', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true, name: 'doc1' },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true, name: 'doc2' },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true, name: 'doc3' },
    ]);

    const result = await batchResetMeiliFlags(testCollection);
    expect(result).toBe(3);

    // All three flags flipped to false; none remain true.
    const resetDocs = await testCollection.find({ _meiliIndex: false }).toArray();
    expect(resetDocs).toHaveLength(3);

    const stillFlagged = await testCollection.find({ _meiliIndex: true }).toArray();
    expect(stillFlagged).toHaveLength(0);
  });

  it('should not modify documents with expiredAt set', async () => {
    const expiredDate = new Date();
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: expiredDate, _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
    ]);

    const result = await batchResetMeiliFlags(testCollection);
    expect(result).toBe(1);

    // The expired document keeps its flag untouched.
    const expiredDoc = await testCollection.findOne({ expiredAt: expiredDate });
    expect(expiredDoc._meiliIndex).toBe(true);
  });

  it('should not modify documents with _meiliIndex: false', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: false },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
    ]);

    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(1);
  });

  it('should return 0 when no documents match the criteria', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: new Date(), _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: false },
    ]);

    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(0);
  });

  it('should return 0 when collection is empty', async () => {
    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(0);
  });
});
|
|
||||||
|
|
||||||
describe('batch processing', () => {
  it('should process documents in batches according to MEILI_SYNC_BATCH_SIZE', async () => {
    process.env.MEILI_SYNC_BATCH_SIZE = '2';

    // 5 documents with batch size 2 forces multiple find/update rounds.
    const docs = Array.from({ length: 5 }, (_, i) => ({
      _id: new mongoose.Types.ObjectId(),
      expiredAt: null,
      _meiliIndex: true,
      name: `doc${i}`,
    }));
    await testCollection.insertMany(docs);

    const result = await batchResetMeiliFlags(testCollection);
    expect(result).toBe(5);

    const resetDocs = await testCollection.find({ _meiliIndex: false }).toArray();
    expect(resetDocs).toHaveLength(5);
  });

  it('should handle large datasets with small batch sizes', async () => {
    process.env.MEILI_SYNC_BATCH_SIZE = '10';

    const docs = Array.from({ length: 25 }, () => ({
      _id: new mongoose.Types.ObjectId(),
      expiredAt: null,
      _meiliIndex: true,
    }));
    await testCollection.insertMany(docs);

    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(25);
  });

  it('should use default batch size of 1000 when env variable is not set', async () => {
    // Exactly 1000 documents should fit in a single default-sized batch.
    const docs = Array.from({ length: 1000 }, () => ({
      _id: new mongoose.Types.ObjectId(),
      expiredAt: null,
      _meiliIndex: true,
    }));
    await testCollection.insertMany(docs);

    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(1000);
  });
});
|
|
||||||
|
|
||||||
describe('return value', () => {
  it('should return correct modified count', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
    ]);

    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(1);
  });
});
|
|
||||||
|
|
||||||
describe('batch delay', () => {
  it('should respect MEILI_SYNC_DELAY_MS between batches', async () => {
    process.env.MEILI_SYNC_BATCH_SIZE = '2';
    process.env.MEILI_SYNC_DELAY_MS = '50';

    const docs = Array.from({ length: 5 }, () => ({
      _id: new mongoose.Types.ObjectId(),
      expiredAt: null,
      _meiliIndex: true,
    }));
    await testCollection.insertMany(docs);

    const startTime = Date.now();
    await batchResetMeiliFlags(testCollection);
    const elapsed = Date.now() - startTime;

    // 5 docs / batch size 2 = 3 batches => 2 inter-batch delays (none after the last).
    // Minimum is ~100ms (2 * 50ms); assert 80ms to tolerate timer jitter.
    expect(elapsed).toBeGreaterThanOrEqual(80);
  });

  it('should not delay when MEILI_SYNC_DELAY_MS is 0', async () => {
    process.env.MEILI_SYNC_BATCH_SIZE = '2';
    process.env.MEILI_SYNC_DELAY_MS = '0';

    const docs = Array.from({ length: 5 }, () => ({
      _id: new mongoose.Types.ObjectId(),
      expiredAt: null,
      _meiliIndex: true,
    }));
    await testCollection.insertMany(docs);

    const startTime = Date.now();
    await batchResetMeiliFlags(testCollection);
    const elapsed = Date.now() - startTime;

    // No intentional sleeps; database work alone should finish well under a second.
    expect(elapsed).toBeLessThan(1000);

    const result = await testCollection.countDocuments({ _meiliIndex: false });
    expect(result).toBe(5);
  });

  it('should not delay after the last batch', async () => {
    process.env.MEILI_SYNC_BATCH_SIZE = '3';
    process.env.MEILI_SYNC_DELAY_MS = '100';

    // Exactly 3 documents fit in a single batch, so no delay is expected.
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
    ]);

    const result = await batchResetMeiliFlags(testCollection);
    expect(result).toBe(3);

    const updatedCount = await testCollection.countDocuments({ _meiliIndex: false });
    expect(updatedCount).toBe(3);
  });
});
|
|
||||||
|
|
||||||
describe('edge cases', () => {
  it('should handle documents without _meiliIndex field', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
    ]);

    // A missing _meiliIndex still matches { $ne: false }, so both documents are updated.
    await expect(batchResetMeiliFlags(testCollection)).resolves.toBe(2);
  });

  it('should handle mixed document states correctly', async () => {
    await testCollection.insertMany([
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: false },
      { _id: new mongoose.Types.ObjectId(), expiredAt: new Date(), _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: null },
      { _id: new mongoose.Types.ObjectId(), expiredAt: null },
    ]);

    const result = await batchResetMeiliFlags(testCollection);
    expect(result).toBe(4);

    // 4 documents were updated plus the one that was already false.
    const flaggedDocs = await testCollection
      .find({ expiredAt: null, _meiliIndex: false })
      .toArray();
    expect(flaggedDocs).toHaveLength(5);
  });
});
|
|
||||||
|
|
||||||
describe('error handling', () => {
  /** Build a `find` mock whose find().limit().toArray() uses the supplied implementation. */
  const mockFindChain = (toArrayImpl) =>
    jest.fn().mockReturnValue({
      limit: jest.fn().mockReturnValue({ toArray: toArrayImpl }),
    });

  it('should throw error with context when find operation fails', async () => {
    const mockCollection = {
      collectionName: 'test_meili_batch',
      find: mockFindChain(jest.fn().mockRejectedValue(new Error('Network error'))),
    };

    await expect(batchResetMeiliFlags(mockCollection)).rejects.toThrow(
      "Failed to batch reset Meili flags for collection 'test_meili_batch' after processing 0 documents: Network error",
    );
  });

  it('should throw error with context when updateMany operation fails', async () => {
    const mockCollection = {
      collectionName: 'test_meili_batch',
      find: mockFindChain(
        jest
          .fn()
          .mockResolvedValue([
            { _id: new mongoose.Types.ObjectId() },
            { _id: new mongoose.Types.ObjectId() },
          ]),
      ),
      updateMany: jest.fn().mockRejectedValue(new Error('Connection lost')),
    };

    await expect(batchResetMeiliFlags(mockCollection)).rejects.toThrow(
      "Failed to batch reset Meili flags for collection 'test_meili_batch' after processing 0 documents: Connection lost",
    );
  });

  it('should include documents processed count in error when failure occurs mid-batch', async () => {
    // Small batches force multiple update rounds; the third one fails.
    process.env.MEILI_SYNC_BATCH_SIZE = '2';
    process.env.MEILI_SYNC_DELAY_MS = '0'; // No delay for a faster test

    let findCallCount = 0;
    let updateCallCount = 0;

    const mockCollection = {
      collectionName: 'test_meili_batch',
      find: mockFindChain(
        jest.fn().mockImplementation(() => {
          findCallCount++;
          // Keep feeding full batches until the failing update fires.
          if (findCallCount <= 3) {
            return Promise.resolve([
              { _id: new mongoose.Types.ObjectId() },
              { _id: new mongoose.Types.ObjectId() },
            ]);
          }
          // Should not be reached: the third update rejects first.
          return Promise.resolve([]);
        }),
      ),
      updateMany: jest.fn().mockImplementation(() => {
        updateCallCount++;
        if (updateCallCount <= 2) {
          return Promise.resolve({ modifiedCount: 2 });
        }
        return Promise.reject(new Error('Database timeout'));
      }),
    };

    await expect(batchResetMeiliFlags(mockCollection)).rejects.toThrow(
      "Failed to batch reset Meili flags for collection 'test_meili_batch' after processing 4 documents: Database timeout",
    );
  });

  it('should use collection.collectionName in error messages', async () => {
    const mockCollection = {
      collectionName: 'messages',
      find: mockFindChain(jest.fn().mockRejectedValue(new Error('Permission denied'))),
    };

    await expect(batchResetMeiliFlags(mockCollection)).rejects.toThrow(
      "Failed to batch reset Meili flags for collection 'messages' after processing 0 documents: Permission denied",
    );
  });
});
|
|
||||||
|
|
||||||
describe('environment variable validation', () => {
|
|
||||||
let warnSpy;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
// Mock logger.warn to track warning calls
|
|
||||||
const { logger } = require('@librechat/data-schemas');
|
|
||||||
warnSpy = jest.spyOn(logger, 'warn').mockImplementation(() => {});
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
if (warnSpy) {
|
|
||||||
warnSpy.mockRestore();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should log warning and use default when MEILI_SYNC_BATCH_SIZE is not a number', async () => {
|
|
||||||
process.env.MEILI_SYNC_BATCH_SIZE = 'abc';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('Invalid value for MEILI_SYNC_BATCH_SIZE="abc"'),
|
|
||||||
);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Using default: 1000'));
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should log warning and use default when MEILI_SYNC_DELAY_MS is not a number', async () => {
|
|
||||||
process.env.MEILI_SYNC_DELAY_MS = 'xyz';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('Invalid value for MEILI_SYNC_DELAY_MS="xyz"'),
|
|
||||||
);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Using default: 100'));
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should log warning and use default when MEILI_SYNC_BATCH_SIZE is negative', async () => {
|
|
||||||
process.env.MEILI_SYNC_BATCH_SIZE = '-50';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('Invalid value for MEILI_SYNC_BATCH_SIZE="-50"'),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should log warning and use default when MEILI_SYNC_DELAY_MS is negative', async () => {
|
|
||||||
process.env.MEILI_SYNC_DELAY_MS = '-100';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('Invalid value for MEILI_SYNC_DELAY_MS="-100"'),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should accept valid positive integer values without warnings', async () => {
|
|
||||||
process.env.MEILI_SYNC_BATCH_SIZE = '500';
|
|
||||||
process.env.MEILI_SYNC_DELAY_MS = '50';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should log warning and use default when MEILI_SYNC_BATCH_SIZE is zero', async () => {
|
|
||||||
process.env.MEILI_SYNC_BATCH_SIZE = '0';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).toHaveBeenCalledWith(
|
|
||||||
expect.stringContaining('MEILI_SYNC_BATCH_SIZE cannot be 0. Using default: 1000'),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should accept zero as a valid value for MEILI_SYNC_DELAY_MS without warnings', async () => {
|
|
||||||
process.env.MEILI_SYNC_DELAY_MS = '0';
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not log warnings when environment variables are not set', async () => {
|
|
||||||
delete process.env.MEILI_SYNC_BATCH_SIZE;
|
|
||||||
delete process.env.MEILI_SYNC_DELAY_MS;
|
|
||||||
|
|
||||||
await testCollection.insertMany([
|
|
||||||
{ _id: new mongoose.Types.ObjectId(), expiredAt: null, _meiliIndex: true },
|
|
||||||
]);
|
|
||||||
|
|
||||||
const result = await batchResetMeiliFlags(testCollection);
|
|
||||||
|
|
||||||
expect(result).toBe(1);
|
|
||||||
expect(warnSpy).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
@ -4,7 +4,11 @@ module.exports = {
|
||||||
roots: ['<rootDir>'],
|
roots: ['<rootDir>'],
|
||||||
coverageDirectory: 'coverage',
|
coverageDirectory: 'coverage',
|
||||||
testTimeout: 30000, // 30 seconds timeout for all tests
|
testTimeout: 30000, // 30 seconds timeout for all tests
|
||||||
setupFiles: ['./test/jestSetup.js', './test/__mocks__/logger.js'],
|
setupFiles: [
|
||||||
|
'./test/jestSetup.js',
|
||||||
|
'./test/__mocks__/logger.js',
|
||||||
|
'./test/__mocks__/fetchEventSource.js',
|
||||||
|
],
|
||||||
moduleNameMapper: {
|
moduleNameMapper: {
|
||||||
'~/(.*)': '<rootDir>/$1',
|
'~/(.*)': '<rootDir>/$1',
|
||||||
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
|
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
|
||||||
|
|
|
||||||
29
api/lib/utils/mergeSort.js
Normal file
29
api/lib/utils/mergeSort.js
Normal file
|
|
@ -0,0 +1,29 @@
|
||||||
|
function mergeSort(arr, compareFn) {
|
||||||
|
if (arr.length <= 1) {
|
||||||
|
return arr;
|
||||||
|
}
|
||||||
|
|
||||||
|
const mid = Math.floor(arr.length / 2);
|
||||||
|
const leftArr = arr.slice(0, mid);
|
||||||
|
const rightArr = arr.slice(mid);
|
||||||
|
|
||||||
|
return merge(mergeSort(leftArr, compareFn), mergeSort(rightArr, compareFn), compareFn);
|
||||||
|
}
|
||||||
|
|
||||||
|
function merge(leftArr, rightArr, compareFn) {
|
||||||
|
const result = [];
|
||||||
|
let leftIndex = 0;
|
||||||
|
let rightIndex = 0;
|
||||||
|
|
||||||
|
while (leftIndex < leftArr.length && rightIndex < rightArr.length) {
|
||||||
|
if (compareFn(leftArr[leftIndex], rightArr[rightIndex]) < 0) {
|
||||||
|
result.push(leftArr[leftIndex++]);
|
||||||
|
} else {
|
||||||
|
result.push(rightArr[rightIndex++]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.concat(leftArr.slice(leftIndex)).concat(rightArr.slice(rightIndex));
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = mergeSort;
|
||||||
8
api/lib/utils/misc.js
Normal file
8
api/lib/utils/misc.js
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
const cleanUpPrimaryKeyValue = (value) => {
|
||||||
|
// For Bing convoId handling
|
||||||
|
return value.replace(/--/g, '|');
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
cleanUpPrimaryKeyValue,
|
||||||
|
};
|
||||||
|
|
@ -1,50 +1,20 @@
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const crypto = require('node:crypto');
|
const crypto = require('node:crypto');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { getCustomEndpointConfig } = require('@librechat/api');
|
const { ResourceType, SystemRoles, Tools, actionDelimiter } = require('librechat-data-provider');
|
||||||
const {
|
const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_all, mcp_delimiter } =
|
||||||
Tools,
|
require('librechat-data-provider').Constants;
|
||||||
SystemRoles,
|
|
||||||
ResourceType,
|
|
||||||
actionDelimiter,
|
|
||||||
isAgentsEndpoint,
|
|
||||||
isEphemeralAgentId,
|
|
||||||
encodeEphemeralAgentId,
|
|
||||||
} = require('librechat-data-provider');
|
|
||||||
const { mcp_all, mcp_delimiter } = require('librechat-data-provider').Constants;
|
|
||||||
const {
|
const {
|
||||||
removeAgentFromAllProjects,
|
removeAgentFromAllProjects,
|
||||||
removeAgentIdsFromProject,
|
removeAgentIdsFromProject,
|
||||||
addAgentIdsToProject,
|
addAgentIdsToProject,
|
||||||
|
getProjectByName,
|
||||||
} = require('./Project');
|
} = require('./Project');
|
||||||
const { removeAllPermissions } = require('~/server/services/PermissionService');
|
const { removeAllPermissions } = require('~/server/services/PermissionService');
|
||||||
const { getMCPServerTools } = require('~/server/services/Config');
|
const { getMCPServerTools } = require('~/server/services/Config');
|
||||||
const { Agent, AclEntry, User } = require('~/db/models');
|
const { Agent, AclEntry } = require('~/db/models');
|
||||||
const { getActions } = require('./Action');
|
const { getActions } = require('./Action');
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts unique MCP server names from tools array
|
|
||||||
* Tools format: "toolName_mcp_serverName" or "sys__server__sys_mcp_serverName"
|
|
||||||
* @param {string[]} tools - Array of tool identifiers
|
|
||||||
* @returns {string[]} Array of unique MCP server names
|
|
||||||
*/
|
|
||||||
const extractMCPServerNames = (tools) => {
|
|
||||||
if (!tools || !Array.isArray(tools)) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
const serverNames = new Set();
|
|
||||||
for (const tool of tools) {
|
|
||||||
if (!tool || !tool.includes(mcp_delimiter)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
const parts = tool.split(mcp_delimiter);
|
|
||||||
if (parts.length >= 2) {
|
|
||||||
serverNames.add(parts[parts.length - 1]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Array.from(serverNames);
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create an agent with the provided data.
|
* Create an agent with the provided data.
|
||||||
* @param {Object} agentData - The agent data to create.
|
* @param {Object} agentData - The agent data to create.
|
||||||
|
|
@ -64,7 +34,6 @@ const createAgent = async (agentData) => {
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
category: agentData.category || 'general',
|
category: agentData.category || 'general',
|
||||||
mcpServerNames: extractMCPServerNames(agentData.tools),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
return (await Agent.create(initialAgentData)).toObject();
|
return (await Agent.create(initialAgentData)).toObject();
|
||||||
|
|
@ -99,7 +68,7 @@ const getAgents = async (searchParameter) => await Agent.find(searchParameter).l
|
||||||
* @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
|
* @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
|
||||||
* @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
|
* @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
|
||||||
*/
|
*/
|
||||||
const loadEphemeralAgent = async ({ req, spec, endpoint, model_parameters: _m }) => {
|
const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_parameters: _m }) => {
|
||||||
const { model, ...model_parameters } = _m;
|
const { model, ...model_parameters } = _m;
|
||||||
const modelSpecs = req.config?.modelSpecs?.list;
|
const modelSpecs = req.config?.modelSpecs?.list;
|
||||||
/** @type {TModelSpec | null} */
|
/** @type {TModelSpec | null} */
|
||||||
|
|
@ -146,28 +115,8 @@ const loadEphemeralAgent = async ({ req, spec, endpoint, model_parameters: _m })
|
||||||
}
|
}
|
||||||
|
|
||||||
const instructions = req.body.promptPrefix;
|
const instructions = req.body.promptPrefix;
|
||||||
|
|
||||||
// Get endpoint config for modelDisplayLabel fallback
|
|
||||||
const appConfig = req.config;
|
|
||||||
let endpointConfig = appConfig?.endpoints?.[endpoint];
|
|
||||||
if (!isAgentsEndpoint(endpoint) && !endpointConfig) {
|
|
||||||
try {
|
|
||||||
endpointConfig = getCustomEndpointConfig({ endpoint, appConfig });
|
|
||||||
} catch (err) {
|
|
||||||
logger.error('[loadEphemeralAgent] Error getting custom endpoint config', err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For ephemeral agents, use modelLabel if provided, then model spec's label,
|
|
||||||
// then modelDisplayLabel from endpoint config, otherwise empty string to show model name
|
|
||||||
const sender =
|
|
||||||
model_parameters?.modelLabel ?? modelSpec?.label ?? endpointConfig?.modelDisplayLabel ?? '';
|
|
||||||
|
|
||||||
// Encode ephemeral agent ID with endpoint, model, and computed sender for display
|
|
||||||
const ephemeralId = encodeEphemeralAgentId({ endpoint, model, sender });
|
|
||||||
|
|
||||||
const result = {
|
const result = {
|
||||||
id: ephemeralId,
|
id: agent_id,
|
||||||
instructions,
|
instructions,
|
||||||
provider: endpoint,
|
provider: endpoint,
|
||||||
model_parameters,
|
model_parameters,
|
||||||
|
|
@ -196,8 +145,8 @@ const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) =>
|
||||||
if (!agent_id) {
|
if (!agent_id) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
if (isEphemeralAgentId(agent_id)) {
|
if (agent_id === EPHEMERAL_AGENT_ID) {
|
||||||
return await loadEphemeralAgent({ req, spec, endpoint, model_parameters });
|
return await loadEphemeralAgent({ req, spec, agent_id, endpoint, model_parameters });
|
||||||
}
|
}
|
||||||
const agent = await getAgent({
|
const agent = await getAgent({
|
||||||
id: agent_id,
|
id: agent_id,
|
||||||
|
|
@ -405,13 +354,6 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
|
||||||
} = currentAgent.toObject();
|
} = currentAgent.toObject();
|
||||||
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
||||||
|
|
||||||
// Sync mcpServerNames when tools are updated
|
|
||||||
if (directUpdates.tools !== undefined) {
|
|
||||||
const mcpServerNames = extractMCPServerNames(directUpdates.tools);
|
|
||||||
directUpdates.mcpServerNames = mcpServerNames;
|
|
||||||
updateData.mcpServerNames = mcpServerNames; // Also update the original updateData
|
|
||||||
}
|
|
||||||
|
|
||||||
let actionsHash = null;
|
let actionsHash = null;
|
||||||
|
|
||||||
// Generate actions hash if agent has actions
|
// Generate actions hash if agent has actions
|
||||||
|
|
@ -589,29 +531,10 @@ const deleteAgent = async (searchParameter) => {
|
||||||
const agent = await Agent.findOneAndDelete(searchParameter);
|
const agent = await Agent.findOneAndDelete(searchParameter);
|
||||||
if (agent) {
|
if (agent) {
|
||||||
await removeAgentFromAllProjects(agent.id);
|
await removeAgentFromAllProjects(agent.id);
|
||||||
await Promise.all([
|
await removeAllPermissions({
|
||||||
removeAllPermissions({
|
|
||||||
resourceType: ResourceType.AGENT,
|
resourceType: ResourceType.AGENT,
|
||||||
resourceId: agent._id,
|
resourceId: agent._id,
|
||||||
}),
|
});
|
||||||
removeAllPermissions({
|
|
||||||
resourceType: ResourceType.REMOTE_AGENT,
|
|
||||||
resourceId: agent._id,
|
|
||||||
}),
|
|
||||||
]);
|
|
||||||
try {
|
|
||||||
await Agent.updateMany({ 'edges.to': agent.id }, { $pull: { edges: { to: agent.id } } });
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[deleteAgent] Error removing agent from handoff edges', error);
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
await User.updateMany(
|
|
||||||
{ 'favorites.agentId': agent.id },
|
|
||||||
{ $pull: { favorites: { agentId: agent.id } } },
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[deleteAgent] Error removing agent from user favorites', error);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return agent;
|
return agent;
|
||||||
};
|
};
|
||||||
|
|
@ -637,19 +560,10 @@ const deleteUserAgents = async (userId) => {
|
||||||
}
|
}
|
||||||
|
|
||||||
await AclEntry.deleteMany({
|
await AclEntry.deleteMany({
|
||||||
resourceType: { $in: [ResourceType.AGENT, ResourceType.REMOTE_AGENT] },
|
resourceType: ResourceType.AGENT,
|
||||||
resourceId: { $in: agentObjectIds },
|
resourceId: { $in: agentObjectIds },
|
||||||
});
|
});
|
||||||
|
|
||||||
try {
|
|
||||||
await User.updateMany(
|
|
||||||
{ 'favorites.agentId': { $in: agentIds } },
|
|
||||||
{ $pull: { favorites: { agentId: { $in: agentIds } } } },
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[deleteUserAgents] Error removing agents from user favorites', error);
|
|
||||||
}
|
|
||||||
|
|
||||||
await Agent.deleteMany({ author: userId });
|
await Agent.deleteMany({ author: userId });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[deleteUserAgents] General error:', error);
|
logger.error('[deleteUserAgents] General error:', error);
|
||||||
|
|
@ -756,6 +670,59 @@ const getListAgentsByAccess = async ({
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all agents.
|
||||||
|
* @deprecated Use getListAgentsByAccess for ACL-aware agent listing
|
||||||
|
* @param {Object} searchParameter - The search parameters to find matching agents.
|
||||||
|
* @param {string} searchParameter.author - The user ID of the agent's author.
|
||||||
|
* @returns {Promise<Object>} A promise that resolves to an object containing the agents data and pagination info.
|
||||||
|
*/
|
||||||
|
const getListAgents = async (searchParameter) => {
|
||||||
|
const { author, ...otherParams } = searchParameter;
|
||||||
|
|
||||||
|
let query = Object.assign({ author }, otherParams);
|
||||||
|
|
||||||
|
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, ['agentIds']);
|
||||||
|
if (globalProject && (globalProject.agentIds?.length ?? 0) > 0) {
|
||||||
|
const globalQuery = { id: { $in: globalProject.agentIds }, ...otherParams };
|
||||||
|
delete globalQuery.author;
|
||||||
|
query = { $or: [globalQuery, query] };
|
||||||
|
}
|
||||||
|
const agents = (
|
||||||
|
await Agent.find(query, {
|
||||||
|
id: 1,
|
||||||
|
_id: 1,
|
||||||
|
name: 1,
|
||||||
|
avatar: 1,
|
||||||
|
author: 1,
|
||||||
|
projectIds: 1,
|
||||||
|
description: 1,
|
||||||
|
// @deprecated - isCollaborative replaced by ACL permissions
|
||||||
|
isCollaborative: 1,
|
||||||
|
category: 1,
|
||||||
|
}).lean()
|
||||||
|
).map((agent) => {
|
||||||
|
if (agent.author?.toString() !== author) {
|
||||||
|
delete agent.author;
|
||||||
|
}
|
||||||
|
if (agent.author) {
|
||||||
|
agent.author = agent.author.toString();
|
||||||
|
}
|
||||||
|
return agent;
|
||||||
|
});
|
||||||
|
|
||||||
|
const hasMore = agents.length > 0;
|
||||||
|
const firstId = agents.length > 0 ? agents[0].id : null;
|
||||||
|
const lastId = agents.length > 0 ? agents[agents.length - 1].id : null;
|
||||||
|
|
||||||
|
return {
|
||||||
|
data: agents,
|
||||||
|
has_more: hasMore,
|
||||||
|
first_id: firstId,
|
||||||
|
last_id: lastId,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Updates the projects associated with an agent, adding and removing project IDs as specified.
|
* Updates the projects associated with an agent, adding and removing project IDs as specified.
|
||||||
* This function also updates the corresponding projects to include or exclude the agent ID.
|
* This function also updates the corresponding projects to include or exclude the agent ID.
|
||||||
|
|
@ -921,11 +888,12 @@ module.exports = {
|
||||||
updateAgent,
|
updateAgent,
|
||||||
deleteAgent,
|
deleteAgent,
|
||||||
deleteUserAgents,
|
deleteUserAgents,
|
||||||
|
getListAgents,
|
||||||
revertAgentVersion,
|
revertAgentVersion,
|
||||||
updateAgentProjects,
|
updateAgentProjects,
|
||||||
countPromotedAgents,
|
|
||||||
addAgentResourceFile,
|
addAgentResourceFile,
|
||||||
getListAgentsByAccess,
|
getListAgentsByAccess,
|
||||||
removeAgentResourceFiles,
|
removeAgentResourceFiles,
|
||||||
generateActionMetadataHash,
|
generateActionMetadataHash,
|
||||||
|
countPromotedAgents,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -22,17 +22,17 @@ const {
|
||||||
createAgent,
|
createAgent,
|
||||||
updateAgent,
|
updateAgent,
|
||||||
deleteAgent,
|
deleteAgent,
|
||||||
deleteUserAgents,
|
getListAgents,
|
||||||
|
getListAgentsByAccess,
|
||||||
revertAgentVersion,
|
revertAgentVersion,
|
||||||
updateAgentProjects,
|
updateAgentProjects,
|
||||||
addAgentResourceFile,
|
addAgentResourceFile,
|
||||||
getListAgentsByAccess,
|
|
||||||
removeAgentResourceFiles,
|
removeAgentResourceFiles,
|
||||||
generateActionMetadataHash,
|
generateActionMetadataHash,
|
||||||
} = require('./Agent');
|
} = require('./Agent');
|
||||||
const permissionService = require('~/server/services/PermissionService');
|
const permissionService = require('~/server/services/PermissionService');
|
||||||
const { getCachedTools, getMCPServerTools } = require('~/server/services/Config');
|
const { getCachedTools, getMCPServerTools } = require('~/server/services/Config');
|
||||||
const { AclEntry, User } = require('~/db/models');
|
const { AclEntry } = require('~/db/models');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
|
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
|
||||||
|
|
@ -59,7 +59,6 @@ describe('models/Agent', () => {
|
||||||
|
|
||||||
beforeEach(async () => {
|
beforeEach(async () => {
|
||||||
await Agent.deleteMany({});
|
await Agent.deleteMany({});
|
||||||
await User.deleteMany({});
|
|
||||||
});
|
});
|
||||||
|
|
||||||
test('should add tool_resource to tools if missing', async () => {
|
test('should add tool_resource to tools if missing', async () => {
|
||||||
|
|
@ -533,531 +532,43 @@ describe('models/Agent', () => {
|
||||||
expect(aclEntriesAfter).toHaveLength(0);
|
expect(aclEntriesAfter).toHaveLength(0);
|
||||||
});
|
});
|
||||||
|
|
||||||
test('should remove handoff edges referencing deleted agent from other agents', async () => {
|
test('should list agents by author', async () => {
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const targetAgentId = `agent_${uuidv4()}`;
|
|
||||||
const sourceAgentId = `agent_${uuidv4()}`;
|
|
||||||
|
|
||||||
// Create target agent (handoff destination)
|
|
||||||
await createAgent({
|
|
||||||
id: targetAgentId,
|
|
||||||
name: 'Target Agent',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create source agent with handoff edge to target
|
|
||||||
await createAgent({
|
|
||||||
id: sourceAgentId,
|
|
||||||
name: 'Source Agent',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
edges: [
|
|
||||||
{
|
|
||||||
from: sourceAgentId,
|
|
||||||
to: targetAgentId,
|
|
||||||
edgeType: 'handoff',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify edge exists before deletion
|
|
||||||
const sourceAgentBefore = await getAgent({ id: sourceAgentId });
|
|
||||||
expect(sourceAgentBefore.edges).toHaveLength(1);
|
|
||||||
expect(sourceAgentBefore.edges[0].to).toBe(targetAgentId);
|
|
||||||
|
|
||||||
// Delete the target agent
|
|
||||||
await deleteAgent({ id: targetAgentId });
|
|
||||||
|
|
||||||
// Verify the edge is removed from source agent
|
|
||||||
const sourceAgentAfter = await getAgent({ id: sourceAgentId });
|
|
||||||
expect(sourceAgentAfter.edges).toHaveLength(0);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should remove agent from user favorites when agent is deleted', async () => {
|
|
||||||
const agentId = `agent_${uuidv4()}`;
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const userId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
// Create agent
|
|
||||||
await createAgent({
|
|
||||||
id: agentId,
|
|
||||||
name: 'Agent To Delete',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user with the agent in favorites
|
|
||||||
await User.create({
|
|
||||||
_id: userId,
|
|
||||||
name: 'Test User',
|
|
||||||
email: `test-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agentId }, { model: 'gpt-4', endpoint: 'openAI' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify user has agent in favorites
|
|
||||||
const userBefore = await User.findById(userId);
|
|
||||||
expect(userBefore.favorites).toHaveLength(2);
|
|
||||||
expect(userBefore.favorites.some((f) => f.agentId === agentId)).toBe(true);
|
|
||||||
|
|
||||||
// Delete the agent
|
|
||||||
await deleteAgent({ id: agentId });
|
|
||||||
|
|
||||||
// Verify agent is deleted
|
|
||||||
const agentAfterDelete = await getAgent({ id: agentId });
|
|
||||||
expect(agentAfterDelete).toBeNull();
|
|
||||||
|
|
||||||
// Verify agent is removed from user favorites
|
|
||||||
const userAfter = await User.findById(userId);
|
|
||||||
expect(userAfter.favorites).toHaveLength(1);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agentId)).toBe(false);
|
|
||||||
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should remove agent from multiple users favorites when agent is deleted', async () => {
|
|
||||||
const agentId = `agent_${uuidv4()}`;
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const user1Id = new mongoose.Types.ObjectId();
|
|
||||||
const user2Id = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
// Create agent
|
|
||||||
await createAgent({
|
|
||||||
id: agentId,
|
|
||||||
name: 'Agent To Delete',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create two users with the agent in favorites
|
|
||||||
await User.create({
|
|
||||||
_id: user1Id,
|
|
||||||
name: 'Test User 1',
|
|
||||||
email: `test1-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agentId }],
|
|
||||||
});
|
|
||||||
|
|
||||||
await User.create({
|
|
||||||
_id: user2Id,
|
|
||||||
name: 'Test User 2',
|
|
||||||
email: `test2-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agentId }, { agentId: `agent_${uuidv4()}` }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Delete the agent
|
|
||||||
await deleteAgent({ id: agentId });
|
|
||||||
|
|
||||||
// Verify agent is removed from both users' favorites
|
|
||||||
const user1After = await User.findById(user1Id);
|
|
||||||
const user2After = await User.findById(user2Id);
|
|
||||||
|
|
||||||
expect(user1After.favorites).toHaveLength(0);
|
|
||||||
expect(user2After.favorites).toHaveLength(1);
|
|
||||||
expect(user2After.favorites.some((f) => f.agentId === agentId)).toBe(false);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should preserve other agents in database when one agent is deleted', async () => {
|
|
||||||
const agentToDeleteId = `agent_${uuidv4()}`;
|
|
||||||
const agentToKeep1Id = `agent_${uuidv4()}`;
|
|
||||||
const agentToKeep2Id = `agent_${uuidv4()}`;
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
// Create multiple agents
|
|
||||||
await createAgent({
|
|
||||||
id: agentToDeleteId,
|
|
||||||
name: 'Agent To Delete',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agentToKeep1Id,
|
|
||||||
name: 'Agent To Keep 1',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agentToKeep2Id,
|
|
||||||
name: 'Agent To Keep 2',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify all agents exist
|
|
||||||
expect(await getAgent({ id: agentToDeleteId })).not.toBeNull();
|
|
||||||
expect(await getAgent({ id: agentToKeep1Id })).not.toBeNull();
|
|
||||||
expect(await getAgent({ id: agentToKeep2Id })).not.toBeNull();
|
|
||||||
|
|
||||||
// Delete one agent
|
|
||||||
await deleteAgent({ id: agentToDeleteId });
|
|
||||||
|
|
||||||
// Verify only the deleted agent is removed, others remain intact
|
|
||||||
expect(await getAgent({ id: agentToDeleteId })).toBeNull();
|
|
||||||
const keptAgent1 = await getAgent({ id: agentToKeep1Id });
|
|
||||||
const keptAgent2 = await getAgent({ id: agentToKeep2Id });
|
|
||||||
expect(keptAgent1).not.toBeNull();
|
|
||||||
expect(keptAgent1.name).toBe('Agent To Keep 1');
|
|
||||||
expect(keptAgent2).not.toBeNull();
|
|
||||||
expect(keptAgent2.name).toBe('Agent To Keep 2');
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should preserve other agents in user favorites when one agent is deleted', async () => {
|
|
||||||
const agentToDeleteId = `agent_${uuidv4()}`;
|
|
||||||
const agentToKeep1Id = `agent_${uuidv4()}`;
|
|
||||||
const agentToKeep2Id = `agent_${uuidv4()}`;
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const userId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
// Create multiple agents
|
|
||||||
await createAgent({
|
|
||||||
id: agentToDeleteId,
|
|
||||||
name: 'Agent To Delete',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agentToKeep1Id,
|
|
||||||
name: 'Agent To Keep 1',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agentToKeep2Id,
|
|
||||||
name: 'Agent To Keep 2',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user with all three agents in favorites
|
|
||||||
await User.create({
|
|
||||||
_id: userId,
|
|
||||||
name: 'Test User',
|
|
||||||
email: `test-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [
|
|
||||||
{ agentId: agentToDeleteId },
|
|
||||||
{ agentId: agentToKeep1Id },
|
|
||||||
{ agentId: agentToKeep2Id },
|
|
||||||
],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify user has all three agents in favorites
|
|
||||||
const userBefore = await User.findById(userId);
|
|
||||||
expect(userBefore.favorites).toHaveLength(3);
|
|
||||||
|
|
||||||
// Delete one agent
|
|
||||||
await deleteAgent({ id: agentToDeleteId });
|
|
||||||
|
|
||||||
// Verify only the deleted agent is removed from favorites
|
|
||||||
const userAfter = await User.findById(userId);
|
|
||||||
expect(userAfter.favorites).toHaveLength(2);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agentToDeleteId)).toBe(false);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agentToKeep1Id)).toBe(true);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agentToKeep2Id)).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should not affect users who do not have deleted agent in favorites', async () => {
|
|
||||||
const agentToDeleteId = `agent_${uuidv4()}`;
|
|
||||||
const otherAgentId = `agent_${uuidv4()}`;
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const userWithDeletedAgentId = new mongoose.Types.ObjectId();
|
|
||||||
const userWithoutDeletedAgentId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
// Create agents
|
|
||||||
await createAgent({
|
|
||||||
id: agentToDeleteId,
|
|
||||||
name: 'Agent To Delete',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: otherAgentId,
|
|
||||||
name: 'Other Agent',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user with the agent to be deleted
|
|
||||||
await User.create({
|
|
||||||
_id: userWithDeletedAgentId,
|
|
||||||
name: 'User With Deleted Agent',
|
|
||||||
email: `user1-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agentToDeleteId }, { model: 'gpt-4', endpoint: 'openAI' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user without the agent to be deleted
|
|
||||||
await User.create({
|
|
||||||
_id: userWithoutDeletedAgentId,
|
|
||||||
name: 'User Without Deleted Agent',
|
|
||||||
email: `user2-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: otherAgentId }, { model: 'claude-3', endpoint: 'anthropic' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Delete the agent
|
|
||||||
await deleteAgent({ id: agentToDeleteId });
|
|
||||||
|
|
||||||
// Verify user with deleted agent has it removed
|
|
||||||
const userWithDeleted = await User.findById(userWithDeletedAgentId);
|
|
||||||
expect(userWithDeleted.favorites).toHaveLength(1);
|
|
||||||
expect(userWithDeleted.favorites.some((f) => f.agentId === agentToDeleteId)).toBe(false);
|
|
||||||
expect(userWithDeleted.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
|
|
||||||
// Verify user without deleted agent is completely unaffected
|
|
||||||
const userWithoutDeleted = await User.findById(userWithoutDeletedAgentId);
|
|
||||||
expect(userWithoutDeleted.favorites).toHaveLength(2);
|
|
||||||
expect(userWithoutDeleted.favorites.some((f) => f.agentId === otherAgentId)).toBe(true);
|
|
||||||
expect(userWithoutDeleted.favorites.some((f) => f.model === 'claude-3')).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should remove all user agents from favorites when deleteUserAgents is called', async () => {
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
const authorId = new mongoose.Types.ObjectId();
|
||||||
const otherAuthorId = new mongoose.Types.ObjectId();
|
const otherAuthorId = new mongoose.Types.ObjectId();
|
||||||
const userId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
const agent1Id = `agent_${uuidv4()}`;
|
const agentIds = [];
|
||||||
const agent2Id = `agent_${uuidv4()}`;
|
for (let i = 0; i < 5; i++) {
|
||||||
const otherAuthorAgentId = `agent_${uuidv4()}`;
|
const id = `agent_${uuidv4()}`;
|
||||||
|
agentIds.push(id);
|
||||||
// Create agents by the author to be deleted
|
|
||||||
await createAgent({
|
await createAgent({
|
||||||
id: agent1Id,
|
id,
|
||||||
name: 'Author Agent 1',
|
name: `Agent ${i}`,
|
||||||
provider: 'test',
|
provider: 'test',
|
||||||
model: 'test-model',
|
model: 'test-model',
|
||||||
author: authorId,
|
author: authorId,
|
||||||
});
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = 0; i < 3; i++) {
|
||||||
await createAgent({
|
await createAgent({
|
||||||
id: agent2Id,
|
id: `other_agent_${uuidv4()}`,
|
||||||
name: 'Author Agent 2',
|
name: `Other Agent ${i}`,
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create agent by different author (should not be deleted)
|
|
||||||
await createAgent({
|
|
||||||
id: otherAuthorAgentId,
|
|
||||||
name: 'Other Author Agent',
|
|
||||||
provider: 'test',
|
provider: 'test',
|
||||||
model: 'test-model',
|
model: 'test-model',
|
||||||
author: otherAuthorId,
|
author: otherAuthorId,
|
||||||
});
|
});
|
||||||
|
}
|
||||||
|
|
||||||
// Create user with all agents in favorites
|
const result = await getListAgents({ author: authorId.toString() });
|
||||||
await User.create({
|
|
||||||
_id: userId,
|
|
||||||
name: 'Test User',
|
|
||||||
email: `test-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [
|
|
||||||
{ agentId: agent1Id },
|
|
||||||
{ agentId: agent2Id },
|
|
||||||
{ agentId: otherAuthorAgentId },
|
|
||||||
{ model: 'gpt-4', endpoint: 'openAI' },
|
|
||||||
],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify user has all favorites
|
expect(result).toBeDefined();
|
||||||
const userBefore = await User.findById(userId);
|
expect(result.data).toBeDefined();
|
||||||
expect(userBefore.favorites).toHaveLength(4);
|
expect(result.data).toHaveLength(5);
|
||||||
|
expect(result.has_more).toBe(true);
|
||||||
|
|
||||||
// Delete all agents by the author
|
for (const agent of result.data) {
|
||||||
await deleteUserAgents(authorId.toString());
|
expect(agent.author).toBe(authorId.toString());
|
||||||
|
}
|
||||||
// Verify author's agents are deleted from database
|
|
||||||
expect(await getAgent({ id: agent1Id })).toBeNull();
|
|
||||||
expect(await getAgent({ id: agent2Id })).toBeNull();
|
|
||||||
|
|
||||||
// Verify other author's agent still exists
|
|
||||||
expect(await getAgent({ id: otherAuthorAgentId })).not.toBeNull();
|
|
||||||
|
|
||||||
// Verify user favorites: author's agents removed, others remain
|
|
||||||
const userAfter = await User.findById(userId);
|
|
||||||
expect(userAfter.favorites).toHaveLength(2);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agent1Id)).toBe(false);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === agent2Id)).toBe(false);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === otherAuthorAgentId)).toBe(true);
|
|
||||||
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should handle deleteUserAgents when agents are in multiple users favorites', async () => {
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const user1Id = new mongoose.Types.ObjectId();
|
|
||||||
const user2Id = new mongoose.Types.ObjectId();
|
|
||||||
const user3Id = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
const agent1Id = `agent_${uuidv4()}`;
|
|
||||||
const agent2Id = `agent_${uuidv4()}`;
|
|
||||||
const unrelatedAgentId = `agent_${uuidv4()}`;
|
|
||||||
|
|
||||||
// Create agents by the author
|
|
||||||
await createAgent({
|
|
||||||
id: agent1Id,
|
|
||||||
name: 'Author Agent 1',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agent2Id,
|
|
||||||
name: 'Author Agent 2',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create users with various favorites configurations
|
|
||||||
await User.create({
|
|
||||||
_id: user1Id,
|
|
||||||
name: 'User 1',
|
|
||||||
email: `user1-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agent1Id }, { agentId: agent2Id }],
|
|
||||||
});
|
|
||||||
|
|
||||||
await User.create({
|
|
||||||
_id: user2Id,
|
|
||||||
name: 'User 2',
|
|
||||||
email: `user2-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: agent1Id }, { model: 'claude-3', endpoint: 'anthropic' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
await User.create({
|
|
||||||
_id: user3Id,
|
|
||||||
name: 'User 3',
|
|
||||||
email: `user3-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: unrelatedAgentId }, { model: 'gpt-4', endpoint: 'openAI' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Delete all agents by the author
|
|
||||||
await deleteUserAgents(authorId.toString());
|
|
||||||
|
|
||||||
// Verify all users' favorites are correctly updated
|
|
||||||
const user1After = await User.findById(user1Id);
|
|
||||||
expect(user1After.favorites).toHaveLength(0);
|
|
||||||
|
|
||||||
const user2After = await User.findById(user2Id);
|
|
||||||
expect(user2After.favorites).toHaveLength(1);
|
|
||||||
expect(user2After.favorites.some((f) => f.agentId === agent1Id)).toBe(false);
|
|
||||||
expect(user2After.favorites.some((f) => f.model === 'claude-3')).toBe(true);
|
|
||||||
|
|
||||||
// User 3 should be completely unaffected
|
|
||||||
const user3After = await User.findById(user3Id);
|
|
||||||
expect(user3After.favorites).toHaveLength(2);
|
|
||||||
expect(user3After.favorites.some((f) => f.agentId === unrelatedAgentId)).toBe(true);
|
|
||||||
expect(user3After.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should handle deleteUserAgents when user has no agents', async () => {
|
|
||||||
const authorWithNoAgentsId = new mongoose.Types.ObjectId();
|
|
||||||
const otherAuthorId = new mongoose.Types.ObjectId();
|
|
||||||
const userId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
const existingAgentId = `agent_${uuidv4()}`;
|
|
||||||
|
|
||||||
// Create agent by different author
|
|
||||||
await createAgent({
|
|
||||||
id: existingAgentId,
|
|
||||||
name: 'Existing Agent',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: otherAuthorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user with favorites
|
|
||||||
await User.create({
|
|
||||||
_id: userId,
|
|
||||||
name: 'Test User',
|
|
||||||
email: `test-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ agentId: existingAgentId }, { model: 'gpt-4', endpoint: 'openAI' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Delete agents for user with no agents (should be a no-op)
|
|
||||||
await deleteUserAgents(authorWithNoAgentsId.toString());
|
|
||||||
|
|
||||||
// Verify existing agent still exists
|
|
||||||
expect(await getAgent({ id: existingAgentId })).not.toBeNull();
|
|
||||||
|
|
||||||
// Verify user favorites are unchanged
|
|
||||||
const userAfter = await User.findById(userId);
|
|
||||||
expect(userAfter.favorites).toHaveLength(2);
|
|
||||||
expect(userAfter.favorites.some((f) => f.agentId === existingAgentId)).toBe(true);
|
|
||||||
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should handle deleteUserAgents when agents are not in any favorites', async () => {
|
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
|
||||||
const userId = new mongoose.Types.ObjectId();
|
|
||||||
|
|
||||||
const agent1Id = `agent_${uuidv4()}`;
|
|
||||||
const agent2Id = `agent_${uuidv4()}`;
|
|
||||||
|
|
||||||
// Create agents by the author
|
|
||||||
await createAgent({
|
|
||||||
id: agent1Id,
|
|
||||||
name: 'Agent 1',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
await createAgent({
|
|
||||||
id: agent2Id,
|
|
||||||
name: 'Agent 2',
|
|
||||||
provider: 'test',
|
|
||||||
model: 'test-model',
|
|
||||||
author: authorId,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create user with favorites that don't include these agents
|
|
||||||
await User.create({
|
|
||||||
_id: userId,
|
|
||||||
name: 'Test User',
|
|
||||||
email: `test-${uuidv4()}@example.com`,
|
|
||||||
provider: 'local',
|
|
||||||
favorites: [{ model: 'gpt-4', endpoint: 'openAI' }],
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify agents exist
|
|
||||||
expect(await getAgent({ id: agent1Id })).not.toBeNull();
|
|
||||||
expect(await getAgent({ id: agent2Id })).not.toBeNull();
|
|
||||||
|
|
||||||
// Delete all agents by the author
|
|
||||||
await deleteUserAgents(authorId.toString());
|
|
||||||
|
|
||||||
// Verify agents are deleted
|
|
||||||
expect(await getAgent({ id: agent1Id })).toBeNull();
|
|
||||||
expect(await getAgent({ id: agent2Id })).toBeNull();
|
|
||||||
|
|
||||||
// Verify user favorites are unchanged
|
|
||||||
const userAfter = await User.findById(userId);
|
|
||||||
expect(userAfter.favorites).toHaveLength(1);
|
|
||||||
expect(userAfter.favorites.some((f) => f.model === 'gpt-4')).toBe(true);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
test('should update agent projects', async () => {
|
test('should update agent projects', async () => {
|
||||||
|
|
@ -1179,6 +690,26 @@ describe('models/Agent', () => {
|
||||||
expect(result).toBe(expected);
|
expect(result).toBe(expected);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('should handle getListAgents with invalid author format', async () => {
|
||||||
|
try {
|
||||||
|
const result = await getListAgents({ author: 'invalid-object-id' });
|
||||||
|
expect(result.data).toEqual([]);
|
||||||
|
} catch (error) {
|
||||||
|
expect(error).toBeDefined();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should handle getListAgents with no agents', async () => {
|
||||||
|
const authorId = new mongoose.Types.ObjectId();
|
||||||
|
const result = await getListAgents({ author: authorId.toString() });
|
||||||
|
|
||||||
|
expect(result).toBeDefined();
|
||||||
|
expect(result.data).toEqual([]);
|
||||||
|
expect(result.has_more).toBe(false);
|
||||||
|
expect(result.first_id).toBeNull();
|
||||||
|
expect(result.last_id).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
test('should handle updateAgentProjects with non-existent agent', async () => {
|
test('should handle updateAgentProjects with non-existent agent', async () => {
|
||||||
const nonExistentId = `agent_${uuidv4()}`;
|
const nonExistentId = `agent_${uuidv4()}`;
|
||||||
const userId = new mongoose.Types.ObjectId();
|
const userId = new mongoose.Types.ObjectId();
|
||||||
|
|
@ -2429,8 +1960,7 @@ describe('models/Agent', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
if (result) {
|
if (result) {
|
||||||
// Ephemeral agent ID is encoded with endpoint and model
|
expect(result.id).toBe(EPHEMERAL_AGENT_ID);
|
||||||
expect(result.id).toBe('openai__gpt-4');
|
|
||||||
expect(result.instructions).toBe('Test instructions');
|
expect(result.instructions).toBe('Test instructions');
|
||||||
expect(result.provider).toBe('openai');
|
expect(result.provider).toBe('openai');
|
||||||
expect(result.model).toBe('gpt-4');
|
expect(result.model).toBe('gpt-4');
|
||||||
|
|
@ -2448,7 +1978,7 @@ describe('models/Agent', () => {
|
||||||
const mockReq = { user: { id: 'user123' } };
|
const mockReq = { user: { id: 'user123' } };
|
||||||
const result = await loadAgent({
|
const result = await loadAgent({
|
||||||
req: mockReq,
|
req: mockReq,
|
||||||
agent_id: 'agent_non_existent',
|
agent_id: 'non_existent_agent',
|
||||||
endpoint: 'openai',
|
endpoint: 'openai',
|
||||||
model_parameters: { model: 'gpt-4' },
|
model_parameters: { model: 'gpt-4' },
|
||||||
});
|
});
|
||||||
|
|
@ -2575,7 +2105,7 @@ describe('models/Agent', () => {
|
||||||
test('should handle loadAgent with malformed req object', async () => {
|
test('should handle loadAgent with malformed req object', async () => {
|
||||||
const result = await loadAgent({
|
const result = await loadAgent({
|
||||||
req: null,
|
req: null,
|
||||||
agent_id: 'agent_test',
|
agent_id: 'test',
|
||||||
endpoint: 'openai',
|
endpoint: 'openai',
|
||||||
model_parameters: { model: 'gpt-4' },
|
model_parameters: { model: 'gpt-4' },
|
||||||
});
|
});
|
||||||
|
|
@ -2792,6 +2322,17 @@ describe('models/Agent', () => {
|
||||||
expect(result).toBeNull();
|
expect(result).toBeNull();
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('should handle getListAgents with no agents', async () => {
|
||||||
|
const authorId = new mongoose.Types.ObjectId();
|
||||||
|
const result = await getListAgents({ author: authorId.toString() });
|
||||||
|
|
||||||
|
expect(result).toBeDefined();
|
||||||
|
expect(result.data).toEqual([]);
|
||||||
|
expect(result.has_more).toBe(false);
|
||||||
|
expect(result.first_id).toBeNull();
|
||||||
|
expect(result.last_id).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
test('should handle updateAgent with MongoDB operators mixed with direct updates', async () => {
|
test('should handle updateAgent with MongoDB operators mixed with direct updates', async () => {
|
||||||
const agentId = `agent_${uuidv4()}`;
|
const agentId = `agent_${uuidv4()}`;
|
||||||
const authorId = new mongoose.Types.ObjectId();
|
const authorId = new mongoose.Types.ObjectId();
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,7 @@ const getConvo = async (user, conversationId) => {
|
||||||
return await Conversation.findOne({ user, conversationId }).lean();
|
return await Conversation.findOne({ user, conversationId }).lean();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvo] Error getting single conversation', error);
|
logger.error('[getConvo] Error getting single conversation', error);
|
||||||
throw new Error('Error getting single conversation');
|
return { message: 'Error getting single conversation' };
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -124,15 +124,10 @@ module.exports = {
|
||||||
updateOperation,
|
updateOperation,
|
||||||
{
|
{
|
||||||
new: true,
|
new: true,
|
||||||
upsert: metadata?.noUpsert !== true,
|
upsert: true,
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
if (!conversation) {
|
|
||||||
logger.debug('[saveConvo] Conversation not found, skipping update');
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return conversation.toObject();
|
return conversation.toObject();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[saveConvo] Error saving conversation', error);
|
logger.error('[saveConvo] Error saving conversation', error);
|
||||||
|
|
@ -156,21 +151,13 @@ module.exports = {
|
||||||
const result = await Conversation.bulkWrite(bulkOps);
|
const result = await Conversation.bulkWrite(bulkOps);
|
||||||
return result;
|
return result;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[bulkSaveConvos] Error saving conversations in bulk', error);
|
logger.error('[saveBulkConversations] Error saving conversations in bulk', error);
|
||||||
throw new Error('Failed to save conversations in bulk.');
|
throw new Error('Failed to save conversations in bulk.');
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvosByCursor: async (
|
getConvosByCursor: async (
|
||||||
user,
|
user,
|
||||||
{
|
{ cursor, limit = 25, isArchived = false, tags, search, order = 'desc' } = {},
|
||||||
cursor,
|
|
||||||
limit = 25,
|
|
||||||
isArchived = false,
|
|
||||||
tags,
|
|
||||||
search,
|
|
||||||
sortBy = 'updatedAt',
|
|
||||||
sortDirection = 'desc',
|
|
||||||
} = {},
|
|
||||||
) => {
|
) => {
|
||||||
const filters = [{ user }];
|
const filters = [{ user }];
|
||||||
if (isArchived) {
|
if (isArchived) {
|
||||||
|
|
@ -197,79 +184,35 @@ module.exports = {
|
||||||
filters.push({ conversationId: { $in: matchingIds } });
|
filters.push({ conversationId: { $in: matchingIds } });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosByCursor] Error during meiliSearch', error);
|
logger.error('[getConvosByCursor] Error during meiliSearch', error);
|
||||||
throw new Error('Error during meiliSearch');
|
return { message: 'Error during meiliSearch' };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const validSortFields = ['title', 'createdAt', 'updatedAt'];
|
|
||||||
if (!validSortFields.includes(sortBy)) {
|
|
||||||
throw new Error(
|
|
||||||
`Invalid sortBy field: ${sortBy}. Must be one of ${validSortFields.join(', ')}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
const finalSortBy = sortBy;
|
|
||||||
const finalSortDirection = sortDirection === 'asc' ? 'asc' : 'desc';
|
|
||||||
|
|
||||||
let cursorFilter = null;
|
|
||||||
if (cursor) {
|
if (cursor) {
|
||||||
try {
|
filters.push({ updatedAt: { $lt: new Date(cursor) } });
|
||||||
const decoded = JSON.parse(Buffer.from(cursor, 'base64').toString());
|
|
||||||
const { primary, secondary } = decoded;
|
|
||||||
const primaryValue = finalSortBy === 'title' ? primary : new Date(primary);
|
|
||||||
const secondaryValue = new Date(secondary);
|
|
||||||
const op = finalSortDirection === 'asc' ? '$gt' : '$lt';
|
|
||||||
|
|
||||||
cursorFilter = {
|
|
||||||
$or: [
|
|
||||||
{ [finalSortBy]: { [op]: primaryValue } },
|
|
||||||
{
|
|
||||||
[finalSortBy]: primaryValue,
|
|
||||||
updatedAt: { [op]: secondaryValue },
|
|
||||||
},
|
|
||||||
],
|
|
||||||
};
|
|
||||||
} catch (err) {
|
|
||||||
logger.warn('[getConvosByCursor] Invalid cursor format, starting from beginning');
|
|
||||||
}
|
|
||||||
if (cursorFilter) {
|
|
||||||
filters.push(cursorFilter);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const query = filters.length === 1 ? filters[0] : { $and: filters };
|
const query = filters.length === 1 ? filters[0] : { $and: filters };
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const sortOrder = finalSortDirection === 'asc' ? 1 : -1;
|
|
||||||
const sortObj = { [finalSortBy]: sortOrder };
|
|
||||||
|
|
||||||
if (finalSortBy !== 'updatedAt') {
|
|
||||||
sortObj.updatedAt = sortOrder;
|
|
||||||
}
|
|
||||||
|
|
||||||
const convos = await Conversation.find(query)
|
const convos = await Conversation.find(query)
|
||||||
.select(
|
.select(
|
||||||
'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL',
|
'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL',
|
||||||
)
|
)
|
||||||
.sort(sortObj)
|
.sort({ updatedAt: order === 'asc' ? 1 : -1 })
|
||||||
.limit(limit + 1)
|
.limit(limit + 1)
|
||||||
.lean();
|
.lean();
|
||||||
|
|
||||||
let nextCursor = null;
|
let nextCursor = null;
|
||||||
if (convos.length > limit) {
|
if (convos.length > limit) {
|
||||||
convos.pop(); // Remove extra item used to detect next page
|
const lastConvo = convos.pop();
|
||||||
// Create cursor from the last RETURNED item (not the popped one)
|
nextCursor = lastConvo.updatedAt.toISOString();
|
||||||
const lastReturned = convos[convos.length - 1];
|
|
||||||
const primaryValue = lastReturned[finalSortBy];
|
|
||||||
const primaryStr = finalSortBy === 'title' ? primaryValue : primaryValue.toISOString();
|
|
||||||
const secondaryStr = lastReturned.updatedAt.toISOString();
|
|
||||||
const composite = { primary: primaryStr, secondary: secondaryStr };
|
|
||||||
nextCursor = Buffer.from(JSON.stringify(composite)).toString('base64');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return { conversations: convos, nextCursor };
|
return { conversations: convos, nextCursor };
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosByCursor] Error getting conversations', error);
|
logger.error('[getConvosByCursor] Error getting conversations', error);
|
||||||
throw new Error('Error getting conversations');
|
return { message: 'Error getting conversations' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => {
|
getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => {
|
||||||
|
|
@ -297,9 +240,8 @@ module.exports = {
|
||||||
const limited = filtered.slice(0, limit + 1);
|
const limited = filtered.slice(0, limit + 1);
|
||||||
let nextCursor = null;
|
let nextCursor = null;
|
||||||
if (limited.length > limit) {
|
if (limited.length > limit) {
|
||||||
limited.pop(); // Remove extra item used to detect next page
|
const lastConvo = limited.pop();
|
||||||
// Create cursor from the last RETURNED item (not the popped one)
|
nextCursor = lastConvo.updatedAt.toISOString();
|
||||||
nextCursor = limited[limited.length - 1].updatedAt.toISOString();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const convoMap = {};
|
const convoMap = {};
|
||||||
|
|
@ -310,7 +252,7 @@ module.exports = {
|
||||||
return { conversations: limited, nextCursor, convoMap };
|
return { conversations: limited, nextCursor, convoMap };
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosQueried] Error getting conversations', error);
|
logger.error('[getConvosQueried] Error getting conversations', error);
|
||||||
throw new Error('Error fetching conversations');
|
return { message: 'Error fetching conversations' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvo,
|
getConvo,
|
||||||
|
|
@ -327,7 +269,7 @@ module.exports = {
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvoTitle] Error getting conversation title', error);
|
logger.error('[getConvoTitle] Error getting conversation title', error);
|
||||||
throw new Error('Error getting conversation title');
|
return { message: 'Error getting conversation title' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -106,47 +106,6 @@ describe('Conversation Operations', () => {
|
||||||
expect(result.conversationId).toBe(newConversationId);
|
expect(result.conversationId).toBe(newConversationId);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should not create a conversation when noUpsert is true and conversation does not exist', async () => {
|
|
||||||
const nonExistentId = uuidv4();
|
|
||||||
const result = await saveConvo(
|
|
||||||
mockReq,
|
|
||||||
{ conversationId: nonExistentId, title: 'Ghost Title' },
|
|
||||||
{ noUpsert: true },
|
|
||||||
);
|
|
||||||
|
|
||||||
expect(result).toBeNull();
|
|
||||||
|
|
||||||
const dbConvo = await Conversation.findOne({ conversationId: nonExistentId });
|
|
||||||
expect(dbConvo).toBeNull();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should update an existing conversation when noUpsert is true', async () => {
|
|
||||||
await saveConvo(mockReq, mockConversationData);
|
|
||||||
|
|
||||||
const result = await saveConvo(
|
|
||||||
mockReq,
|
|
||||||
{ conversationId: mockConversationData.conversationId, title: 'Updated Title' },
|
|
||||||
{ noUpsert: true },
|
|
||||||
);
|
|
||||||
|
|
||||||
expect(result).not.toBeNull();
|
|
||||||
expect(result.title).toBe('Updated Title');
|
|
||||||
expect(result.conversationId).toBe(mockConversationData.conversationId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should still upsert by default when noUpsert is not provided', async () => {
|
|
||||||
const newId = uuidv4();
|
|
||||||
const result = await saveConvo(mockReq, {
|
|
||||||
conversationId: newId,
|
|
||||||
title: 'New Conversation',
|
|
||||||
endpoint: EModelEndpoint.openAI,
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result).not.toBeNull();
|
|
||||||
expect(result.conversationId).toBe(newId);
|
|
||||||
expect(result.title).toBe('New Conversation');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle unsetFields metadata', async () => {
|
it('should handle unsetFields metadata', async () => {
|
||||||
const metadata = {
|
const metadata = {
|
||||||
unsetFields: { someField: 1 },
|
unsetFields: { someField: 1 },
|
||||||
|
|
@ -163,6 +122,7 @@ describe('Conversation Operations', () => {
|
||||||
|
|
||||||
describe('isTemporary conversation handling', () => {
|
describe('isTemporary conversation handling', () => {
|
||||||
it('should save a conversation with expiredAt when isTemporary is true', async () => {
|
it('should save a conversation with expiredAt when isTemporary is true', async () => {
|
||||||
|
// Mock app config with 24 hour retention
|
||||||
mockReq.config.interfaceConfig.temporaryChatRetention = 24;
|
mockReq.config.interfaceConfig.temporaryChatRetention = 24;
|
||||||
|
|
||||||
mockReq.body = { isTemporary: true };
|
mockReq.body = { isTemporary: true };
|
||||||
|
|
@ -175,6 +135,7 @@ describe('Conversation Operations', () => {
|
||||||
expect(result.expiredAt).toBeDefined();
|
expect(result.expiredAt).toBeDefined();
|
||||||
expect(result.expiredAt).toBeInstanceOf(Date);
|
expect(result.expiredAt).toBeInstanceOf(Date);
|
||||||
|
|
||||||
|
// Verify expiredAt is approximately 24 hours in the future
|
||||||
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
|
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
|
||||||
const actualExpirationTime = new Date(result.expiredAt);
|
const actualExpirationTime = new Date(result.expiredAt);
|
||||||
|
|
||||||
|
|
@ -196,6 +157,7 @@ describe('Conversation Operations', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should save a conversation without expiredAt when isTemporary is not provided', async () => {
|
it('should save a conversation without expiredAt when isTemporary is not provided', async () => {
|
||||||
|
// No isTemporary in body
|
||||||
mockReq.body = {};
|
mockReq.body = {};
|
||||||
|
|
||||||
const result = await saveConvo(mockReq, mockConversationData);
|
const result = await saveConvo(mockReq, mockConversationData);
|
||||||
|
|
@ -205,6 +167,7 @@ describe('Conversation Operations', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use custom retention period from config', async () => {
|
it('should use custom retention period from config', async () => {
|
||||||
|
// Mock app config with 48 hour retention
|
||||||
mockReq.config.interfaceConfig.temporaryChatRetention = 48;
|
mockReq.config.interfaceConfig.temporaryChatRetention = 48;
|
||||||
|
|
||||||
mockReq.body = { isTemporary: true };
|
mockReq.body = { isTemporary: true };
|
||||||
|
|
@ -604,267 +567,4 @@ describe('Conversation Operations', () => {
|
||||||
await mongoose.connect(mongoServer.getUri());
|
await mongoose.connect(mongoServer.getUri());
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('getConvosByCursor pagination', () => {
|
|
||||||
/**
|
|
||||||
* Helper to create conversations with specific timestamps
|
|
||||||
* Uses collection.insertOne to bypass Mongoose timestamps entirely
|
|
||||||
*/
|
|
||||||
const createConvoWithTimestamps = async (index, createdAt, updatedAt) => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
// Use collection-level insert to bypass Mongoose timestamps
|
|
||||||
await Conversation.collection.insertOne({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
title: `Conversation ${index}`,
|
|
||||||
endpoint: EModelEndpoint.openAI,
|
|
||||||
expiredAt: null,
|
|
||||||
isArchived: false,
|
|
||||||
createdAt,
|
|
||||||
updatedAt,
|
|
||||||
});
|
|
||||||
return Conversation.findOne({ conversationId }).lean();
|
|
||||||
};
|
|
||||||
|
|
||||||
it('should not skip conversations at page boundaries', async () => {
|
|
||||||
// Create 30 conversations to ensure pagination (limit is 25)
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
const convos = [];
|
|
||||||
|
|
||||||
for (let i = 0; i < 30; i++) {
|
|
||||||
const updatedAt = new Date(baseTime.getTime() - i * 60000); // Each 1 minute apart
|
|
||||||
const convo = await createConvoWithTimestamps(i, updatedAt, updatedAt);
|
|
||||||
convos.push(convo);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch first page
|
|
||||||
const page1 = await getConvosByCursor('user123', { limit: 25 });
|
|
||||||
|
|
||||||
expect(page1.conversations).toHaveLength(25);
|
|
||||||
expect(page1.nextCursor).toBeTruthy();
|
|
||||||
|
|
||||||
// Fetch second page using cursor
|
|
||||||
const page2 = await getConvosByCursor('user123', {
|
|
||||||
limit: 25,
|
|
||||||
cursor: page1.nextCursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Should get remaining 5 conversations
|
|
||||||
expect(page2.conversations).toHaveLength(5);
|
|
||||||
expect(page2.nextCursor).toBeNull();
|
|
||||||
|
|
||||||
// Verify no duplicates and no gaps
|
|
||||||
const allIds = [
|
|
||||||
...page1.conversations.map((c) => c.conversationId),
|
|
||||||
...page2.conversations.map((c) => c.conversationId),
|
|
||||||
];
|
|
||||||
const uniqueIds = new Set(allIds);
|
|
||||||
|
|
||||||
expect(uniqueIds.size).toBe(30); // All 30 conversations accounted for
|
|
||||||
expect(allIds.length).toBe(30); // No duplicates
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should include conversation at exact page boundary (item 26 bug fix)', async () => {
|
|
||||||
// This test specifically verifies the fix for the bug where item 26
|
|
||||||
// (the first item that should appear on page 2) was being skipped
|
|
||||||
|
|
||||||
const baseTime = new Date('2026-01-01T12:00:00.000Z');
|
|
||||||
|
|
||||||
// Create exactly 26 conversations
|
|
||||||
const convos = [];
|
|
||||||
for (let i = 0; i < 26; i++) {
|
|
||||||
const updatedAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
const convo = await createConvoWithTimestamps(i, updatedAt, updatedAt);
|
|
||||||
convos.push(convo);
|
|
||||||
}
|
|
||||||
|
|
||||||
// The 26th conversation (index 25) should be on page 2
|
|
||||||
const item26 = convos[25];
|
|
||||||
|
|
||||||
// Fetch first page with limit 25
|
|
||||||
const page1 = await getConvosByCursor('user123', { limit: 25 });
|
|
||||||
|
|
||||||
expect(page1.conversations).toHaveLength(25);
|
|
||||||
expect(page1.nextCursor).toBeTruthy();
|
|
||||||
|
|
||||||
// Item 26 should NOT be in page 1
|
|
||||||
const page1Ids = page1.conversations.map((c) => c.conversationId);
|
|
||||||
expect(page1Ids).not.toContain(item26.conversationId);
|
|
||||||
|
|
||||||
// Fetch second page
|
|
||||||
const page2 = await getConvosByCursor('user123', {
|
|
||||||
limit: 25,
|
|
||||||
cursor: page1.nextCursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Item 26 MUST be in page 2 (this was the bug - it was being skipped)
|
|
||||||
expect(page2.conversations).toHaveLength(1);
|
|
||||||
expect(page2.conversations[0].conversationId).toBe(item26.conversationId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should sort by updatedAt DESC by default', async () => {
|
|
||||||
// Create conversations with different updatedAt times
|
|
||||||
// Note: createdAt is older but updatedAt varies
|
|
||||||
const convo1 = await createConvoWithTimestamps(
|
|
||||||
1,
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'), // oldest created
|
|
||||||
new Date('2026-01-03T00:00:00.000Z'), // most recently updated
|
|
||||||
);
|
|
||||||
|
|
||||||
const convo2 = await createConvoWithTimestamps(
|
|
||||||
2,
|
|
||||||
new Date('2026-01-02T00:00:00.000Z'), // middle created
|
|
||||||
new Date('2026-01-02T00:00:00.000Z'), // middle updated
|
|
||||||
);
|
|
||||||
|
|
||||||
const convo3 = await createConvoWithTimestamps(
|
|
||||||
3,
|
|
||||||
new Date('2026-01-03T00:00:00.000Z'), // newest created
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'), // oldest updated
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await getConvosByCursor('user123');
|
|
||||||
|
|
||||||
// Should be sorted by updatedAt DESC (most recent first)
|
|
||||||
expect(result.conversations).toHaveLength(3);
|
|
||||||
expect(result.conversations[0].conversationId).toBe(convo1.conversationId); // Jan 3 updatedAt
|
|
||||||
expect(result.conversations[1].conversationId).toBe(convo2.conversationId); // Jan 2 updatedAt
|
|
||||||
expect(result.conversations[2].conversationId).toBe(convo3.conversationId); // Jan 1 updatedAt
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle conversations with same updatedAt (tie-breaker)', async () => {
|
|
||||||
const sameTime = new Date('2026-01-01T12:00:00.000Z');
|
|
||||||
|
|
||||||
// Create 3 conversations with exact same updatedAt
|
|
||||||
const convo1 = await createConvoWithTimestamps(1, sameTime, sameTime);
|
|
||||||
const convo2 = await createConvoWithTimestamps(2, sameTime, sameTime);
|
|
||||||
const convo3 = await createConvoWithTimestamps(3, sameTime, sameTime);
|
|
||||||
|
|
||||||
const result = await getConvosByCursor('user123');
|
|
||||||
|
|
||||||
// All 3 should be returned (no skipping due to same timestamps)
|
|
||||||
expect(result.conversations).toHaveLength(3);
|
|
||||||
|
|
||||||
const returnedIds = result.conversations.map((c) => c.conversationId);
|
|
||||||
expect(returnedIds).toContain(convo1.conversationId);
|
|
||||||
expect(returnedIds).toContain(convo2.conversationId);
|
|
||||||
expect(returnedIds).toContain(convo3.conversationId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle cursor pagination with conversations updated during pagination', async () => {
|
|
||||||
// Simulate the scenario where a conversation is updated between page fetches
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create 30 conversations
|
|
||||||
for (let i = 0; i < 30; i++) {
|
|
||||||
const updatedAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
await createConvoWithTimestamps(i, updatedAt, updatedAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch first page
|
|
||||||
const page1 = await getConvosByCursor('user123', { limit: 25 });
|
|
||||||
expect(page1.conversations).toHaveLength(25);
|
|
||||||
|
|
||||||
// Now update one of the conversations that should be on page 2
|
|
||||||
// to have a newer updatedAt (simulating user activity during pagination)
|
|
||||||
const convosOnPage2 = await Conversation.find({ user: 'user123' })
|
|
||||||
.sort({ updatedAt: -1 })
|
|
||||||
.skip(25)
|
|
||||||
.limit(5);
|
|
||||||
|
|
||||||
if (convosOnPage2.length > 0) {
|
|
||||||
const updatedConvo = convosOnPage2[0];
|
|
||||||
await Conversation.updateOne(
|
|
||||||
{ _id: updatedConvo._id },
|
|
||||||
{ updatedAt: new Date('2026-01-02T00:00:00.000Z') }, // Much newer
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch second page with original cursor
|
|
||||||
const page2 = await getConvosByCursor('user123', {
|
|
||||||
limit: 25,
|
|
||||||
cursor: page1.nextCursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
// The updated conversation might not be in page 2 anymore
|
|
||||||
// (it moved to the front), but we should still get remaining items
|
|
||||||
// without errors and without infinite loops
|
|
||||||
expect(page2.conversations.length).toBeGreaterThanOrEqual(0);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should correctly decode and use cursor for pagination', async () => {
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create 30 conversations
|
|
||||||
for (let i = 0; i < 30; i++) {
|
|
||||||
const updatedAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
await createConvoWithTimestamps(i, updatedAt, updatedAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch first page
|
|
||||||
const page1 = await getConvosByCursor('user123', { limit: 25 });
|
|
||||||
|
|
||||||
// Decode the cursor to verify it's based on the last RETURNED item
|
|
||||||
const decodedCursor = JSON.parse(Buffer.from(page1.nextCursor, 'base64').toString());
|
|
||||||
|
|
||||||
// The cursor should match the last item in page1 (item at index 24)
|
|
||||||
const lastReturnedItem = page1.conversations[24];
|
|
||||||
|
|
||||||
expect(new Date(decodedCursor.primary).getTime()).toBe(
|
|
||||||
new Date(lastReturnedItem.updatedAt).getTime(),
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should support sortBy createdAt when explicitly requested', async () => {
|
|
||||||
// Create conversations with different timestamps
|
|
||||||
const convo1 = await createConvoWithTimestamps(
|
|
||||||
1,
|
|
||||||
new Date('2026-01-03T00:00:00.000Z'), // newest created
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'), // oldest updated
|
|
||||||
);
|
|
||||||
|
|
||||||
const convo2 = await createConvoWithTimestamps(
|
|
||||||
2,
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'), // oldest created
|
|
||||||
new Date('2026-01-03T00:00:00.000Z'), // newest updated
|
|
||||||
);
|
|
||||||
|
|
||||||
// Verify timestamps were set correctly
|
|
||||||
expect(new Date(convo1.createdAt).getTime()).toBe(
|
|
||||||
new Date('2026-01-03T00:00:00.000Z').getTime(),
|
|
||||||
);
|
|
||||||
expect(new Date(convo2.createdAt).getTime()).toBe(
|
|
||||||
new Date('2026-01-01T00:00:00.000Z').getTime(),
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await getConvosByCursor('user123', { sortBy: 'createdAt' });
|
|
||||||
|
|
||||||
// Should be sorted by createdAt DESC
|
|
||||||
expect(result.conversations).toHaveLength(2);
|
|
||||||
expect(result.conversations[0].conversationId).toBe(convo1.conversationId); // Jan 3 createdAt
|
|
||||||
expect(result.conversations[1].conversationId).toBe(convo2.conversationId); // Jan 1 createdAt
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle empty result set gracefully', async () => {
|
|
||||||
const result = await getConvosByCursor('user123');
|
|
||||||
|
|
||||||
expect(result.conversations).toHaveLength(0);
|
|
||||||
expect(result.nextCursor).toBeNull();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle exactly limit number of conversations (no next page)', async () => {
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create exactly 25 conversations (equal to default limit)
|
|
||||||
for (let i = 0; i < 25; i++) {
|
|
||||||
const updatedAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
await createConvoWithTimestamps(i, updatedAt, updatedAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = await getConvosByCursor('user123', { limit: 25 });
|
|
||||||
|
|
||||||
expect(result.conversations).toHaveLength(25);
|
|
||||||
expect(result.nextCursor).toBeNull(); // No next page
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -26,8 +26,7 @@ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs.
|
* Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs
|
||||||
* Note: execute_code files are handled separately by getCodeGeneratedFiles.
|
|
||||||
* @param {string[]} fileIds - Array of file_id strings to search for
|
* @param {string[]} fileIds - Array of file_id strings to search for
|
||||||
* @param {Set<EToolResources>} toolResourceSet - Optional filter for tool resources
|
* @param {Set<EToolResources>} toolResourceSet - Optional filter for tool resources
|
||||||
* @returns {Promise<Array<MongoFile>>} Files that match the criteria
|
* @returns {Promise<Array<MongoFile>>} Files that match the criteria
|
||||||
|
|
@ -38,25 +37,21 @@ const getToolFilesByIds = async (fileIds, toolResourceSet) => {
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const orConditions = [];
|
|
||||||
|
|
||||||
if (toolResourceSet.has(EToolResources.context)) {
|
|
||||||
orConditions.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
|
|
||||||
}
|
|
||||||
if (toolResourceSet.has(EToolResources.file_search)) {
|
|
||||||
orConditions.push({ embedded: true });
|
|
||||||
}
|
|
||||||
|
|
||||||
if (orConditions.length === 0) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
const filter = {
|
const filter = {
|
||||||
file_id: { $in: fileIds },
|
file_id: { $in: fileIds },
|
||||||
context: { $ne: FileContext.execute_code }, // Exclude code-generated files
|
$or: [],
|
||||||
$or: orConditions,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if (toolResourceSet.has(EToolResources.context)) {
|
||||||
|
filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
|
||||||
|
}
|
||||||
|
if (toolResourceSet.has(EToolResources.file_search)) {
|
||||||
|
filter.$or.push({ embedded: true });
|
||||||
|
}
|
||||||
|
if (toolResourceSet.has(EToolResources.execute_code)) {
|
||||||
|
filter.$or.push({ 'metadata.fileIdentifier': { $exists: true } });
|
||||||
|
}
|
||||||
|
|
||||||
const selectFields = { text: 0 };
|
const selectFields = { text: 0 };
|
||||||
const sortOptions = { updatedAt: -1 };
|
const sortOptions = { updatedAt: -1 };
|
||||||
|
|
||||||
|
|
@ -67,70 +62,6 @@ const getToolFilesByIds = async (fileIds, toolResourceSet) => {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieves files generated by code execution for a given conversation.
|
|
||||||
* These files are stored locally with fileIdentifier metadata for code env re-upload.
|
|
||||||
* @param {string} conversationId - The conversation ID to search for
|
|
||||||
* @param {string[]} [messageIds] - Optional array of messageIds to filter by (for linear thread filtering)
|
|
||||||
* @returns {Promise<Array<MongoFile>>} Files generated by code execution in the conversation
|
|
||||||
*/
|
|
||||||
const getCodeGeneratedFiles = async (conversationId, messageIds) => {
|
|
||||||
if (!conversationId) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
/** messageIds are required for proper thread filtering of code-generated files */
|
|
||||||
if (!messageIds || messageIds.length === 0) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
const filter = {
|
|
||||||
conversationId,
|
|
||||||
context: FileContext.execute_code,
|
|
||||||
messageId: { $exists: true, $in: messageIds },
|
|
||||||
'metadata.fileIdentifier': { $exists: true },
|
|
||||||
};
|
|
||||||
|
|
||||||
const selectFields = { text: 0 };
|
|
||||||
const sortOptions = { createdAt: 1 };
|
|
||||||
|
|
||||||
return await getFiles(filter, sortOptions, selectFields);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[getCodeGeneratedFiles] Error retrieving code generated files:', error);
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieves user-uploaded execute_code files (not code-generated) by their file IDs.
|
|
||||||
* These are files with fileIdentifier metadata but context is NOT execute_code (e.g., agents or message_attachment).
|
|
||||||
* File IDs should be collected from message.files arrays in the current thread.
|
|
||||||
* @param {string[]} fileIds - Array of file IDs to fetch (from message.files in the thread)
|
|
||||||
* @returns {Promise<Array<MongoFile>>} User-uploaded execute_code files
|
|
||||||
*/
|
|
||||||
const getUserCodeFiles = async (fileIds) => {
|
|
||||||
if (!fileIds || fileIds.length === 0) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
const filter = {
|
|
||||||
file_id: { $in: fileIds },
|
|
||||||
context: { $ne: FileContext.execute_code },
|
|
||||||
'metadata.fileIdentifier': { $exists: true },
|
|
||||||
};
|
|
||||||
|
|
||||||
const selectFields = { text: 0 };
|
|
||||||
const sortOptions = { createdAt: 1 };
|
|
||||||
|
|
||||||
return await getFiles(filter, sortOptions, selectFields);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[getUserCodeFiles] Error retrieving user code files:', error);
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new file with a TTL of 1 hour.
|
* Creates a new file with a TTL of 1 hour.
|
||||||
* @param {MongoFile} data - The file data to be created, must contain file_id.
|
* @param {MongoFile} data - The file data to be created, must contain file_id.
|
||||||
|
|
@ -238,8 +169,6 @@ module.exports = {
|
||||||
findFileById,
|
findFileById,
|
||||||
getFiles,
|
getFiles,
|
||||||
getToolFilesByIds,
|
getToolFilesByIds,
|
||||||
getCodeGeneratedFiles,
|
|
||||||
getUserCodeFiles,
|
|
||||||
createFile,
|
createFile,
|
||||||
updateFile,
|
updateFile,
|
||||||
updateFileUsage,
|
updateFileUsage,
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
|
const { createModels } = require('@librechat/data-schemas');
|
||||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||||
const { createModels, createMethods } = require('@librechat/data-schemas');
|
|
||||||
const {
|
const {
|
||||||
SystemRoles,
|
SystemRoles,
|
||||||
ResourceType,
|
ResourceType,
|
||||||
|
|
@ -9,6 +9,8 @@ const {
|
||||||
PrincipalType,
|
PrincipalType,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const { grantPermission } = require('~/server/services/PermissionService');
|
const { grantPermission } = require('~/server/services/PermissionService');
|
||||||
|
const { getFiles, createFile } = require('./File');
|
||||||
|
const { seedDefaultRoles } = require('~/models');
|
||||||
const { createAgent } = require('./Agent');
|
const { createAgent } = require('./Agent');
|
||||||
|
|
||||||
let File;
|
let File;
|
||||||
|
|
@ -16,10 +18,6 @@ let Agent;
|
||||||
let AclEntry;
|
let AclEntry;
|
||||||
let User;
|
let User;
|
||||||
let modelsToCleanup = [];
|
let modelsToCleanup = [];
|
||||||
let methods;
|
|
||||||
let getFiles;
|
|
||||||
let createFile;
|
|
||||||
let seedDefaultRoles;
|
|
||||||
|
|
||||||
describe('File Access Control', () => {
|
describe('File Access Control', () => {
|
||||||
let mongoServer;
|
let mongoServer;
|
||||||
|
|
@ -44,12 +42,6 @@ describe('File Access Control', () => {
|
||||||
AclEntry = dbModels.AclEntry;
|
AclEntry = dbModels.AclEntry;
|
||||||
User = dbModels.User;
|
User = dbModels.User;
|
||||||
|
|
||||||
// Create methods from data-schemas (includes file methods)
|
|
||||||
methods = createMethods(mongoose);
|
|
||||||
getFiles = methods.getFiles;
|
|
||||||
createFile = methods.createFile;
|
|
||||||
seedDefaultRoles = methods.seedDefaultRoles;
|
|
||||||
|
|
||||||
// Seed default roles
|
// Seed default roles
|
||||||
await seedDefaultRoles();
|
await seedDefaultRoles();
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -573,326 +573,4 @@ describe('Message Operations', () => {
|
||||||
expect(bulk2.expiredAt).toBeNull();
|
expect(bulk2.expiredAt).toBeNull();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('Message cursor pagination', () => {
|
|
||||||
/**
|
|
||||||
* Helper to create messages with specific timestamps
|
|
||||||
* Uses collection.insertOne to bypass Mongoose timestamps
|
|
||||||
*/
|
|
||||||
const createMessageWithTimestamp = async (index, conversationId, createdAt) => {
|
|
||||||
const messageId = uuidv4();
|
|
||||||
await Message.collection.insertOne({
|
|
||||||
messageId,
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
text: `Message ${index}`,
|
|
||||||
isCreatedByUser: index % 2 === 0,
|
|
||||||
createdAt,
|
|
||||||
updatedAt: createdAt,
|
|
||||||
});
|
|
||||||
return Message.findOne({ messageId }).lean();
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Simulates the pagination logic from api/server/routes/messages.js
|
|
||||||
* This tests the exact query pattern used in the route
|
|
||||||
*/
|
|
||||||
const getMessagesByCursor = async ({
|
|
||||||
conversationId,
|
|
||||||
user,
|
|
||||||
pageSize = 25,
|
|
||||||
cursor = null,
|
|
||||||
sortBy = 'createdAt',
|
|
||||||
sortDirection = 'desc',
|
|
||||||
}) => {
|
|
||||||
const sortOrder = sortDirection === 'asc' ? 1 : -1;
|
|
||||||
const sortField = ['createdAt', 'updatedAt'].includes(sortBy) ? sortBy : 'createdAt';
|
|
||||||
const cursorOperator = sortDirection === 'asc' ? '$gt' : '$lt';
|
|
||||||
|
|
||||||
const filter = { conversationId, user };
|
|
||||||
if (cursor) {
|
|
||||||
filter[sortField] = { [cursorOperator]: new Date(cursor) };
|
|
||||||
}
|
|
||||||
|
|
||||||
const messages = await Message.find(filter)
|
|
||||||
.sort({ [sortField]: sortOrder })
|
|
||||||
.limit(pageSize + 1)
|
|
||||||
.lean();
|
|
||||||
|
|
||||||
let nextCursor = null;
|
|
||||||
if (messages.length > pageSize) {
|
|
||||||
messages.pop(); // Remove extra item used to detect next page
|
|
||||||
// Create cursor from the last RETURNED item (not the popped one)
|
|
||||||
nextCursor = messages[messages.length - 1][sortField];
|
|
||||||
}
|
|
||||||
|
|
||||||
return { messages, nextCursor };
|
|
||||||
};
|
|
||||||
|
|
||||||
it('should return messages for a conversation with pagination', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create 30 messages to test pagination
|
|
||||||
for (let i = 0; i < 30; i++) {
|
|
||||||
const createdAt = new Date(baseTime.getTime() - i * 60000); // Each 1 minute apart
|
|
||||||
await createMessageWithTimestamp(i, conversationId, createdAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch first page (pageSize 25)
|
|
||||||
const page1 = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 25,
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(page1.messages).toHaveLength(25);
|
|
||||||
expect(page1.nextCursor).toBeTruthy();
|
|
||||||
|
|
||||||
// Fetch second page using cursor
|
|
||||||
const page2 = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 25,
|
|
||||||
cursor: page1.nextCursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Should get remaining 5 messages
|
|
||||||
expect(page2.messages).toHaveLength(5);
|
|
||||||
expect(page2.nextCursor).toBeNull();
|
|
||||||
|
|
||||||
// Verify no duplicates and no gaps
|
|
||||||
const allMessageIds = [
|
|
||||||
...page1.messages.map((m) => m.messageId),
|
|
||||||
...page2.messages.map((m) => m.messageId),
|
|
||||||
];
|
|
||||||
const uniqueIds = new Set(allMessageIds);
|
|
||||||
|
|
||||||
expect(uniqueIds.size).toBe(30); // All 30 messages accounted for
|
|
||||||
expect(allMessageIds.length).toBe(30); // No duplicates
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not skip message at page boundary (item 26 bug fix)', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const baseTime = new Date('2026-01-01T12:00:00.000Z');
|
|
||||||
|
|
||||||
// Create exactly 26 messages
|
|
||||||
const messages = [];
|
|
||||||
for (let i = 0; i < 26; i++) {
|
|
||||||
const createdAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
const msg = await createMessageWithTimestamp(i, conversationId, createdAt);
|
|
||||||
messages.push(msg);
|
|
||||||
}
|
|
||||||
|
|
||||||
// The 26th message (index 25) should be on page 2
|
|
||||||
const item26 = messages[25];
|
|
||||||
|
|
||||||
// Fetch first page with pageSize 25
|
|
||||||
const page1 = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 25,
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(page1.messages).toHaveLength(25);
|
|
||||||
expect(page1.nextCursor).toBeTruthy();
|
|
||||||
|
|
||||||
// Item 26 should NOT be in page 1
|
|
||||||
const page1Ids = page1.messages.map((m) => m.messageId);
|
|
||||||
expect(page1Ids).not.toContain(item26.messageId);
|
|
||||||
|
|
||||||
// Fetch second page
|
|
||||||
const page2 = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 25,
|
|
||||||
cursor: page1.nextCursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Item 26 MUST be in page 2 (this was the bug - it was being skipped)
|
|
||||||
expect(page2.messages).toHaveLength(1);
|
|
||||||
expect(page2.messages[0].messageId).toBe(item26.messageId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should sort by createdAt DESC by default', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
|
|
||||||
// Create messages with specific timestamps
|
|
||||||
const msg1 = await createMessageWithTimestamp(
|
|
||||||
1,
|
|
||||||
conversationId,
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'),
|
|
||||||
);
|
|
||||||
const msg2 = await createMessageWithTimestamp(
|
|
||||||
2,
|
|
||||||
conversationId,
|
|
||||||
new Date('2026-01-02T00:00:00.000Z'),
|
|
||||||
);
|
|
||||||
const msg3 = await createMessageWithTimestamp(
|
|
||||||
3,
|
|
||||||
conversationId,
|
|
||||||
new Date('2026-01-03T00:00:00.000Z'),
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
});
|
|
||||||
|
|
||||||
// Should be sorted by createdAt DESC (newest first) by default
|
|
||||||
expect(result.messages).toHaveLength(3);
|
|
||||||
expect(result.messages[0].messageId).toBe(msg3.messageId);
|
|
||||||
expect(result.messages[1].messageId).toBe(msg2.messageId);
|
|
||||||
expect(result.messages[2].messageId).toBe(msg1.messageId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should support ascending sort direction', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
|
|
||||||
const msg1 = await createMessageWithTimestamp(
|
|
||||||
1,
|
|
||||||
conversationId,
|
|
||||||
new Date('2026-01-01T00:00:00.000Z'),
|
|
||||||
);
|
|
||||||
const msg2 = await createMessageWithTimestamp(
|
|
||||||
2,
|
|
||||||
conversationId,
|
|
||||||
new Date('2026-01-02T00:00:00.000Z'),
|
|
||||||
);
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
sortDirection: 'asc',
|
|
||||||
});
|
|
||||||
|
|
||||||
// Should be sorted by createdAt ASC (oldest first)
|
|
||||||
expect(result.messages).toHaveLength(2);
|
|
||||||
expect(result.messages[0].messageId).toBe(msg1.messageId);
|
|
||||||
expect(result.messages[1].messageId).toBe(msg2.messageId);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle empty conversation', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.messages).toHaveLength(0);
|
|
||||||
expect(result.nextCursor).toBeNull();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should only return messages for the specified user', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const createdAt = new Date();
|
|
||||||
|
|
||||||
// Create a message for user123
|
|
||||||
await Message.collection.insertOne({
|
|
||||||
messageId: uuidv4(),
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
text: 'User message',
|
|
||||||
createdAt,
|
|
||||||
updatedAt: createdAt,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create a message for a different user
|
|
||||||
await Message.collection.insertOne({
|
|
||||||
messageId: uuidv4(),
|
|
||||||
conversationId,
|
|
||||||
user: 'otherUser',
|
|
||||||
text: 'Other user message',
|
|
||||||
createdAt,
|
|
||||||
updatedAt: createdAt,
|
|
||||||
});
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
});
|
|
||||||
|
|
||||||
// Should only return user123's message
|
|
||||||
expect(result.messages).toHaveLength(1);
|
|
||||||
expect(result.messages[0].user).toBe('user123');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle exactly pageSize number of messages (no next page)', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create exactly 25 messages (equal to default pageSize)
|
|
||||||
for (let i = 0; i < 25; i++) {
|
|
||||||
const createdAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
await createMessageWithTimestamp(i, conversationId, createdAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 25,
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(result.messages).toHaveLength(25);
|
|
||||||
expect(result.nextCursor).toBeNull(); // No next page
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle pageSize of 1', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const baseTime = new Date('2026-01-01T00:00:00.000Z');
|
|
||||||
|
|
||||||
// Create 3 messages
|
|
||||||
for (let i = 0; i < 3; i++) {
|
|
||||||
const createdAt = new Date(baseTime.getTime() - i * 60000);
|
|
||||||
await createMessageWithTimestamp(i, conversationId, createdAt);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch with pageSize 1
|
|
||||||
let cursor = null;
|
|
||||||
const allMessages = [];
|
|
||||||
|
|
||||||
for (let page = 0; page < 5; page++) {
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 1,
|
|
||||||
cursor,
|
|
||||||
});
|
|
||||||
|
|
||||||
allMessages.push(...result.messages);
|
|
||||||
cursor = result.nextCursor;
|
|
||||||
|
|
||||||
if (!cursor) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should get all 3 messages without duplicates
|
|
||||||
expect(allMessages).toHaveLength(3);
|
|
||||||
const uniqueIds = new Set(allMessages.map((m) => m.messageId));
|
|
||||||
expect(uniqueIds.size).toBe(3);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle messages with same createdAt timestamp', async () => {
|
|
||||||
const conversationId = uuidv4();
|
|
||||||
const sameTime = new Date('2026-01-01T12:00:00.000Z');
|
|
||||||
|
|
||||||
// Create multiple messages with the exact same timestamp
|
|
||||||
const messages = [];
|
|
||||||
for (let i = 0; i < 5; i++) {
|
|
||||||
const msg = await createMessageWithTimestamp(i, conversationId, sameTime);
|
|
||||||
messages.push(msg);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = await getMessagesByCursor({
|
|
||||||
conversationId,
|
|
||||||
user: 'user123',
|
|
||||||
pageSize: 10,
|
|
||||||
});
|
|
||||||
|
|
||||||
// All messages should be returned
|
|
||||||
expect(result.messages).toHaveLength(5);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,4 @@
|
||||||
const { ObjectId } = require('mongodb');
|
const { ObjectId } = require('mongodb');
|
||||||
const { escapeRegExp } = require('@librechat/api');
|
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const {
|
const {
|
||||||
Constants,
|
Constants,
|
||||||
|
|
@ -15,6 +14,7 @@ const {
|
||||||
} = require('./Project');
|
} = require('./Project');
|
||||||
const { removeAllPermissions } = require('~/server/services/PermissionService');
|
const { removeAllPermissions } = require('~/server/services/PermissionService');
|
||||||
const { PromptGroup, Prompt, AclEntry } = require('~/db/models');
|
const { PromptGroup, Prompt, AclEntry } = require('~/db/models');
|
||||||
|
const { escapeRegExp } = require('~/server/utils');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a pipeline for the aggregation to get prompt groups
|
* Create a pipeline for the aggregation to get prompt groups
|
||||||
|
|
|
||||||
|
|
@ -114,28 +114,6 @@ async function updateAccessPermissions(roleName, permissionsUpdate, roleData) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Migrate legacy SHARED_GLOBAL → SHARE for PROMPTS and AGENTS.
|
|
||||||
// SHARED_GLOBAL was removed in favour of SHARE in PR #11283. If the DB still has
|
|
||||||
// SHARED_GLOBAL but not SHARE, inherit the value so sharing intent is preserved.
|
|
||||||
const legacySharedGlobalTypes = ['PROMPTS', 'AGENTS'];
|
|
||||||
for (const legacyPermType of legacySharedGlobalTypes) {
|
|
||||||
const existingTypePerms = currentPermissions[legacyPermType];
|
|
||||||
if (
|
|
||||||
existingTypePerms &&
|
|
||||||
'SHARED_GLOBAL' in existingTypePerms &&
|
|
||||||
!('SHARE' in existingTypePerms) &&
|
|
||||||
updates[legacyPermType] &&
|
|
||||||
// Don't override an explicit SHARE value the caller already provided
|
|
||||||
!('SHARE' in updates[legacyPermType])
|
|
||||||
) {
|
|
||||||
const inheritedValue = existingTypePerms['SHARED_GLOBAL'];
|
|
||||||
updates[legacyPermType]['SHARE'] = inheritedValue;
|
|
||||||
logger.info(
|
|
||||||
`Migrating '${roleName}' role ${legacyPermType}.SHARED_GLOBAL=${inheritedValue} → SHARE`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const [permissionType, permissions] of Object.entries(updates)) {
|
for (const [permissionType, permissions] of Object.entries(updates)) {
|
||||||
const currentTypePermissions = currentPermissions[permissionType] || {};
|
const currentTypePermissions = currentPermissions[permissionType] || {};
|
||||||
updatedPermissions[permissionType] = { ...currentTypePermissions };
|
updatedPermissions[permissionType] = { ...currentTypePermissions };
|
||||||
|
|
@ -151,32 +129,6 @@ async function updateAccessPermissions(roleName, permissionsUpdate, roleData) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clean up orphaned SHARED_GLOBAL fields left in DB after the schema rename.
|
|
||||||
// Since we $set the full permissions object, deleting from updatedPermissions
|
|
||||||
// is sufficient to remove the field from MongoDB.
|
|
||||||
for (const legacyPermType of legacySharedGlobalTypes) {
|
|
||||||
const existingTypePerms = currentPermissions[legacyPermType];
|
|
||||||
if (existingTypePerms && 'SHARED_GLOBAL' in existingTypePerms) {
|
|
||||||
if (!updates[legacyPermType]) {
|
|
||||||
// permType wasn't in the update payload so the migration block above didn't run.
|
|
||||||
// Create a writable copy and handle the SHARED_GLOBAL → SHARE inheritance here
|
|
||||||
// to avoid removing SHARED_GLOBAL without writing SHARE (data loss).
|
|
||||||
updatedPermissions[legacyPermType] = { ...existingTypePerms };
|
|
||||||
if (!('SHARE' in existingTypePerms)) {
|
|
||||||
updatedPermissions[legacyPermType]['SHARE'] = existingTypePerms['SHARED_GLOBAL'];
|
|
||||||
logger.info(
|
|
||||||
`Migrating '${roleName}' role ${legacyPermType}.SHARED_GLOBAL=${existingTypePerms['SHARED_GLOBAL']} → SHARE`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete updatedPermissions[legacyPermType]['SHARED_GLOBAL'];
|
|
||||||
hasChanges = true;
|
|
||||||
logger.info(
|
|
||||||
`Removed legacy SHARED_GLOBAL field from '${roleName}' role ${legacyPermType} permissions`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (hasChanges) {
|
if (hasChanges) {
|
||||||
const updateObj = { permissions: updatedPermissions };
|
const updateObj = { permissions: updatedPermissions };
|
||||||
|
|
||||||
|
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue