diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000..7f5566fb97 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,5 @@ +FROM node:18-bullseye + +RUN useradd -m -s /bin/bash vscode +RUN mkdir -p /workspaces && chown -R vscode:vscode /workspaces +WORKDIR /workspaces diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..a3bb780550 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,18 @@ +{ + "dockerComposeFile": "docker-compose.yml", + "service": "app", + "workspaceFolder": "/workspaces", + "customizations": { + "vscode": { + "extensions": [], + "settings": { + "terminal.integrated.profiles.linux": { + "bash": null + } + } + } + }, + "postCreateCommand": "", + "features": { "ghcr.io/devcontainers/features/git:1": {} }, + "remoteUser": "vscode" +} diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 0000000000..70ebf9b955 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,62 @@ +services: + app: + build: + context: .. + dockerfile: .devcontainer/Dockerfile + # restart: always + links: + - mongodb + - meilisearch + # ports: + # - 3080:3080 # Change it to 9000:3080 to use nginx + extra_hosts: # if you are running APIs on docker you need access to, you will need to uncomment this line and next + - "host.docker.internal:host-gateway" + + volumes: + # This is where VS Code should expect to find your project's source code and the value of "workspaceFolder" in .devcontainer/devcontainer.json + - ..:/workspaces:cached + # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details. + # - /var/run/docker.sock:/var/run/docker.sock + environment: + - HOST=0.0.0.0 + - MONGO_URI=mongodb://mongodb:27017/LibreChat + # - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1 + - MEILI_HOST=http://meilisearch:7700 + + # Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function. + # network_mode: service:another-service + + # Use "forwardPorts" in **devcontainer.json** to forward an app port locally. + # (Adding the "ports" property to this file will not forward from a Codespace.) + + # Use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details. + user: vscode + + # Overrides default command so things don't shut down after the process ends. 
+ command: /bin/sh -c "while sleep 1000; do :; done" + + mongodb: + container_name: chat-mongodb + expose: + - 27017 + # ports: + # - 27018:27017 + image: mongo + # restart: always + volumes: + - ./data-node:/data/db + command: mongod --noauth + meilisearch: + container_name: chat-meilisearch + image: getmeili/meilisearch:v1.5 + # restart: always + expose: + - 7700 + # Uncomment this to access meilisearch from outside docker + # ports: + # - 7700:7700 # if exposing these ports, make sure your master key is not the default value + environment: + - MEILI_NO_ANALYTICS=true + - MEILI_MASTER_KEY=5c71cf56d672d009e36070b5bc5e47b743535ae55c818ae3b735bb6ebfb4ba63 + volumes: + - ./meili_data_v1.5:/meili_data diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..396f0da3e5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,17 @@ +**/.circleci +**/.editorconfig +**/.dockerignore +**/.git +**/.DS_Store +**/.vscode +**/node_modules + +# Specific patterns to ignore +data-node +meili_data* +librechat* +Dockerfile* +docs + +# Ignore all hidden files +.* diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000..799e44525b --- /dev/null +++ b/.env.example @@ -0,0 +1,782 @@ +#=====================================================================# +# LibreChat Configuration # +#=====================================================================# +# Please refer to the reference documentation for assistance # +# with configuring your LibreChat environment. # +# # +# https://www.librechat.ai/docs/configuration/dotenv # +#=====================================================================# + +#==================================================# +# Server Configuration # +#==================================================# + +HOST=localhost +PORT=3080 + +MONGO_URI=mongodb://127.0.0.1:27017/LibreChat +#The maximum number of connections in the connection pool. */ +MONGO_MAX_POOL_SIZE= +#The minimum number of connections in the connection pool. */ +MONGO_MIN_POOL_SIZE= +#The maximum number of connections that may be in the process of being established concurrently by the connection pool. */ +MONGO_MAX_CONNECTING= +#The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed. */ +MONGO_MAX_IDLE_TIME_MS= +#The maximum time in milliseconds that a thread can wait for a connection to become available. */ +MONGO_WAIT_QUEUE_TIMEOUT_MS= +# Set to false to disable automatic index creation for all models associated with this connection. */ +MONGO_AUTO_INDEX= +# Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection. */ +MONGO_AUTO_CREATE= + +DOMAIN_CLIENT=http://localhost:3080 +DOMAIN_SERVER=http://localhost:3080 + +NO_INDEX=true +# Use the address that is at most n number of hops away from the Express application. +# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left. +# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy. +# Defaulted to 1. +TRUST_PROXY=1 + +# Minimum password length for user authentication +# Default: 8 +# Note: When using LDAP authentication, you may want to set this to 1 +# to bypass local password validation, as LDAP servers handle their own +# password policies. 
+# MIN_PASSWORD_LENGTH=8 + +#===============# +# JSON Logging # +#===============# + +# Use when processing console logs in a cloud deployment like GCP/AWS +CONSOLE_JSON=false + +#===============# +# Debug Logging # +#===============# + +DEBUG_LOGGING=true +DEBUG_CONSOLE=false + +#=============# +# Permissions # +#=============# + +# UID=1000 +# GID=1000 + +#===============# +# Configuration # +#===============# +# Use an absolute path, a relative path, or a URL + +# CONFIG_PATH="/alternative/path/to/librechat.yaml" + +#===================================================# +# Endpoints # +#===================================================# + +# ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic + +PROXY= + +#===================================# +# Known Endpoints - librechat.yaml # +#===================================# +# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints + +# ANYSCALE_API_KEY= +# APIPIE_API_KEY= +# COHERE_API_KEY= +# DEEPSEEK_API_KEY= +# DATABRICKS_API_KEY= +# FIREWORKS_API_KEY= +# GROQ_API_KEY= +# HUGGINGFACE_TOKEN= +# MISTRAL_API_KEY= +# OPENROUTER_KEY= +# PERPLEXITY_API_KEY= +# SHUTTLEAI_API_KEY= +# TOGETHERAI_API_KEY= +# UNIFY_API_KEY= +# XAI_API_KEY= + +#============# +# Anthropic # +#============# + +ANTHROPIC_API_KEY=user_provided +# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307 +# ANTHROPIC_REVERSE_PROXY= + +#============# +# Azure # +#============# + +# Note: these variables are DEPRECATED +# Use the `librechat.yaml` configuration for `azureOpenAI` instead +# You may also continue to use them if you opt out of using the `librechat.yaml` configuration + +# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated +# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated +# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated +# AZURE_API_KEY= # Deprecated +# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated +# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated +# AZURE_OPENAI_API_VERSION= # Deprecated +# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated +# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated + +#=================# +# AWS Bedrock # +#=================# + +# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided +# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey +# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey +# BEDROCK_AWS_SESSION_TOKEN=someSessionToken + +# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you. +# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0 + +# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns + +# Notes on specific models: +# The following models are not supported because they do not support streaming: +# ai21.j2-mid-v1 + +# The following models are not supported because they do not support conversation history: +# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14 + +#============# +# Google # +#============# + +GOOGLE_KEY=user_provided + +# GOOGLE_REVERSE_PROXY= +# Some reverse proxies do not support the X-goog-api-key header; uncomment to pass the API key in the Authorization header instead.
+# GOOGLE_AUTH_HEADER=true + +# Gemini API (AI Studio) +# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite + +# Vertex AI +# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001 + +# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001 + +# GOOGLE_LOC=us-central1 + +# Google Safety Settings +# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio) +# +# For Vertex AI: +# To use the BLOCK_NONE setting, you need either: +# (a) Access through an allowlist via your Google account team, or +# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing +# +# For Gemini API (AI Studio): +# BLOCK_NONE is available by default, no special account requirements. +# +# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE +# +# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH + +#============# +# OpenAI # +#============# + +OPENAI_API_KEY=user_provided +# OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini + +DEBUG_OPENAI=false + +# TITLE_CONVO=false +# OPENAI_TITLE_MODEL=gpt-4o-mini + +# OPENAI_SUMMARIZE=true +# OPENAI_SUMMARY_MODEL=gpt-4o-mini + +# OPENAI_FORCE_PROMPT=true + +# OPENAI_REVERSE_PROXY= + +# OPENAI_ORGANIZATION= + +#====================# +# Assistants API # +#====================# + +ASSISTANTS_API_KEY=user_provided +# ASSISTANTS_BASE_URL= +# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview + +#==========================# +# Azure Assistants API # +#==========================# + +# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration +# The models for Azure Assistants are also determined by your Azure OpenAI configuration. 
+ +# More info, including how to enable use of Assistants with Azure here: +# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure + +CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0 +CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb + +# Azure AI Search +#----------------- +AZURE_AI_SEARCH_SERVICE_ENDPOINT= +AZURE_AI_SEARCH_INDEX_NAME= +AZURE_AI_SEARCH_API_KEY= + +AZURE_AI_SEARCH_API_VERSION= +AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE= +AZURE_AI_SEARCH_SEARCH_OPTION_TOP= +AZURE_AI_SEARCH_SEARCH_OPTION_SELECT= + +# OpenAI Image Tools Customization +#---------------- +# IMAGE_GEN_OAI_API_KEY= # Create or reuse OpenAI API key for image generation tool +# IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool +# IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments +# IMAGE_GEN_OAI_DESCRIPTION= +# IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present +# IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present +# IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool +# IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool +# IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool + +# DALL·E +#---------------- +# DALLE_API_KEY= +# DALLE3_API_KEY= +# DALLE2_API_KEY= +# DALLE3_SYSTEM_PROMPT= +# DALLE2_SYSTEM_PROMPT= +# DALLE_REVERSE_PROXY= +# DALLE3_BASEURL= +# DALLE2_BASEURL= + +# DALL·E (via Azure OpenAI) +# Note: requires some of the variables above to be set +#---------------- +# DALLE3_AZURE_API_VERSION= +# DALLE2_AZURE_API_VERSION= + +# Flux +#----------------- +FLUX_API_BASE_URL=https://api.us1.bfl.ai +# FLUX_API_BASE_URL = 'https://api.bfl.ml'; + +# Get your API key at https://api.us1.bfl.ai/auth/profile +# FLUX_API_KEY= + +# Google +#----------------- +GOOGLE_SEARCH_API_KEY= +GOOGLE_CSE_ID= + +# YOUTUBE +#----------------- +YOUTUBE_API_KEY= + +# Stable Diffusion +#----------------- +SD_WEBUI_URL=http://host.docker.internal:7860 + +# Tavily +#----------------- +TAVILY_API_KEY= + +# Traversaal +#----------------- +TRAVERSAAL_API_KEY= + +# WolframAlpha +#----------------- +WOLFRAM_APP_ID= + +# Zapier +#----------------- +ZAPIER_NLA_API_KEY= + +#==================================================# +# Search # +#==================================================# + +SEARCH=true +MEILI_NO_ANALYTICS=true +MEILI_HOST=http://0.0.0.0:7700 +MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt + +# Optional: Disable indexing, useful in a multi-node setup +# where only one instance should perform an index sync. 
+# MEILI_NO_SYNC=true + +#==================================================# +# Speech to Text & Text to Speech # +#==================================================# + +STT_API_KEY= +TTS_API_KEY= + +#==================================================# +# RAG # +#==================================================# +# More info: https://www.librechat.ai/docs/configuration/rag_api + +# RAG_OPENAI_BASEURL= +# RAG_OPENAI_API_KEY= +# RAG_USE_FULL_CONTEXT= +# EMBEDDINGS_PROVIDER=openai +# EMBEDDINGS_MODEL=text-embedding-3-small + +#===================================================# +# User System # +#===================================================# + +#========================# +# Moderation # +#========================# + +OPENAI_MODERATION=false +OPENAI_MODERATION_API_KEY= +# OPENAI_MODERATION_REVERSE_PROXY= + +BAN_VIOLATIONS=true +BAN_DURATION=1000 * 60 * 60 * 2 +BAN_INTERVAL=20 + +LOGIN_VIOLATION_SCORE=1 +REGISTRATION_VIOLATION_SCORE=1 +CONCURRENT_VIOLATION_SCORE=1 +MESSAGE_VIOLATION_SCORE=1 +NON_BROWSER_VIOLATION_SCORE=20 +TTS_VIOLATION_SCORE=0 +STT_VIOLATION_SCORE=0 +FORK_VIOLATION_SCORE=0 +IMPORT_VIOLATION_SCORE=0 +FILE_UPLOAD_VIOLATION_SCORE=0 + +LOGIN_MAX=7 +LOGIN_WINDOW=5 +REGISTER_MAX=5 +REGISTER_WINDOW=60 + +LIMIT_CONCURRENT_MESSAGES=true +CONCURRENT_MESSAGE_MAX=2 + +LIMIT_MESSAGE_IP=true +MESSAGE_IP_MAX=40 +MESSAGE_IP_WINDOW=1 + +LIMIT_MESSAGE_USER=false +MESSAGE_USER_MAX=40 +MESSAGE_USER_WINDOW=1 + +ILLEGAL_MODEL_REQ_SCORE=5 + +#========================# +# Balance # +#========================# + +# CHECK_BALANCE=false +# START_BALANCE=20000 # note: the number of tokens that will be credited after registration. + +#========================# +# Registration and Login # +#========================# + +ALLOW_EMAIL_LOGIN=true +ALLOW_REGISTRATION=true +ALLOW_SOCIAL_LOGIN=false +ALLOW_SOCIAL_REGISTRATION=false +ALLOW_PASSWORD_RESET=false +# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out +ALLOW_UNVERIFIED_EMAIL_LOGIN=true + +SESSION_EXPIRY=1000 * 60 * 15 +REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7 + +JWT_SECRET=16f8c0ef4a5d391b26034086c628469d3f9f497f08163ab9b40137092f2909ef +JWT_REFRESH_SECRET=eaa5191f2914e30b9387fd84e254e4ba6fc51b4654968a9b0803b456a54b8418 + +# Discord +DISCORD_CLIENT_ID= +DISCORD_CLIENT_SECRET= +DISCORD_CALLBACK_URL=/oauth/discord/callback + +# Facebook +FACEBOOK_CLIENT_ID= +FACEBOOK_CLIENT_SECRET= +FACEBOOK_CALLBACK_URL=/oauth/facebook/callback + +# GitHub +GITHUB_CLIENT_ID= +GITHUB_CLIENT_SECRET= +GITHUB_CALLBACK_URL=/oauth/github/callback +# GitHub Enterprise +# GITHUB_ENTERPRISE_BASE_URL= +# GITHUB_ENTERPRISE_USER_AGENT= + +# Google +GOOGLE_CLIENT_ID= +GOOGLE_CLIENT_SECRET= +GOOGLE_CALLBACK_URL=/oauth/google/callback + +# Apple +APPLE_CLIENT_ID= +APPLE_TEAM_ID= +APPLE_KEY_ID= +APPLE_PRIVATE_KEY_PATH= +APPLE_CALLBACK_URL=/oauth/apple/callback + +# OpenID +OPENID_CLIENT_ID= +OPENID_CLIENT_SECRET= +OPENID_ISSUER= +OPENID_SESSION_SECRET= +OPENID_SCOPE="openid profile email" +OPENID_CALLBACK_URL=/oauth/openid/callback +OPENID_REQUIRED_ROLE= +OPENID_REQUIRED_ROLE_TOKEN_KIND= +OPENID_REQUIRED_ROLE_PARAMETER_PATH= +OPENID_ADMIN_ROLE= +OPENID_ADMIN_ROLE_PARAMETER_PATH= +OPENID_ADMIN_ROLE_TOKEN_KIND= +# Set to determine which user info property returned from OpenID Provider to store as the User's username +OPENID_USERNAME_CLAIM= +# Set to determine which user info property returned from OpenID Provider to store as the User's name +OPENID_NAME_CLAIM= +# Optional audience parameter for OpenID authorization requests +OPENID_AUDIENCE= 
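The hard-coded `CREDS_KEY`, `CREDS_IV`, `JWT_SECRET`, and `JWT_REFRESH_SECRET` values earlier in this file are published example strings and should be regenerated for any real deployment, as should `OPENID_SESSION_SECRET` when OpenID login is enabled. A minimal sketch of one way to generate replacements, assuming `openssl` is available (the loop and the exact list of variable names are illustrative, not part of LibreChat itself):

```bash
# Print freshly generated secrets in dotenv format; paste the output into your .env file.
# 32 random bytes -> 64 hex characters, matching the format of the example values above.
for var in CREDS_KEY JWT_SECRET JWT_REFRESH_SECRET OPENID_SESSION_SECRET; do
  printf '%s=%s\n' "$var" "$(openssl rand -hex 32)"
done
# CREDS_IV is a 16-byte IV -> 32 hex characters.
printf 'CREDS_IV=%s\n' "$(openssl rand -hex 16)"
```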
+ +OPENID_BUTTON_LABEL= +OPENID_IMAGE_URL= +# Set to true to automatically redirect to the OpenID provider when a user visits the login page +# This will bypass the login form completely for users; only use this if OpenID is your only authentication method +OPENID_AUTO_REDIRECT=false +# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication +OPENID_USE_PKCE=false +# Set to true to reuse OpenID tokens for authentication management instead of using the MongoDB session and the custom refresh token. +OPENID_REUSE_TOKENS= +# By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint. +# If a signing key matching the kid is found, it will be cached, and the next time this kid is requested the signing key will be served from the cache. +# Default is true. +OPENID_JWKS_URL_CACHE_ENABLED= +OPENID_JWKS_URL_CACHE_TIME= # 600000 ms is equal to 10 minutes; leave empty to disable caching +# Set to true to trigger the token exchange flow to acquire an access token for the userinfo endpoint. +OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED= +OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example of the scope needed for the Microsoft Graph API +# Set to true to use the OpenID Connect end session endpoint for logout +OPENID_USE_END_SESSION_ENDPOINT= + +#========================# +# SharePoint Integration # +#========================# +# Requires Entra ID (OpenID) authentication to be configured + +# Enable SharePoint file picker in chat and agent panels +# ENABLE_SHAREPOINT_FILEPICKER=true + +# SharePoint tenant base URL (e.g., https://yourtenant.sharepoint.com) +# SHAREPOINT_BASE_URL=https://yourtenant.sharepoint.com + +# Microsoft Graph API and SharePoint scopes for file picker +# SHAREPOINT_PICKER_SHAREPOINT_SCOPE=https://yourtenant.sharepoint.com/AllSites.Read +# SHAREPOINT_PICKER_GRAPH_SCOPE=Files.Read.All +#========================# + +# SAML +# Note: If OpenID is enabled, SAML authentication will be automatically disabled. +SAML_ENTRY_POINT= +SAML_ISSUER= +SAML_CERT= +SAML_CALLBACK_URL=/oauth/saml/callback +SAML_SESSION_SECRET= + +# Attribute mappings (optional) +SAML_EMAIL_CLAIM= +SAML_USERNAME_CLAIM= +SAML_GIVEN_NAME_CLAIM= +SAML_FAMILY_NAME_CLAIM= +SAML_PICTURE_CLAIM= +SAML_NAME_CLAIM= + +# Login button settings (optional) +SAML_BUTTON_LABEL= +SAML_IMAGE_URL= + +# Whether the SAML Response should be signed. +# - If "true", the entire `SAML Response` will be signed. +# - If "false" or unset, only the `SAML Assertion` will be signed (default behavior).
+# SAML_USE_AUTHN_RESPONSE_SIGNED= + + +#===============================================# +# Microsoft Graph API / Entra ID Integration # +#===============================================# + +# Enable Entra ID people search integration in the permissions/sharing system +# When enabled, the people picker will search both the local database and Entra ID +USE_ENTRA_ID_FOR_PEOPLE_SEARCH=false + +# When enabled, Entra ID group owners will be considered members of the group +ENTRA_ID_INCLUDE_OWNERS_AS_MEMBERS=false + +# Microsoft Graph API scopes needed for people/group search +# Default scopes provide access to user profiles and group memberships +OPENID_GRAPH_SCOPES=User.Read,People.Read,GroupMember.Read.All + +# LDAP +LDAP_URL= +LDAP_BIND_DN= +LDAP_BIND_CREDENTIALS= +LDAP_USER_SEARCH_BASE= +#LDAP_SEARCH_FILTER="mail=" +LDAP_CA_CERT_PATH= +# LDAP_TLS_REJECT_UNAUTHORIZED= +# LDAP_STARTTLS= +# LDAP_LOGIN_USES_USERNAME=true +# LDAP_ID= +# LDAP_USERNAME= +# LDAP_EMAIL= +# LDAP_FULL_NAME= + +#========================# +# Email Password Reset # +#========================# + +EMAIL_SERVICE= +EMAIL_HOST= +EMAIL_PORT=25 +EMAIL_ENCRYPTION= +EMAIL_ENCRYPTION_HOSTNAME= +EMAIL_ALLOW_SELFSIGNED= +EMAIL_USERNAME= +EMAIL_PASSWORD= +EMAIL_FROM_NAME= +EMAIL_FROM=noreply@librechat.ai + +#========================# +# Mailgun API # +#========================# + +# MAILGUN_API_KEY=your-mailgun-api-key +# MAILGUN_DOMAIN=mg.yourdomain.com +# EMAIL_FROM=noreply@yourdomain.com +# EMAIL_FROM_NAME="LibreChat" + +# # Optional: For EU region +# MAILGUN_HOST=https://api.eu.mailgun.net + +#========================# +# Firebase CDN # +#========================# + +FIREBASE_API_KEY= +FIREBASE_AUTH_DOMAIN= +FIREBASE_PROJECT_ID= +FIREBASE_STORAGE_BUCKET= +FIREBASE_MESSAGING_SENDER_ID= +FIREBASE_APP_ID= + +#========================# +# S3 AWS Bucket # +#========================# + +AWS_ENDPOINT_URL= +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= +AWS_REGION= +AWS_BUCKET_NAME= + +#========================# +# Azure Blob Storage # +#========================# + +AZURE_STORAGE_CONNECTION_STRING= +AZURE_STORAGE_PUBLIC_ACCESS=false +AZURE_CONTAINER_NAME=files + +#========================# +# Shared Links # +#========================# + +ALLOW_SHARED_LINKS=true +ALLOW_SHARED_LINKS_PUBLIC=true + +#==============================# +# Static File Cache Control # +#==============================# + +# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age +# NODE_ENV must be set to production for these to take effect +# STATIC_CACHE_MAX_AGE=172800 +# STATIC_CACHE_S_MAX_AGE=86400 + +# If you have another service in front of your LibreChat doing compression, disable Express-based compression here +# DISABLE_COMPRESSION=true + +# If you have gzipped versions of uploaded images in the same folder, this will enable scanning for and serving of these gzipped images +# Note: The images folder will be scanned on startup and a map kept in memory. Be careful with a large number of images.
+# ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true + +#===================================================# +# UI # +#===================================================# + +APP_TITLE=LibreChat +# CUSTOM_FOOTER="My custom footer" +HELP_AND_FAQ_URL=https://librechat.ai + +# SHOW_BIRTHDAY_ICON=true + +# Google Tag Manager ID +# ANALYTICS_GTM_ID=user provided google tag manager id + +# Limit conversation file imports to a certain size, to keep the container from +# maxing out its memory limits, by uncommenting this line and supplying a file size in bytes, +# such as the example below of 250 MiB +# CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000 + + +#===============# +# REDIS Options # +#===============# + +# Enable Redis for caching and session storage +# USE_REDIS=true + +# Single Redis instance +# REDIS_URI=redis://127.0.0.1:6379 + +# Redis cluster (multiple nodes) +# REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003 + +# Redis with TLS/SSL encryption and CA certificate +# REDIS_URI=rediss://127.0.0.1:6380 +# REDIS_CA=/path/to/ca-cert.pem + +# ElastiCache may need to use an alternate dnsLookup for TLS connections. See "Special Note: Aws Elasticache Clusters with TLS" on this webpage: https://www.npmjs.com/package/ioredis +# Enable alternative dnsLookup for Redis +# REDIS_USE_ALTERNATIVE_DNS_LOOKUP=true + +# Redis authentication (if required) +# REDIS_USERNAME=your_redis_username +# REDIS_PASSWORD=your_redis_password + +# Redis key prefix configuration +# Use environment variable name for dynamic prefix (recommended for cloud deployments) +# REDIS_KEY_PREFIX_VAR=K_REVISION +# Or use static prefix directly +# REDIS_KEY_PREFIX=librechat + +# Redis connection limits +# REDIS_MAX_LISTENERS=40 + +# Redis ping interval in seconds (0 = disabled, >0 = enabled) +# When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive +# When unset or 0, no pinging is performed (recommended for most use cases) +# REDIS_PING_INTERVAL=300 + +# Force specific cache namespaces to use in-memory storage even when Redis is enabled +# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES) +# FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES + +# Leader Election Configuration (for multi-instance deployments with Redis) +# Duration in seconds that the leader lease is valid before it expires (default: 25) +# LEADER_LEASE_DURATION=25 +# Interval in seconds at which the leader renews its lease (default: 10) +# LEADER_RENEW_INTERVAL=10 +# Maximum number of retry attempts when renewing the lease fails (default: 3) +# LEADER_RENEW_ATTEMPTS=3 +# Delay in seconds between retry attempts when renewing the lease (default: 0.5) +# LEADER_RENEW_RETRY_DELAY=0.5 + +#==================================================# +# Others # +#==================================================# +# You should leave the following commented out # + +# NODE_ENV= + +# E2E_USER_EMAIL= +# E2E_USER_PASSWORD= + +#=====================================================# +# Cache Headers # +#=====================================================# +# Headers that control caching of the index.html # +# Default configuration prevents caching to ensure # +# users always get the latest version. Customize # +# only if you understand caching implications.
# + +# INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate +# INDEX_PRAGMA=no-cache +# INDEX_EXPIRES=0 + +# no-cache: Forces validation with server before using cached version +# no-store: Prevents storing the response entirely +# must-revalidate: Prevents using stale content when offline + +#=====================================================# +# OpenWeather # +#=====================================================# +OPENWEATHER_API_KEY= + +#====================================# +# LibreChat Code Interpreter API # +#====================================# + +# https://code.librechat.ai +# LIBRECHAT_CODE_API_KEY=your-key + +#======================# +# Web Search # +#======================# + +# Note: All of the following variable names can be customized. +# Omit values to allow user to provide them. + +# For more information on configuration values, see: +# https://librechat.ai/docs/features/web_search + +# Search Provider (Required) +# SERPER_API_KEY=your_serper_api_key + +# Scraper (Required) +# FIRECRAWL_API_KEY=your_firecrawl_api_key +# Optional: Custom Firecrawl API URL +# FIRECRAWL_API_URL=your_firecrawl_api_url + +# Reranker (Required) +# JINA_API_KEY=your_jina_api_key +# or +# COHERE_API_KEY=your_cohere_api_key + +#======================# +# MCP Configuration # +#======================# + +# Treat 401/403 responses as OAuth requirement when no oauth metadata found +# MCP_OAUTH_ON_AUTH_ERROR=true + +# Timeout for OAuth detection requests in milliseconds +# MCP_OAUTH_DETECTION_TIMEOUT=5000 + +# Cache connection status checks for this many milliseconds to avoid expensive verification +# MCP_CONNECTION_CHECK_TTL=60000 + +# Skip code challenge method validation (e.g., for AWS Cognito that supports S256 but doesn't advertise it) +# When set to true, forces S256 code challenge even if not advertised in .well-known/openid-configuration +# MCP_SKIP_CODE_CHALLENGE_CHECK=false diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..cb767cbd7c --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement here on GitHub or +on the official [Discord Server](https://discord.librechat.ai). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. + +--- + +## [Go Back to ReadMe](../README.md) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000000..ad0a75ab9b --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,168 @@ +# Contributor Guidelines + +Thank you to all the contributors who have helped make this project possible! We welcome various types of contributions, such as bug reports, documentation improvements, feature requests, and code contributions. + +## Contributing Guidelines + +If the feature you would like to contribute has not already received prior approval from the project maintainers (i.e., the feature is currently on the [roadmap](https://github.com/users/danny-avila/projects/2)), please submit a request in the [Feature Requests & Suggestions category](https://github.com/danny-avila/LibreChat/discussions/new?category=feature-requests-suggestions) of the discussions board before beginning work on it. The requests should include specific implementation details, including areas of the application that will be affected by the change (including designs if applicable), and any other relevant information that might be required for a speedy review. However, proposals are not required for small changes, bug fixes, or documentation improvements. Small changes and bug fixes should be tied to an [issue](https://github.com/danny-avila/LibreChat/issues) and included in the corresponding pull request for tracking purposes. + +Please note that a pull request involving a feature that has not been reviewed and approved by the project maintainers may be rejected. We appreciate your understanding and cooperation. + +If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.librechat.ai), where you can engage with other contributors and seek guidance from the community. + +## Our Standards + +We strive to maintain a positive and inclusive environment within our project community. We expect all contributors to adhere to the following standards: + +- Using welcoming and inclusive language. +- Being respectful of differing viewpoints and experiences. +- Gracefully accepting constructive criticism. +- Focusing on what is best for the community. 
+- Showing empathy towards other community members. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that do not align with these standards. + +## To contribute to this project, please adhere to the following guidelines: + +## 1. Development Setup + +1. Use Node.js 20.x. +2. Install TypeScript globally: `npm i -g typescript`. +3. Run `npm ci` to install dependencies. +4. Build the data provider: `npm run build:data-provider`. +5. Build data schemas: `npm run build:data-schemas`. +6. Build API methods: `npm run build:api`. +7. Set up and run unit tests: + - Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`. + - Run backend unit tests: `npm run test:api`. + - Run frontend unit tests: `npm run test:client`. +8. Set up and run integration tests: + - Build client: `cd client && npm run build`. + - Create `.env`: `cp .env.example .env`. + - Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/) and ensure that `mongosh` connects to your local instance. + - Run: `npx install playwright`, then `npx playwright install`. + - Copy `config.local`: `cp e2e/config.local.example.ts e2e/config.local.ts`. + - Copy `librechat.yaml`: `cp librechat.example.yaml librechat.yaml`. + - Run: `npm run e2e`. + +## 2. Development Notes + +1. Before starting work, make sure your main branch has the latest commits with `npm run update`. +2. Run the linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning. +3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works. + - Restart the ESLint server ("ESLint: Restart ESLint Server" in the VS Code command bar) and your IDE after reinstalling or updating. +4. Clear web app localStorage and cookies before and after changes. +5. For frontend changes, compile TypeScript before and after changes to check for introduced errors: `cd client && npm run build`. +6. Run backend unit tests: `npm run test:api`. +7. Run frontend unit tests: `npm run test:client`. +8. Run integration tests: `npm run e2e`. + +## 3. Git Workflow + +We utilize a GitFlow workflow to manage changes to this project's codebase. Follow these general steps when contributing code: + +1. Fork the repository and create a new branch with a descriptive slash-based name (e.g., `new/feature/x`). +2. Implement your changes and ensure that all tests pass. +3. Commit your changes using conventional commit messages with GitFlow flags. Begin the commit message with a tag indicating the change type, such as "feat" (new feature), "fix" (bug fix), "docs" (documentation), or "refactor" (code refactoring), followed by a brief summary of the changes (e.g., `feat: Add new feature X to the project`). +4. Submit a pull request with a clear and concise description of your changes and the reasons behind them. +5. We will review your pull request, provide feedback as needed, and eventually merge the approved changes into the main branch. + +## 4. Commit Message Format + +We follow the [semantic format](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) for commit messages. + +### Example + +``` +feat: add hat wobble +^--^ ^------------^ +| | +| +-> Summary in present tense. +| ++-------> Type: chore, docs, feat, fix, refactor, style, or test.
+``` + +### Commit Guidelines +- Do your best to reduce the number of commits, organizing them as much as possible. Look into [squashing commits](https://www.freecodecamp.org/news/git-squash-commits/) in order to keep a neat history. +- For those who care about maximizing commits for stats, adhere to the above, as I 'squash and merge' an unorganized and/or unformatted commit history, which reduces the number of your commits to 1, for example: +``` +* Update Br.tsx + +* Update Es.tsx + +* Update Br.tsx +``` + + +## 5. Pull Request Process + +When submitting a pull request, please follow these guidelines: + +- Ensure that any installation or build dependencies are removed before the end of the layer when doing a build. +- Update the README.md with details of changes to the interface, including new environment variables, exposed ports, useful file locations, and container parameters. +- Increase the version numbers in any example files and the README.md to reflect the new version that the pull request represents. We use [SemVer](http://semver.org/) for versioning. + +Ensure that your changes meet the following criteria: + +- All tests pass as highlighted [above](#2-development-notes). +- The code is well-formatted and adheres to our coding standards. +- The commit history is clean and easy to follow. You can use `git rebase` or `git merge --squash` to clean your commit history before submitting the pull request. +- The pull request description clearly outlines the changes and the reasons behind them. Be sure to include the steps to test the pull request. + +## 6. Naming Conventions + +Apply the following naming conventions to branches, labels, and other Git-related entities: + +- **Branch names:** Descriptive and slash-based (e.g., `new/feature/x`). +- **Labels:** Descriptive and kebab-case (e.g., `bug-fix`). +- **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`). +- **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`). + +## 7. TypeScript Conversion + +1. **Original State**: The project was initially developed entirely in JavaScript (JS). + +2. **Frontend Transition**: + - We are in the process of transitioning the frontend from JS to TypeScript (TS). + - The transition is nearing completion. + - This conversion is feasible due to React's capability to intermix JS and TS prior to code compilation. It's standard practice to compile/bundle the code in such scenarios. + +3. **Backend Considerations**: + - Transitioning the backend to TypeScript would be a more intricate process, especially for an established Express.js server. + + - **Options for Transition**: + - **Single Phase Overhaul**: This involves converting the entire backend to TypeScript in one go. It's the most straightforward approach but can be disruptive, especially for larger codebases. + + - **Incremental Transition**: Convert parts of the backend progressively. This can be done by: + - Maintaining a separate directory for TypeScript files. + - Gradually migrating and testing individual modules or routes. + - Using a build tool like `tsc` to compile TypeScript files independently until the entire transition is complete. + + - **Compilation Considerations**: + - Introducing a compilation step for the server is an option. This would involve using tools like `ts-node` for development and `tsc` for production builds.
+ - However, this is not a conventional approach for Express.js servers and could introduce added complexity, especially in terms of build and deployment processes. + + - **Current Stance**: At present, this backend transition is of lower priority and might not be pursued. + +## 8. Module Import Conventions + +- `npm` packages first, + - from longest line (top) to shortest (bottom) + +- Followed by typescript types (pertains to data-provider and client workspaces) + - longest line (top) to shortest (bottom) + - types from package come first + +- Lastly, local imports + - longest line (top) to shortest (bottom) + - imports with alias `~` treated the same as relative import with respect to line length + +**Note:** ESLint will automatically enforce these import conventions when you run `npm run lint --fix` or through pre-commit hooks. + +--- + +Please ensure that you adapt this summary to fit the specific context and nuances of your project. + +--- + +## [Go Back to ReadMe](../README.md) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000..37ef799acb --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: [danny-avila] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml new file mode 100644 index 0000000000..610396959f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -0,0 +1,96 @@ +name: Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["🐛 bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + + Before submitting, please: + - Search existing [Issues and Discussions](https://github.com/danny-avila/LibreChat/discussions) to see if your bug has already been reported + - Use [Discussions](https://github.com/danny-avila/LibreChat/discussions) instead of Issues for: + - General inquiries + - Help with setup + - Questions about whether you're experiencing a bug + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + placeholder: Please give as many details as possible + validations: + required: true + - type: textarea + id: version-info + attributes: + label: Version Information + description: | + If using Docker, please run and provide the output of: + ```bash + docker images | grep librechat + ``` + + If running from source, please run and provide the output of: + ```bash + git rev-parse HEAD + ``` + placeholder: Paste the output here + validations: + required: true + - type: textarea + id: steps-to-reproduce + attributes: + label: Steps to Reproduce + description: Please list the steps needed to reproduce the issue. + placeholder: "1. Step 1\n2. Step 2\n3. 
Step 3" + validations: + required: true + - type: dropdown + id: browsers + attributes: + label: What browsers are you seeing the problem on? + multiple: true + options: + - Firefox + - Chrome + - Safari + - Microsoft Edge + - Mobile (iOS) + - Mobile (Android) + - type: textarea + id: logs + attributes: + label: Relevant log output + description: | + Please paste relevant logs that were created when reproducing the error. + + Log locations: + - Docker: Project root directory ./logs + - npm: ./api/logs + + There are two types of logs that can help diagnose the issue: + - debug logs (debug-YYYY-MM-DD.log) + - error logs (error-YYYY-MM-DD.log) + + Error logs contain exact stack traces and are especially helpful, but both can provide valuable information. + Please only include the relevant portions of logs that correspond to when you reproduced the error. + + For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here. + render: shell + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: If applicable, add screenshots to help explain your problem. You can drag and drop, paste images directly here or link to them. + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md) + options: + - label: I agree to follow this project's Code of Conduct + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml new file mode 100644 index 0000000000..613c9e0a01 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -0,0 +1,49 @@ +name: Feature Request +description: File a feature request +title: "[Enhancement]: " +labels: ["✨ enhancement"] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to fill this out! + - type: textarea + id: what + attributes: + label: What features would you like to see added? + description: Please provide as many details as possible. + placeholder: Please provide as many details as possible. + validations: + required: true + - type: textarea + id: details + attributes: + label: More details + description: Please provide additional details if needed. + placeholder: Please provide additional details if needed. + validations: + required: true + - type: dropdown + id: subject + attributes: + label: Which components are impacted by your request? + multiple: true + options: + - General + - UI + - Endpoints + - Plugins + - Other + - type: textarea + id: screenshots + attributes: + label: Pictures + description: If relevant, please include images to help clarify your request. You can drag and drop images directly here, paste them, or provide a link to them. 
+ - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md) + options: + - label: I agree to follow this project's Code of Conduct + required: true diff --git a/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml b/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml new file mode 100644 index 0000000000..49b01a814d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml @@ -0,0 +1,42 @@ +name: Locize Translation Access Request +description: Request access to an additional language in Locize for LibreChat translations. +title: "Locize Access Request: " +labels: ["🌍 i18n", "🔑 access request"] +body: + - type: markdown + attributes: + value: | + Thank you for your interest in contributing to LibreChat translations! + Please fill out the form below to request access to an additional language in **Locize**. + + **🔗 Available Languages:** [View the list here](https://www.librechat.ai/docs/translation) + + **📌 Note:** Ensure that the requested language is supported before submitting your request. + - type: input + id: account_name + attributes: + label: Locize Account Name + description: Please provide your Locize account name (e.g., John Doe). + placeholder: e.g., John Doe + validations: + required: true + - type: input + id: language_requested + attributes: + label: Language Code (ISO 639-1) + description: | + Enter the **ISO 639-1** language code for the language you want to translate into. + Example: `es` for Spanish, `zh-Hant` for Traditional Chinese. + + **🔗 Reference:** [Available Languages](https://www.librechat.ai/docs/translation) + placeholder: e.g., es + validations: + required: true + - type: checkboxes + id: agreement + attributes: + label: Agreement + description: By submitting this request, you confirm that you will contribute responsibly and adhere to the project guidelines. + options: + - label: I agree to use my access solely for contributing to LibreChat translations. + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml b/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml new file mode 100644 index 0000000000..5fddced9f8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml @@ -0,0 +1,33 @@ +name: New Language Request +description: Request to add a new language for LibreChat translations. +title: "New Language Request: " +labels: ["✨ enhancement", "🌍 i18n"] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to submit a new language request! Please fill out the following details so we can review your request. + - type: input + id: language_name + attributes: + label: Language Name + description: Please provide the full name of the language (e.g., Spanish, Mandarin). + placeholder: e.g., Spanish + validations: + required: true + - type: input + id: iso_code + attributes: + label: ISO 639-1 Code + description: Please provide the ISO 639-1 code for the language (e.g., es for Spanish). You can refer to [this list](https://www.w3schools.com/tags/ref_language_codes.asp) for valid codes. + placeholder: e.g., es + validations: + required: true + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md). 
+ options: + - label: I agree to follow this project's Code of Conduct + required: true diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 0000000000..b01e04e016 --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,63 @@ +# Security Policy + +At LibreChat, we prioritize the security of our project and value the contributions of security researchers in helping us improve the security of our codebase. If you discover a security vulnerability within our project, we appreciate your responsible disclosure. Please follow the guidelines below to report any vulnerabilities to us: + +**Note: Only report sensitive vulnerability details via the appropriate private communication channels mentioned below. Public channels, such as GitHub issues and Discord, should be used for initiating contact and establishing private communication channels.** + +## Communication Channels + +When reporting a security vulnerability, you have the following options to reach out to us: + +- **Option 1: GitHub Security Advisory System**: We encourage you to use GitHub's Security Advisory system to report any security vulnerabilities you find. This allows us to receive vulnerability reports directly through GitHub. For more information on how to submit a security advisory report, please refer to the [GitHub Security Advisories documentation](https://docs.github.com/en/code-security/getting-started-with-security-vulnerability-alerts/about-github-security-advisories). + +- **Option 2: GitHub Issues**: You can initiate first contact via GitHub Issues. However, please note that initial contact through GitHub Issues should not include any sensitive details. + +- **Option 3: Discord Server**: You can join our [Discord community](https://discord.librechat.ai) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details. + +_After the initial contact, we will establish a private communication channel for further discussion._ + +### When submitting a vulnerability report, please provide us with the following information: + +- A clear description of the vulnerability, including steps to reproduce it. +- The version(s) of the project affected by the vulnerability. +- Any additional information that may be useful for understanding and addressing the issue. + +We strive to acknowledge vulnerability reports within 72 hours and will keep you informed of the progress towards resolution. + +## Security Updates and Patching + +We are committed to maintaining the security of our open-source project, LibreChat, and promptly addressing any identified vulnerabilities. To ensure the security of our project, we adhere to the following practices: + +- We prioritize security updates for the current major release of our software. +- We actively monitor the GitHub Security Advisory system and the `#issues` channel on Discord for any vulnerability reports. +- We promptly review and validate reported vulnerabilities and take appropriate actions to address them. +- We release security patches and updates in a timely manner to mitigate any identified vulnerabilities. + +Please note that as a security-conscious community, we may not always disclose detailed information about security issues until we have determined that doing so would not put our users or the project at risk. We appreciate your understanding and cooperation in these matters. 
+ +## Scope + +This security policy applies to the following GitHub repository: + +- Repository: [LibreChat](https://github.librechat.ai) + +## Contact + +If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.librechat.ai) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry. + +## Acknowledgments + +We would like to express our gratitude to the security researchers and community members who help us improve the security of our project. Your contributions are invaluable, and we sincerely appreciate your efforts. + +## Bug Bounty Program + +We currently do not have a bug bounty program in place. However, we welcome and appreciate any + + security-related contributions through pull requests (PRs) that address vulnerabilities in our codebase. We believe in the power of collaboration to improve the security of our project and invite you to join us in making it more robust. + +**Reference** +- https://cheatsheetseries.owasp.org/cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html + +--- + +## [Go Back to ReadMe](../README.md) diff --git a/.github/configuration-release.json b/.github/configuration-release.json new file mode 100644 index 0000000000..68fe80ed8f --- /dev/null +++ b/.github/configuration-release.json @@ -0,0 +1,60 @@ +{ + "categories": [ + { + "title": "### ✨ New Features", + "labels": ["feat"] + }, + { + "title": "### 🌍 Internationalization", + "labels": ["i18n"] + }, + { + "title": "### 👐 Accessibility", + "labels": ["a11y"] + }, + { + "title": "### 🔧 Fixes", + "labels": ["Fix", "fix"] + }, + { + "title": "### ⚙️ Other Changes", + "labels": ["ci", "style", "docs", "refactor", "chore"] + } + ], + "ignore_labels": [ + "🔁 duplicate", + "📊 analytics", + "🌱 good first issue", + "🔍 investigation", + "🙏 help wanted", + "❌ invalid", + "❓ question", + "🚫 wontfix", + "🚀 release", + "version" + ], + "base_branches": ["main"], + "sort": { + "order": "ASC", + "on_property": "mergedAt" + }, + "label_extractor": [ + { + "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:", + "target": "$1", + "flags": "i", + "on_property": "title", + "method": "match" + }, + { + "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*", + "target": "version", + "flags": "i", + "on_property": "title", + "method": "match" + } + ], + "template": "## [#{{TO_TAG}}] - #{{TO_TAG_DATE}}\n\nChanges from #{{FROM_TAG}} to #{{TO_TAG}}.\n\n#{{CHANGELOG}}\n\n[See full release details][release-#{{TO_TAG}}]\n\n[release-#{{TO_TAG}}]: https://github.com/#{{OWNER}}/#{{REPO}}/releases/tag/#{{TO_TAG}}\n\n---", + "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})", + "empty_template": "- no changes" +} \ No newline at end of file diff --git a/.github/configuration-unreleased.json b/.github/configuration-unreleased.json new file mode 100644 index 0000000000..29eaf5e13b --- /dev/null +++ b/.github/configuration-unreleased.json @@ -0,0 +1,68 @@ +{ + "categories": [ + { + "title": "### ✨ New Features", + "labels": ["feat"] + }, + { + "title": "### 🌍 Internationalization", + "labels": ["i18n"] + }, + { + "title": "### 👐 Accessibility", + "labels": ["a11y"] + }, + { + "title": "### 🔧 Fixes", + "labels": ["Fix", "fix"] + }, + { + "title": "### ⚙️ Other Changes", + "labels": ["ci", "style", "docs", 
"refactor", "chore"] + } + ], + "ignore_labels": [ + "🔁 duplicate", + "📊 analytics", + "🌱 good first issue", + "🔍 investigation", + "🙏 help wanted", + "❌ invalid", + "❓ question", + "🚫 wontfix", + "🚀 release", + "version", + "action" + ], + "base_branches": ["main"], + "sort": { + "order": "ASC", + "on_property": "mergedAt" + }, + "label_extractor": [ + { + "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:", + "target": "$1", + "flags": "i", + "on_property": "title", + "method": "match" + }, + { + "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*", + "target": "version", + "flags": "i", + "on_property": "title", + "method": "match" + }, + { + "pattern": "^(?:[^A-Za-z0-9]*)(action)\\b.*", + "target": "action", + "flags": "i", + "on_property": "title", + "method": "match" + } + ], + "template": "## [Unreleased]\n\n#{{CHANGELOG}}\n\n---", + "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})", + "empty_template": "- no changes" +} \ No newline at end of file diff --git a/.github/playwright.yml b/.github/playwright.yml new file mode 100644 index 0000000000..28eca14d58 --- /dev/null +++ b/.github/playwright.yml @@ -0,0 +1,72 @@ +# name: Playwright Tests +# on: +# pull_request: +# branches: +# - main +# - dev +# - release/* +# paths: +# - 'api/**' +# - 'client/**' +# - 'packages/**' +# - 'e2e/**' +# jobs: +# tests_e2e: +# name: Run Playwright tests +# if: github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat' +# timeout-minutes: 60 +# runs-on: ubuntu-latest +# env: +# NODE_ENV: CI +# CI: true +# SEARCH: false +# BINGAI_TOKEN: user_provided +# CHATGPT_TOKEN: user_provided +# MONGO_URI: ${{ secrets.MONGO_URI }} +# OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} +# E2E_USER_EMAIL: ${{ secrets.E2E_USER_EMAIL }} +# E2E_USER_PASSWORD: ${{ secrets.E2E_USER_PASSWORD }} +# JWT_SECRET: ${{ secrets.JWT_SECRET }} +# JWT_REFRESH_SECRET: ${{ secrets.JWT_REFRESH_SECRET }} +# CREDS_KEY: ${{ secrets.CREDS_KEY }} +# CREDS_IV: ${{ secrets.CREDS_IV }} +# DOMAIN_CLIENT: ${{ secrets.DOMAIN_CLIENT }} +# DOMAIN_SERVER: ${{ secrets.DOMAIN_SERVER }} +# PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: 1 # Skip downloading during npm install +# PLAYWRIGHT_BROWSERS_PATH: 0 # Places binaries to node_modules/@playwright/test +# TITLE_CONVO: false +# steps: +# - uses: actions/checkout@v4 +# - uses: actions/setup-node@v4 +# with: +# node-version: 18 +# cache: 'npm' + +# - name: Install global dependencies +# run: npm ci + +# # - name: Remove sharp dependency +# # run: rm -rf node_modules/sharp + +# # - name: Install sharp with linux dependencies +# # run: cd api && SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp + +# - name: Build Client +# run: npm run frontend + +# - name: Install Playwright +# run: | +# npx playwright install-deps +# npm install -D @playwright/test@latest +# npx playwright install chromium + +# - name: Run Playwright tests +# run: npm run e2e:ci + +# - name: Upload playwright report +# uses: actions/upload-artifact@v3 +# if: always() +# with: +# name: playwright-report +# path: e2e/playwright-report/ +# retention-days: 30 \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..cb637787f1 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,41 @@ +# Pull Request Template + +⚠️ Before Submitting a PR, Please Review: +- Please ensure that you have thoroughly read and understood the 
[Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) before submitting your Pull Request. + +⚠️ Documentation Updates Notice: +- Kindly note that documentation updates are managed in this repository: [librechat.ai](https://github.com/LibreChat-AI/librechat.ai) + +## Summary + +Please provide a brief summary of your changes and the related issue. Include any motivation and context that is relevant to your changes. If there are any dependencies necessary for your changes, please list them here. + +## Change Type + +Please delete any irrelevant options. + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] This change requires a documentation update +- [ ] Translation update + +## Testing + +Please describe your test process and include instructions so that we can reproduce your test. If there are any important variables for your testing configuration, list them here. + +### **Test Configuration**: + +## Checklist + +Please delete any irrelevant options. + +- [ ] My code adheres to this project's style guidelines +- [ ] I have performed a self-review of my own code +- [ ] I have commented in any complex areas of my code +- [ ] I have made pertinent documentation changes +- [ ] My changes do not introduce new warnings +- [ ] I have written tests demonstrating that my changes are effective or that my feature works +- [ ] Local unit tests pass with my changes +- [ ] Any changes dependent on mine have been merged and published in downstream modules. +- [ ] A pull request for updating the documentation has been submitted. diff --git a/.github/workflows/a11y.yml b/.github/workflows/a11y.yml new file mode 100644 index 0000000000..a7cfd08169 --- /dev/null +++ b/.github/workflows/a11y.yml @@ -0,0 +1,26 @@ +name: Lint for accessibility issues + +on: + pull_request: + paths: + - 'client/src/**' + workflow_dispatch: + inputs: + run_workflow: + description: 'Set to true to run this workflow' + required: true + default: 'false' + +jobs: + axe-linter: + runs-on: ubuntu-latest + if: > + (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat') || + (github.event_name == 'workflow_dispatch' && github.event.inputs.run_workflow == 'true') + + steps: + - uses: actions/checkout@v4 + - uses: dequelabs/axe-linter-action@v1 + with: + api_key: ${{ secrets.AXE_LINTER_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml new file mode 100644 index 0000000000..8375f398c3 --- /dev/null +++ b/.github/workflows/backend-review.yml @@ -0,0 +1,75 @@ +name: Backend Unit Tests +on: + pull_request: + branches: + - main + - dev + - dev-staging + - release/* + paths: + - 'api/**' + - 'packages/**' +jobs: + tests_Backend: + name: Run Backend unit tests + timeout-minutes: 60 + runs-on: ubuntu-latest + env: + MONGO_URI: ${{ secrets.MONGO_URI }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + JWT_SECRET: ${{ secrets.JWT_SECRET }} + CREDS_KEY: ${{ secrets.CREDS_KEY }} + CREDS_IV: ${{ secrets.CREDS_IV }} + BAN_VIOLATIONS: ${{ secrets.BAN_VIOLATIONS }} + BAN_DURATION: ${{ secrets.BAN_DURATION }} + BAN_INTERVAL: ${{ secrets.BAN_INTERVAL }} + NODE_ENV: CI + steps: + - uses: actions/checkout@v4 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 
20 + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Install Data Provider Package + run: npm run build:data-provider + + - name: Install Data Schemas Package + run: npm run build:data-schemas + + - name: Install API Package + run: npm run build:api + + - name: Create empty auth.json file + run: | + mkdir -p api/data + echo '{}' > api/data/auth.json + + - name: Check for Circular dependency in rollup + working-directory: ./packages/data-provider + run: | + output=$(npm run rollup:api) + echo "$output" + if echo "$output" | grep -q "Circular dependency"; then + echo "Error: Circular dependency detected!" + exit 1 + fi + + - name: Prepare .env.test file + run: cp api/test/.env.test.example api/test/.env.test + + - name: Run unit tests + run: cd api && npm run test:ci + + - name: Run librechat-data-provider unit tests + run: cd packages/data-provider && npm run test:ci + + - name: Run @librechat/data-schemas unit tests + run: cd packages/data-schemas && npm run test:ci + + - name: Run @librechat/api unit tests + run: cd packages/api && npm run test:ci diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000000..a2131c4b98 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,38 @@ +name: Linux_Container_Workflow + +on: + workflow_dispatch: + +env: + RUNNER_VERSION: 2.293.0 + +jobs: + build-and-push: + runs-on: ubuntu-latest + steps: + # checkout the repo + - name: 'Checkout GitHub Action' + uses: actions/checkout@main + + - name: 'Login via Azure CLI' + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + + - name: 'Build GitHub Runner container image' + uses: azure/docker-login@v1 + with: + login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_PASSWORD }} + - run: | + docker build --build-arg RUNNER_VERSION=${{ env.RUNNER_VERSION }} -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }} . 
+ + - name: 'Push container image to ACR' + uses: azure/docker-login@v1 + with: + login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_PASSWORD }} + - run: | + docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }} diff --git a/.github/workflows/cache-integration-tests.yml b/.github/workflows/cache-integration-tests.yml new file mode 100644 index 0000000000..251b61564a --- /dev/null +++ b/.github/workflows/cache-integration-tests.yml @@ -0,0 +1,90 @@ +name: Cache Integration Tests + +on: + pull_request: + branches: + - main + - dev + - dev-staging + - release/* + paths: + - 'packages/api/src/cache/**' + - 'packages/api/src/cluster/**' + - 'packages/api/src/mcp/**' + - 'redis-config/**' + - '.github/workflows/cache-integration-tests.yml' + +jobs: + cache_integration_tests: + name: Integration Tests that use actual Redis Cache + timeout-minutes: 30 + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install Redis tools + run: | + sudo apt-get update + sudo apt-get install -y redis-server redis-tools + + - name: Start Single Redis Instance + run: | + redis-server --daemonize yes --port 6379 + sleep 2 + # Verify single Redis is running + redis-cli -p 6379 ping || exit 1 + + - name: Start Redis Cluster + working-directory: redis-config + run: | + chmod +x start-cluster.sh stop-cluster.sh + ./start-cluster.sh + sleep 10 + # Verify cluster is running + redis-cli -p 7001 cluster info || exit 1 + redis-cli -p 7002 cluster info || exit 1 + redis-cli -p 7003 cluster info || exit 1 + + - name: Install dependencies + run: npm ci + + - name: Build packages + run: | + npm run build:data-provider + npm run build:data-schemas + npm run build:api + + - name: Run all cache integration tests (Single Redis Node) + working-directory: packages/api + env: + NODE_ENV: test + USE_REDIS: true + USE_REDIS_CLUSTER: false + REDIS_URI: redis://127.0.0.1:6379 + run: npm run test:cache-integration + + - name: Run all cache integration tests (Redis Cluster) + working-directory: packages/api + env: + NODE_ENV: test + USE_REDIS: true + USE_REDIS_CLUSTER: true + REDIS_URI: redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003 + run: npm run test:cache-integration + + - name: Stop Redis Cluster + if: always() + working-directory: redis-config + run: ./stop-cluster.sh || true + + - name: Stop Single Redis Instance + if: always() + run: redis-cli -p 6379 shutdown || true diff --git a/.github/workflows/client.yml b/.github/workflows/client.yml new file mode 100644 index 0000000000..e3e3e445e4 --- /dev/null +++ b/.github/workflows/client.yml @@ -0,0 +1,64 @@ +name: Publish `@librechat/client` to NPM + +on: + push: + branches: + - main + paths: + - 'packages/client/package.json' + workflow_dispatch: + inputs: + reason: + description: 'Reason for manual trigger' + required: false + default: 'Manual publish requested' + +permissions: + id-token: write # Required for OIDC trusted publishing + contents: read + +jobs: + build-and-publish: + runs-on: ubuntu-latest + environment: publish # Must match npm trusted publisher config + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: '20.x' + registry-url: 'https://registry.npmjs.org' + + - name: Update npm for OIDC support + run: npm install 
-g npm@latest # Must be 11.5.1+ for provenance + + - name: Install client dependencies + run: cd packages/client && npm ci + + - name: Build client + run: cd packages/client && npm run build + + - name: Check version change + id: check + working-directory: packages/client + run: | + PACKAGE_VERSION=$(node -p "require('./package.json').version") + PUBLISHED_VERSION=$(npm view @librechat/client version 2>/dev/null || echo "0.0.0") + if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then + echo "No version change, skipping publish" + echo "skip=true" >> $GITHUB_OUTPUT + else + echo "Version changed, proceeding with publish" + echo "skip=false" >> $GITHUB_OUTPUT + fi + + - name: Pack package + if: steps.check.outputs.skip != 'true' + working-directory: packages/client + run: npm pack + + - name: Publish + if: steps.check.outputs.skip != 'true' + working-directory: packages/client + run: npm publish *.tgz --access public --provenance diff --git a/.github/workflows/data-provider.yml b/.github/workflows/data-provider.yml new file mode 100644 index 0000000000..9a514b0076 --- /dev/null +++ b/.github/workflows/data-provider.yml @@ -0,0 +1,47 @@ +name: Publish `librechat-data-provider` to NPM + +on: + push: + branches: + - main + paths: + - 'packages/data-provider/package.json' + workflow_dispatch: + inputs: + reason: + description: 'Reason for manual trigger' + required: false + default: 'Manual publish requested' + +permissions: + id-token: write # Required for OIDC trusted publishing + contents: read + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + - run: cd packages/data-provider && npm ci + - run: cd packages/data-provider && npm run build + + publish-npm: + needs: build + runs-on: ubuntu-latest + environment: publish # Must match npm trusted publisher config + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + registry-url: 'https://registry.npmjs.org' + + - name: Update npm for OIDC support + run: npm install -g npm@latest # Must be 11.5.1+ for provenance + + - run: cd packages/data-provider && npm ci + - run: cd packages/data-provider && npm run build + - run: cd packages/data-provider && npm publish --provenance diff --git a/.github/workflows/data-schemas.yml b/.github/workflows/data-schemas.yml new file mode 100644 index 0000000000..882dc4f4b6 --- /dev/null +++ b/.github/workflows/data-schemas.yml @@ -0,0 +1,64 @@ +name: Publish `@librechat/data-schemas` to NPM + +on: + push: + branches: + - main + paths: + - 'packages/data-schemas/package.json' + workflow_dispatch: + inputs: + reason: + description: 'Reason for manual trigger' + required: false + default: 'Manual publish requested' + +permissions: + id-token: write # Required for OIDC trusted publishing + contents: read + +jobs: + build-and-publish: + runs-on: ubuntu-latest + environment: publish # Must match npm trusted publisher config + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: '20.x' + registry-url: 'https://registry.npmjs.org' + + - name: Update npm for OIDC support + run: npm install -g npm@latest # Must be 11.5.1+ for provenance + + - name: Install dependencies + run: cd packages/data-schemas && npm ci + + - name: Build + run: cd packages/data-schemas && npm run build + + - name: Check version change + id: check + working-directory: packages/data-schemas + run: | + PACKAGE_VERSION=$(node -p 
"require('./package.json').version") + PUBLISHED_VERSION=$(npm view @librechat/data-schemas version 2>/dev/null || echo "0.0.0") + if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then + echo "No version change, skipping publish" + echo "skip=true" >> $GITHUB_OUTPUT + else + echo "Version changed, proceeding with publish" + echo "skip=false" >> $GITHUB_OUTPUT + fi + + - name: Pack package + if: steps.check.outputs.skip != 'true' + working-directory: packages/data-schemas + run: npm pack + + - name: Publish + if: steps.check.outputs.skip != 'true' + working-directory: packages/data-schemas + run: npm publish *.tgz --access public --provenance diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml new file mode 100644 index 0000000000..a255932e3e --- /dev/null +++ b/.github/workflows/deploy-dev.yml @@ -0,0 +1,46 @@ +name: Update Test Server + +on: + workflow_run: + workflows: ["Docker Dev Branch Images Build"] + types: + - completed + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + if: | + github.repository == 'danny-avila/LibreChat' && + (github.event_name == 'workflow_dispatch' || + (github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'dev')) + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install SSH Key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.DO_SSH_PRIVATE_KEY }} + known_hosts: ${{ secrets.DO_KNOWN_HOSTS }} + + - name: Run update script on DigitalOcean Droplet + env: + DO_HOST: ${{ secrets.DO_HOST }} + DO_USER: ${{ secrets.DO_USER }} + run: | + ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF + sudo -i -u danny bash << 'EEOF' + cd ~/LibreChat && \ + git fetch origin main && \ + sudo npm run stop:deployed && \ + sudo docker images --format "{{.Repository}}:{{.ID}}" | grep -E "lc-dev|librechat" | cut -d: -f2 | xargs -r sudo docker rmi -f || true && \ + sudo npm run update:deployed && \ + git checkout dev && \ + git pull origin dev && \ + git checkout do-deploy && \ + git rebase dev && \ + sudo npm run start:deployed && \ + echo "Update completed. Application should be running now." + EEOF + EOF diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000000..5c143b4531 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,38 @@ +name: Deploy_GHRunner_Linux_ACI + +on: + workflow_dispatch: + +env: + RUNNER_VERSION: 2.293.0 + ACI_RESOURCE_GROUP: 'Demo-ACI-GitHub-Runners-RG' + ACI_NAME: 'gh-runner-linux-01' + DNS_NAME_LABEL: 'gh-lin-01' + GH_OWNER: ${{ github.repository_owner }} + GH_REPOSITORY: 'LibreChat' #Change here to deploy self hosted runner ACI to another repo. 
+ +jobs: + deploy-gh-runner-aci: + runs-on: ubuntu-latest + steps: + # checkout the repo + - name: 'Checkout GitHub Action' + uses: actions/checkout@v4 + + - name: 'Login via Azure CLI' + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + + - name: 'Deploy to Azure Container Instances' + uses: 'azure/aci-deploy@v1' + with: + resource-group: ${{ env.ACI_RESOURCE_GROUP }} + image: ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }} + registry-login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }} + registry-username: ${{ secrets.REGISTRY_USERNAME }} + registry-password: ${{ secrets.REGISTRY_PASSWORD }} + name: ${{ env.ACI_NAME }} + dns-name-label: ${{ env.DNS_NAME_LABEL }} + environment-variables: GH_TOKEN=${{ secrets.PAT_TOKEN }} GH_OWNER=${{ env.GH_OWNER }} GH_REPOSITORY=${{ env.GH_REPOSITORY }} + location: 'eastus' diff --git a/.github/workflows/dev-branch-images.yml b/.github/workflows/dev-branch-images.yml new file mode 100644 index 0000000000..b7ad470314 --- /dev/null +++ b/.github/workflows/dev-branch-images.yml @@ -0,0 +1,72 @@ +name: Docker Dev Branch Images Build + +on: + workflow_dispatch: + push: + branches: + - dev + paths: + - 'api/**' + - 'client/**' + - 'packages/**' + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: lc-dev-api + - target: node + file: Dockerfile + image_name: lc-dev + + steps: + # Check out the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} \ No newline at end of file diff --git a/.github/workflows/dev-images.yml b/.github/workflows/dev-images.yml new file mode 100644 index 0000000000..41d427c6c8 --- /dev/null +++ b/.github/workflows/dev-images.yml @@ -0,0 +1,72 @@ +name: Docker Dev Images Build + +on: + workflow_dispatch: + push: + branches: + - main + paths: + - 'api/**' + - 'client/**' + - 'packages/**' + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: librechat-dev-api + - target: node + file: Dockerfile + image_name: librechat-dev + + steps: + # Check out the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} diff --git a/.github/workflows/dev-staging-images.yml b/.github/workflows/dev-staging-images.yml new file mode 100644 index 0000000000..e63dc5f0af --- /dev/null +++ b/.github/workflows/dev-staging-images.yml @@ -0,0 +1,66 @@ +name: Docker Dev Staging Images Build + +on: + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: lc-dev-staging-api + - target: node + file: Dockerfile + image_name: lc-dev-staging + + steps: + # Check out the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . + file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} + diff --git a/.github/workflows/eslint-ci.yml b/.github/workflows/eslint-ci.yml new file mode 100644 index 0000000000..8203da4e8b --- /dev/null +++ b/.github/workflows/eslint-ci.yml @@ -0,0 +1,60 @@ +name: ESLint Code Quality Checks + +on: + pull_request: + branches: + - main + - dev + - dev-staging + - release/* + paths: + - 'api/**' + - 'client/**' + +jobs: + eslint_checks: + name: Run ESLint Linting + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + actions: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: npm + + - name: Install dependencies + run: npm ci + + # Run ESLint on changed files within the api/ and client/ directories. + - name: Run ESLint on changed files + run: | + # Extract the base commit SHA from the pull_request event payload. 
+ BASE_SHA=$(jq --raw-output .pull_request.base.sha "$GITHUB_EVENT_PATH") + echo "Base commit SHA: $BASE_SHA" + + # Get changed files (only JS/TS files in api/ or client/) + CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB "$BASE_SHA" HEAD | grep -E '^(api|client)/.*\.(js|jsx|ts|tsx)$' || true) + + # Debug output + echo "Changed files:" + echo "$CHANGED_FILES" + + # Ensure there are files to lint before running ESLint + if [[ -z "$CHANGED_FILES" ]]; then + echo "No matching files changed. Skipping ESLint." + exit 0 + fi + + # Run ESLint + npx eslint --no-error-on-unmatched-pattern \ + --config eslint.config.mjs \ + $CHANGED_FILES diff --git a/.github/workflows/frontend-review.yml b/.github/workflows/frontend-review.yml new file mode 100644 index 0000000000..907d07d307 --- /dev/null +++ b/.github/workflows/frontend-review.yml @@ -0,0 +1,57 @@ +name: Frontend Unit Tests + +on: + pull_request: + branches: + - main + - dev + - dev-staging + - release/* + paths: + - 'client/**' + - 'packages/data-provider/**' + +jobs: + tests_frontend_ubuntu: + name: Run frontend unit tests on Ubuntu + timeout-minutes: 60 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build Client + run: npm run frontend:ci + + - name: Run unit tests + run: npm run test:ci --verbose + working-directory: client + + tests_frontend_windows: + name: Run frontend unit tests on Windows + timeout-minutes: 60 + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build Client + run: npm run frontend:ci + + - name: Run unit tests + run: npm run test:ci --verbose + working-directory: client \ No newline at end of file diff --git a/.github/workflows/generate_embeddings.yml b/.github/workflows/generate_embeddings.yml new file mode 100644 index 0000000000..c514f9c1d6 --- /dev/null +++ b/.github/workflows/generate_embeddings.yml @@ -0,0 +1,20 @@ +name: 'generate_embeddings' +on: + workflow_dispatch: + push: + branches: + - main + paths: + - 'docs/**' + +jobs: + generate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: supabase/embeddings-generator@v0.0.5 + with: + supabase-url: ${{ secrets.SUPABASE_URL }} + supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }} + openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }} + docs-root-path: 'docs' \ No newline at end of file diff --git a/.github/workflows/helmcharts.yml b/.github/workflows/helmcharts.yml new file mode 100644 index 0000000000..2b9f7f45de --- /dev/null +++ b/.github/workflows/helmcharts.yml @@ -0,0 +1,75 @@ +name: Build Helm Charts on Tag + +# The workflow is triggered when a tag is pushed +on: + push: + tags: + - "chart-*" + +jobs: + release: + permissions: + contents: write + packages: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Install Helm + uses: azure/setup-helm@v4 + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - name: Build Subchart Deps + run: | + cd helm/librechat + helm dependency build + cd ../librechat-rag-api + helm dependency build + + - name: Get Chart Version 
+ id: chart-version + run: | + CHART_VERSION=$(echo "${{ github.ref_name }}" | cut -d'-' -f2) + echo "CHART_VERSION=${CHART_VERSION}" >> "$GITHUB_OUTPUT" + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Run Helm OCI Charts Releaser + # This is for the librechat chart + - name: Release Helm OCI Charts for librechat + uses: appany/helm-oci-chart-releaser@v0.4.2 + with: + name: librechat + repository: ${{ github.actor }}/librechat-chart + tag: ${{ steps.chart-version.outputs.CHART_VERSION }} + path: helm/librechat + registry: ghcr.io + registry_username: ${{ github.actor }} + registry_password: ${{ secrets.GITHUB_TOKEN }} + + # this is for the librechat-rag-api chart + - name: Release Helm OCI Charts for librechat-rag-api + uses: appany/helm-oci-chart-releaser@v0.4.2 + with: + name: librechat-rag-api + repository: ${{ github.actor }}/librechat-chart + tag: ${{ steps.chart-version.outputs.CHART_VERSION }} + path: helm/librechat-rag-api + registry: ghcr.io + registry_username: ${{ github.actor }} + registry_password: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/i18n-unused-keys.yml b/.github/workflows/i18n-unused-keys.yml new file mode 100644 index 0000000000..8f773532d3 --- /dev/null +++ b/.github/workflows/i18n-unused-keys.yml @@ -0,0 +1,149 @@ +name: Detect Unused i18next Strings + +# This workflow checks for unused i18n keys in translation files. +# It has special handling for: +# - com_ui_special_var_* keys that are dynamically constructed +# - com_agents_category_* keys that are stored in the database and used dynamically + +on: + pull_request: + paths: + - "client/src/**" + - "api/**" + - "packages/data-provider/src/**" + - "packages/client/**" + - "packages/data-schemas/src/**" + +jobs: + detect-unused-i18n-keys: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Find unused i18next keys + id: find-unused + run: | + echo "🔍 Scanning for unused i18next keys..." + + # Define paths + I18N_FILE="client/src/locales/en/translation.json" + SOURCE_DIRS=("client/src" "api" "packages/data-provider/src" "packages/client" "packages/data-schemas/src") + + # Check if translation file exists + if [[ ! 
-f "$I18N_FILE" ]]; then + echo "::error title=Missing i18n File::Translation file not found: $I18N_FILE" + exit 1 + fi + + # Extract all keys from the JSON file + KEYS=$(jq -r 'keys[]' "$I18N_FILE") + + # Track unused keys + UNUSED_KEYS=() + + # Check if each key is used in the source code + for KEY in $KEYS; do + FOUND=false + + # Special case for dynamically constructed special variable keys + if [[ "$KEY" == com_ui_special_var_* ]]; then + # Check if TSpecialVarLabel is used in the codebase + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "TSpecialVarLabel" "$DIR"; then + FOUND=true + break + fi + done + + # Also check if the key is directly used somewhere + if [[ "$FOUND" == false ]]; then + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then + FOUND=true + break + fi + done + fi + # Special case for agent category keys that are dynamically used from database + elif [[ "$KEY" == com_agents_category_* ]]; then + # Check if agent category localization is being used + for DIR in "${SOURCE_DIRS[@]}"; do + # Check for dynamic category label/description usage + if grep -r --include=\*.{js,jsx,ts,tsx} -E "category\.(label|description).*startsWith.*['\"]com_" "$DIR" > /dev/null 2>&1 || \ + # Check for the method that defines these keys + grep -r --include=\*.{js,jsx,ts,tsx} "ensureDefaultCategories" "$DIR" > /dev/null 2>&1 || \ + # Check for direct usage in agentCategory.ts + grep -r --include=\*.ts -E "label:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1 || \ + grep -r --include=\*.ts -E "description:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1; then + FOUND=true + break + fi + done + + # Also check if the key is directly used somewhere + if [[ "$FOUND" == false ]]; then + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then + FOUND=true + break + fi + done + fi + else + # Regular check for other keys + for DIR in "${SOURCE_DIRS[@]}"; do + if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then + FOUND=true + break + fi + done + fi + + if [[ "$FOUND" == false ]]; then + UNUSED_KEYS+=("$KEY") + fi + done + + # Output results + if [[ ${#UNUSED_KEYS[@]} -gt 0 ]]; then + echo "🛑 Found ${#UNUSED_KEYS[@]} unused i18n keys:" + echo "unused_keys=$(echo "${UNUSED_KEYS[@]}" | jq -R -s -c 'split(" ")')" >> $GITHUB_ENV + for KEY in "${UNUSED_KEYS[@]}"; do + echo "::warning title=Unused i18n Key::'$KEY' is defined but not used in the codebase." + done + else + echo "✅ No unused i18n keys detected!" + echo "unused_keys=[]" >> $GITHUB_ENV + fi + + - name: Post verified comment on PR + if: env.unused_keys != '[]' + run: | + PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH") + + # Format the unused keys list as checkboxes for easy manual checking. 
+ FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- [ ] `/;s/$/`/' ) + + COMMENT_BODY=$(cat <> $GITHUB_ENV + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . + file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} diff --git a/.github/workflows/tag-images.yml b/.github/workflows/tag-images.yml new file mode 100644 index 0000000000..e90f43978a --- /dev/null +++ b/.github/workflows/tag-images.yml @@ -0,0 +1,67 @@ +name: Docker Images Build on Tag + +on: + push: + tags: + - '*' + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: librechat-api + - target: node + file: Dockerfile + image_name: librechat + + steps: + # Check out the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} diff --git a/.github/workflows/unused-packages.yml b/.github/workflows/unused-packages.yml new file mode 100644 index 0000000000..f67c1d23be --- /dev/null +++ b/.github/workflows/unused-packages.yml @@ -0,0 +1,281 @@ +name: Detect Unused NPM Packages + +on: + pull_request: + paths: + - 'package.json' + - 'package-lock.json' + - 'client/**' + - 'api/**' + - 'packages/client/**' + - 'packages/api/**' + +jobs: + detect-unused-packages: + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install depcheck + run: npm install -g depcheck + + - name: Validate JSON files + run: | + for FILE in package.json client/package.json api/package.json packages/client/package.json; do + if [[ -f "$FILE" ]]; then + jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1) + fi + done + + - name: Extract Dependencies Used in Scripts + id: extract-used-scripts + run: | + extract_deps_from_scripts() { + local package_file=$1 + if [[ -f "$package_file" ]]; then + jq -r '.scripts | to_entries[].value' "$package_file" | \ + grep -oE '([a-zA-Z0-9_-]+)' | sort -u > used_scripts.txt + else + touch used_scripts.txt + fi + } + + extract_deps_from_scripts "package.json" + mv used_scripts.txt root_used_deps.txt + + extract_deps_from_scripts "client/package.json" + mv used_scripts.txt client_used_deps.txt + + extract_deps_from_scripts "api/package.json" + mv used_scripts.txt api_used_deps.txt + + - name: Extract Dependencies Used in Source Code + id: extract-used-code + run: | + extract_deps_from_code() { + local folder=$1 + local output_file=$2 + + # Initialize empty output file + > "$output_file" + + if [[ -d "$folder" ]]; then + # Extract require() statements (use explicit includes for portability) + grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" >> "$output_file" || true + + # Extract ES6 imports - import x from 'module' + grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true + + # import 'module' (side-effect imports) + grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true + + # export { x } from 'module' or export * from 'module' + grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/export .* from 
['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true + + # import type { x } from 'module' (TypeScript) + grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.ts' --include='*.tsx' 2>/dev/null | \ + sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true + + # Remove subpath imports but keep the base package + # For scoped packages: '@scope/pkg/subpath' -> '@scope/pkg' + # For regular packages: 'pkg/subpath' -> 'pkg' + # Scoped packages (must keep @scope/package, strip anything after) + sed -i -E 's|^(@[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true + # Non-scoped packages (keep package name, strip subpath) + sed -i -E 's|^([a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true + + sort -u "$output_file" -o "$output_file" + fi + } + + extract_deps_from_code "." root_used_code.txt + extract_deps_from_code "client" client_used_code.txt + extract_deps_from_code "api" api_used_code.txt + + # Extract dependencies used by workspace packages + # These packages are used in the workspace but dependencies are provided by parent package.json + extract_deps_from_code "packages/client" packages_client_used_code.txt + extract_deps_from_code "packages/api" packages_api_used_code.txt + + - name: Get @librechat/client dependencies + id: get-librechat-client-deps + run: | + if [[ -f "packages/client/package.json" ]]; then + # Get all dependencies from @librechat/client (dependencies, devDependencies, and peerDependencies) + DEPS=$(jq -r '.dependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "") + DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "") + PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "") + + # Combine all dependencies + echo "$DEPS" > librechat_client_deps.txt + echo "$DEV_DEPS" >> librechat_client_deps.txt + echo "$PEER_DEPS" >> librechat_client_deps.txt + + # Also include dependencies that are imported in packages/client + cat packages_client_used_code.txt >> librechat_client_deps.txt + + # Remove empty lines and sort + grep -v '^$' librechat_client_deps.txt | sort -u > temp_deps.txt + mv temp_deps.txt librechat_client_deps.txt + else + touch librechat_client_deps.txt + fi + + - name: Get @librechat/api dependencies + id: get-librechat-api-deps + run: | + if [[ -f "packages/api/package.json" ]]; then + # Get all dependencies from @librechat/api (dependencies, devDependencies, and peerDependencies) + DEPS=$(jq -r '.dependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + + # Combine all dependencies + echo "$DEPS" > librechat_api_deps.txt + echo "$DEV_DEPS" >> librechat_api_deps.txt + echo "$PEER_DEPS" >> librechat_api_deps.txt + + # Also include dependencies that are imported in packages/api + cat packages_api_used_code.txt >> librechat_api_deps.txt + + # Remove empty lines and sort + grep -v '^$' librechat_api_deps.txt | sort -u > temp_deps.txt + mv temp_deps.txt librechat_api_deps.txt + else + touch librechat_api_deps.txt + fi + + - name: Extract Workspace Dependencies + id: extract-workspace-deps + run: | + # Function to get dependencies from a workspace package that are used by another package + get_workspace_package_deps() 
{ + local package_json=$1 + local output_file=$2 + + # Get all workspace dependencies (starting with @librechat/) + if [[ -f "$package_json" ]]; then + local workspace_deps=$(jq -r '.dependencies // {} | to_entries[] | select(.key | startswith("@librechat/")) | .key' "$package_json" 2>/dev/null || echo "") + + # For each workspace dependency, get its dependencies + for dep in $workspace_deps; do + # Convert @librechat/api to packages/api + local workspace_path=$(echo "$dep" | sed 's/@librechat\//packages\//') + local workspace_package_json="${workspace_path}/package.json" + + if [[ -f "$workspace_package_json" ]]; then + # Extract all dependencies from the workspace package + jq -r '.dependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file" + # Also extract peerDependencies + jq -r '.peerDependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file" + fi + done + fi + + if [[ -f "$output_file" ]]; then + sort -u "$output_file" -o "$output_file" + else + touch "$output_file" + fi + } + + # Get workspace dependencies for each package + get_workspace_package_deps "package.json" root_workspace_deps.txt + get_workspace_package_deps "client/package.json" client_workspace_deps.txt + get_workspace_package_deps "api/package.json" api_workspace_deps.txt + + - name: Run depcheck for root package.json + id: check-root + run: | + if [[ -f "package.json" ]]; then + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + # Exclude dependencies used in scripts, code, and workspace packages + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt root_workspace_deps.txt | sort) || echo "") + echo "ROOT_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + fi + + - name: Run depcheck for client/package.json + id: check-client + run: | + if [[ -f "client/package.json" ]]; then + chmod -R 755 client + cd client + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + # Exclude dependencies used in scripts, code, workspace packages, and @librechat/client imports + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt ../packages_client_used_code.txt ../librechat_client_deps.txt 2>/dev/null | sort -u) || echo "") + # Filter out false positives + UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "") + echo "CLIENT_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + cd .. + fi + + - name: Run depcheck for api/package.json + id: check-api + run: | + if [[ -f "api/package.json" ]]; then + chmod -R 755 api + cd api + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + # Exclude dependencies used in scripts, code, workspace packages, and @librechat/api imports + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt ../packages_api_used_code.txt ../librechat_api_deps.txt 2>/dev/null | sort -u) || echo "") + echo "API_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + cd .. 
+ fi + + - name: Post comment on PR if unused dependencies are found + if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != '' + run: | + PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH") + + ROOT_LIST=$(echo "$ROOT_UNUSED" | awk '{print "- `" $0 "`"}') + CLIENT_LIST=$(echo "$CLIENT_UNUSED" | awk '{print "- `" $0 "`"}') + API_LIST=$(echo "$API_UNUSED" | awk '{print "- `" $0 "`"}') + + COMMENT_BODY=$(cat </**"], + "program": "${workspaceFolder}/api/server/index.js", + "env": { + "NODE_ENV": "production", + "NODE_TLS_REJECT_UNAUTHORIZED": "0" + }, + "console": "integratedTerminal", + "envFile": "${workspaceFolder}/.env" + } + ] +} diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..a8cb8282bd --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,236 @@ +# Changelog + +All notable changes to this project will be documented in this file. + + + + + + +## [Unreleased] + +### ✨ New Features + +- ✨ feat: implement search parameter updates by **@mawburn** in [#7151](https://github.com/danny-avila/LibreChat/pull/7151) +- 🎏 feat: Add MCP support for Streamable HTTP Transport by **@benverhees** in [#7353](https://github.com/danny-avila/LibreChat/pull/7353) +- 🔒 feat: Add Content Security Policy using Helmet middleware by **@rubentalstra** in [#7377](https://github.com/danny-avila/LibreChat/pull/7377) +- ✨ feat: Add Normalization for MCP Server Names by **@danny-avila** in [#7421](https://github.com/danny-avila/LibreChat/pull/7421) +- 📊 feat: Improve Helm Chart by **@hofq** in [#3638](https://github.com/danny-avila/LibreChat/pull/3638) +- 🦾 feat: Claude-4 Support by **@danny-avila** in [#7509](https://github.com/danny-avila/LibreChat/pull/7509) +- 🪨 feat: Bedrock Support for Claude-4 Reasoning by **@danny-avila** in [#7517](https://github.com/danny-avila/LibreChat/pull/7517) + +### 🌍 Internationalization + +- 🌍 i18n: Add `Danish` and `Czech` and `Catalan` localization support by **@rubentalstra** in [#7373](https://github.com/danny-avila/LibreChat/pull/7373) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7375](https://github.com/danny-avila/LibreChat/pull/7375) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7468](https://github.com/danny-avila/LibreChat/pull/7468) + +### 🔧 Fixes + +- 💬 fix: update aria-label for accessibility in ConvoLink component by **@berry-13** in [#7320](https://github.com/danny-avila/LibreChat/pull/7320) +- 🔑 fix: use `apiKey` instead of `openAIApiKey` in OpenAI-like Config by **@danny-avila** in [#7337](https://github.com/danny-avila/LibreChat/pull/7337) +- 🔄 fix: update navigation logic in `useFocusChatEffect` to ensure correct search parameters are used by **@mawburn** in [#7340](https://github.com/danny-avila/LibreChat/pull/7340) +- 🔄 fix: Improve MCP Connection Cleanup by **@danny-avila** in [#7400](https://github.com/danny-avila/LibreChat/pull/7400) +- 🛡️ fix: Preset and Validation Logic for URL Query Params by **@danny-avila** in [#7407](https://github.com/danny-avila/LibreChat/pull/7407) +- 🌘 fix: artifact of preview text is illegible in dark mode by **@nhtruong** in [#7405](https://github.com/danny-avila/LibreChat/pull/7405) +- 🛡️ fix: Temporarily Remove CSP until Configurable by **@danny-avila** in [#7419](https://github.com/danny-avila/LibreChat/pull/7419) +- 💽 fix: Exclude index page `/` from static cache settings by **@sbruel** in [#7382](https://github.com/danny-avila/LibreChat/pull/7382) + +### ⚙️ Other Changes + +- 
📜 docs: CHANGELOG for release v0.7.8 by **@github-actions[bot]** in [#7290](https://github.com/danny-avila/LibreChat/pull/7290) +- 📦 chore: Update API Package Dependencies by **@danny-avila** in [#7359](https://github.com/danny-avila/LibreChat/pull/7359) +- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7321](https://github.com/danny-avila/LibreChat/pull/7321) +- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7434](https://github.com/danny-avila/LibreChat/pull/7434) +- 🛡️ chore: `multer` v2.0.0 for CVE-2025-47935 and CVE-2025-47944 by **@danny-avila** in [#7454](https://github.com/danny-avila/LibreChat/pull/7454) +- 📂 refactor: Improve `FileAttachment` & File Form Deletion by **@danny-avila** in [#7471](https://github.com/danny-avila/LibreChat/pull/7471) +- 📊 chore: Remove Old Helm Chart by **@hofq** in [#7512](https://github.com/danny-avila/LibreChat/pull/7512) +- 🪖 chore: bump helm app version to v0.7.8 by **@austin-barrington** in [#7524](https://github.com/danny-avila/LibreChat/pull/7524) + + + +--- +## [v0.7.8] - + +Changes from v0.7.8-rc1 to v0.7.8. + +### ✨ New Features + +- ✨ feat: Enhance form submission for touch screens by **@berry-13** in [#7198](https://github.com/danny-avila/LibreChat/pull/7198) +- 🔍 feat: Additional Tavily API Tool Parameters by **@glowforge-opensource** in [#7232](https://github.com/danny-avila/LibreChat/pull/7232) +- 🐋 feat: Add python to Dockerfile for increased MCP compatibility by **@technicalpickles** in [#7270](https://github.com/danny-avila/LibreChat/pull/7270) + +### 🔧 Fixes + +- 🔧 fix: Google Gemma Support & OpenAI Reasoning Instructions by **@danny-avila** in [#7196](https://github.com/danny-avila/LibreChat/pull/7196) +- 🛠️ fix: Conversation Navigation State by **@danny-avila** in [#7210](https://github.com/danny-avila/LibreChat/pull/7210) +- 🔄 fix: o-Series Model Regex for System Messages by **@danny-avila** in [#7245](https://github.com/danny-avila/LibreChat/pull/7245) +- 🔖 fix: Custom Headers for Initial MCP SSE Connection by **@danny-avila** in [#7246](https://github.com/danny-avila/LibreChat/pull/7246) +- 🛡️ fix: Deep Clone `MCPOptions` for User MCP Connections by **@danny-avila** in [#7247](https://github.com/danny-avila/LibreChat/pull/7247) +- 🔄 fix: URL Param Race Condition and File Draft Persistence by **@danny-avila** in [#7257](https://github.com/danny-avila/LibreChat/pull/7257) +- 🔄 fix: Assistants Endpoint & Minor Issues by **@danny-avila** in [#7274](https://github.com/danny-avila/LibreChat/pull/7274) +- 🔄 fix: Ollama Think Tag Edge Case with Tools by **@danny-avila** in [#7275](https://github.com/danny-avila/LibreChat/pull/7275) + +### ⚙️ Other Changes + +- 📜 docs: CHANGELOG for release v0.7.8-rc1 by **@github-actions[bot]** in [#7153](https://github.com/danny-avila/LibreChat/pull/7153) +- 🔄 refactor: Artifact Visibility Management by **@danny-avila** in [#7181](https://github.com/danny-avila/LibreChat/pull/7181) +- 📦 chore: Bump Package Security by **@danny-avila** in [#7183](https://github.com/danny-avila/LibreChat/pull/7183) +- 🌿 refactor: Unmount Fork Popover on Hide for Better Performance by **@danny-avila** in [#7189](https://github.com/danny-avila/LibreChat/pull/7189) +- 🧰 chore: ESLint configuration to enforce Prettier formatting rules by **@mawburn** in [#7186](https://github.com/danny-avila/LibreChat/pull/7186) +- 🎨 style: Improve KaTeX Rendering for LaTeX Equations by **@andresgit** in [#7223](https://github.com/danny-avila/LibreChat/pull/7223) +- 📝 docs: Update `.env.example` Google models 
by **@marlonka** in [#7254](https://github.com/danny-avila/LibreChat/pull/7254) +- 💬 refactor: MCP Chat Visibility Option, Google Rates, Remove OpenAPI Plugins by **@danny-avila** in [#7286](https://github.com/danny-avila/LibreChat/pull/7286) +- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7214](https://github.com/danny-avila/LibreChat/pull/7214) + + + +[See full release details][release-v0.7.8] + +[release-v0.7.8]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8 + +--- +## [v0.7.8-rc1] - + +Changes from v0.7.7 to v0.7.8-rc1. + +### ✨ New Features + +- 🔍 feat: Mistral OCR API / Upload Files as Text by **@danny-avila** in [#6274](https://github.com/danny-avila/LibreChat/pull/6274) +- 🤖 feat: Support OpenAI Web Search models by **@danny-avila** in [#6313](https://github.com/danny-avila/LibreChat/pull/6313) +- 🔗 feat: Agent Chain (Mixture-of-Agents) by **@danny-avila** in [#6374](https://github.com/danny-avila/LibreChat/pull/6374) +- ⌛ feat: `initTimeout` for Slow Starting MCP Servers by **@perweij** in [#6383](https://github.com/danny-avila/LibreChat/pull/6383) +- 🚀 feat: `S3` Integration for File handling and Image uploads by **@rubentalstra** in [#6142](https://github.com/danny-avila/LibreChat/pull/6142) +- 🔒feat: Enable OpenID Auto-Redirect by **@leondape** in [#6066](https://github.com/danny-avila/LibreChat/pull/6066) +- 🚀 feat: Integrate `Azure Blob Storage` for file handling and image uploads by **@rubentalstra** in [#6153](https://github.com/danny-avila/LibreChat/pull/6153) +- 🚀 feat: Add support for custom `AWS` endpoint in `S3` by **@rubentalstra** in [#6431](https://github.com/danny-avila/LibreChat/pull/6431) +- 🚀 feat: Add support for LDAP STARTTLS in LDAP authentication by **@rubentalstra** in [#6438](https://github.com/danny-avila/LibreChat/pull/6438) +- 🚀 feat: Refactor schema exports and update package version to 0.0.4 by **@rubentalstra** in [#6455](https://github.com/danny-avila/LibreChat/pull/6455) +- 🔼 feat: Add Auto Submit For URL Query Params by **@mjaverto** in [#6440](https://github.com/danny-avila/LibreChat/pull/6440) +- 🛠 feat: Enhance Redis Integration, Rate Limiters & Log Headers by **@danny-avila** in [#6462](https://github.com/danny-avila/LibreChat/pull/6462) +- 💵 feat: Add Automatic Balance Refill by **@rubentalstra** in [#6452](https://github.com/danny-avila/LibreChat/pull/6452) +- 🗣️ feat: add support for gpt-4o-transcribe models by **@berry-13** in [#6483](https://github.com/danny-avila/LibreChat/pull/6483) +- 🎨 feat: UI Refresh for Enhanced UX by **@berry-13** in [#6346](https://github.com/danny-avila/LibreChat/pull/6346) +- 🌍 feat: Add support for Hungarian language localization by **@rubentalstra** in [#6508](https://github.com/danny-avila/LibreChat/pull/6508) +- 🚀 feat: Add Gemini 2.5 Token/Context Values, Increase Max Possible Output to 64k by **@danny-avila** in [#6563](https://github.com/danny-avila/LibreChat/pull/6563) +- 🚀 feat: Enhance MCP Connections For Multi-User Support by **@danny-avila** in [#6610](https://github.com/danny-avila/LibreChat/pull/6610) +- 🚀 feat: Enhance S3 URL Expiry with Refresh; fix: S3 File Deletion by **@danny-avila** in [#6647](https://github.com/danny-avila/LibreChat/pull/6647) +- 🚀 feat: enhance UI components and refactor settings by **@berry-13** in [#6625](https://github.com/danny-avila/LibreChat/pull/6625) +- 💬 feat: move TemporaryChat to the Header by **@berry-13** in [#6646](https://github.com/danny-avila/LibreChat/pull/6646) +- 🚀 feat: Use Model Specs + Specific Endpoints, Limit 
Providers for Agents by **@danny-avila** in [#6650](https://github.com/danny-avila/LibreChat/pull/6650) +- 🪙 feat: Sync Balance Config on Login by **@danny-avila** in [#6671](https://github.com/danny-avila/LibreChat/pull/6671) +- 🔦 feat: MCP Support for Non-Agent Endpoints by **@danny-avila** in [#6775](https://github.com/danny-avila/LibreChat/pull/6775) +- 🗃️ feat: Code Interpreter File Persistence between Sessions by **@danny-avila** in [#6790](https://github.com/danny-avila/LibreChat/pull/6790) +- 🖥️ feat: Code Interpreter API for Non-Agent Endpoints by **@danny-avila** in [#6803](https://github.com/danny-avila/LibreChat/pull/6803) +- ⚡ feat: Self-hosted Artifacts Static Bundler URL by **@danny-avila** in [#6827](https://github.com/danny-avila/LibreChat/pull/6827) +- 🐳 feat: Add Jemalloc and UV to Docker Builds by **@danny-avila** in [#6836](https://github.com/danny-avila/LibreChat/pull/6836) +- 🤖 feat: GPT-4.1 by **@danny-avila** in [#6880](https://github.com/danny-avila/LibreChat/pull/6880) +- 👋 feat: remove Edge TTS by **@berry-13** in [#6885](https://github.com/danny-avila/LibreChat/pull/6885) +- feat: nav optimization by **@berry-13** in [#5785](https://github.com/danny-avila/LibreChat/pull/5785) +- 🗺️ feat: Add Parameter Location Mapping for OpenAPI actions by **@peeeteeer** in [#6858](https://github.com/danny-avila/LibreChat/pull/6858) +- 🤖 feat: Support `o4-mini` and `o3` Models by **@danny-avila** in [#6928](https://github.com/danny-avila/LibreChat/pull/6928) +- 🎨 feat: OpenAI Image Tools (GPT-Image-1) by **@danny-avila** in [#7079](https://github.com/danny-avila/LibreChat/pull/7079) +- 🗓️ feat: Add Special Variables for Prompts & Agents, Prompt UI Improvements by **@danny-avila** in [#7123](https://github.com/danny-avila/LibreChat/pull/7123) + +### 🌍 Internationalization + +- 🌍 i18n: Add Thai Language Support and Update Translations by **@rubentalstra** in [#6219](https://github.com/danny-avila/LibreChat/pull/6219) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6220](https://github.com/danny-avila/LibreChat/pull/6220) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6240](https://github.com/danny-avila/LibreChat/pull/6240) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6241](https://github.com/danny-avila/LibreChat/pull/6241) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6277](https://github.com/danny-avila/LibreChat/pull/6277) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6414](https://github.com/danny-avila/LibreChat/pull/6414) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6505](https://github.com/danny-avila/LibreChat/pull/6505) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6530](https://github.com/danny-avila/LibreChat/pull/6530) +- 🌍 i18n: Add Persian Localization Support by **@rubentalstra** in [#6669](https://github.com/danny-avila/LibreChat/pull/6669) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6667](https://github.com/danny-avila/LibreChat/pull/6667) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7126](https://github.com/danny-avila/LibreChat/pull/7126) +- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in 
[#7148](https://github.com/danny-avila/LibreChat/pull/7148) + +### 👐 Accessibility + +- 🎨 a11y: Update Model Spec Description Text by **@berry-13** in [#6294](https://github.com/danny-avila/LibreChat/pull/6294) +- 🗑️ a11y: Add Accessible Name to Button for File Attachment Removal by **@kangabell** in [#6709](https://github.com/danny-avila/LibreChat/pull/6709) +- ⌨️ a11y: enhance accessibility & visual consistency by **@berry-13** in [#6866](https://github.com/danny-avila/LibreChat/pull/6866) +- 🙌 a11y: Searchbar/Conversations List Focus by **@danny-avila** in [#7096](https://github.com/danny-avila/LibreChat/pull/7096) +- 👐 a11y: Improve Fork and SplitText Accessibility by **@danny-avila** in [#7147](https://github.com/danny-avila/LibreChat/pull/7147) + +### 🔧 Fixes + +- 🐛 fix: Avatar Type Definitions in Agent/Assistant Schemas by **@danny-avila** in [#6235](https://github.com/danny-avila/LibreChat/pull/6235) +- 🔧 fix: MeiliSearch Field Error and Patch Incorrect Import by #6210 by **@rubentalstra** in [#6245](https://github.com/danny-avila/LibreChat/pull/6245) +- 🔏 fix: Enhance Two-Factor Authentication by **@rubentalstra** in [#6247](https://github.com/danny-avila/LibreChat/pull/6247) +- 🐛 fix: Await saveMessage in abortMiddleware to ensure proper execution by **@sh4shii** in [#6248](https://github.com/danny-avila/LibreChat/pull/6248) +- 🔧 fix: Axios Proxy Usage And Bump `mongoose` by **@danny-avila** in [#6298](https://github.com/danny-avila/LibreChat/pull/6298) +- 🔧 fix: comment out MCP servers to resolve service run issues by **@KunalScriptz** in [#6316](https://github.com/danny-avila/LibreChat/pull/6316) +- 🔧 fix: Update Token Calculations and Mapping, MCP `env` Initialization by **@danny-avila** in [#6406](https://github.com/danny-avila/LibreChat/pull/6406) +- 🐞 fix: Agent "Resend" Message Attachments + Source Icon Styling by **@danny-avila** in [#6408](https://github.com/danny-avila/LibreChat/pull/6408) +- 🐛 fix: Prevent Crash on Duplicate Message ID by **@Odrec** in [#6392](https://github.com/danny-avila/LibreChat/pull/6392) +- 🔐 fix: Invalid Key Length in 2FA Encryption by **@rubentalstra** in [#6432](https://github.com/danny-avila/LibreChat/pull/6432) +- 🏗️ fix: Fix Agents Token Spend Race Conditions, Expand Test Coverage by **@danny-avila** in [#6480](https://github.com/danny-avila/LibreChat/pull/6480) +- 🔃 fix: Draft Clearing, Claude Titles, Remove Default Vision Max Tokens by **@danny-avila** in [#6501](https://github.com/danny-avila/LibreChat/pull/6501) +- 🔧 fix: Update username reference to use user.name in greeting display by **@rubentalstra** in [#6534](https://github.com/danny-avila/LibreChat/pull/6534) +- 🔧 fix: S3 Download Stream with Key Extraction and Blob Storage Encoding for Vision by **@danny-avila** in [#6557](https://github.com/danny-avila/LibreChat/pull/6557) +- 🔧 fix: Mistral type strictness for `usage` & update token values/windows by **@danny-avila** in [#6562](https://github.com/danny-avila/LibreChat/pull/6562) +- 🔧 fix: Consolidate Text Parsing and TTS Edge Initialization by **@danny-avila** in [#6582](https://github.com/danny-avila/LibreChat/pull/6582) +- 🔧 fix: Ensure continuation in image processing on base64 encoding from Blob Storage by **@danny-avila** in [#6619](https://github.com/danny-avila/LibreChat/pull/6619) +- ✉️ fix: Fallback For User Name In Email Templates by **@danny-avila** in [#6620](https://github.com/danny-avila/LibreChat/pull/6620) +- 🔧 fix: Azure Blob Integration and File Source References by **@rubentalstra** in 
[#6575](https://github.com/danny-avila/LibreChat/pull/6575) +- 🐛 fix: Safeguard against undefined addedEndpoints by **@wipash** in [#6654](https://github.com/danny-avila/LibreChat/pull/6654) +- 🤖 fix: Gemini 2.5 Vision Support by **@danny-avila** in [#6663](https://github.com/danny-avila/LibreChat/pull/6663) +- 🔄 fix: Avatar & Error Handling Enhancements by **@danny-avila** in [#6687](https://github.com/danny-avila/LibreChat/pull/6687) +- 🔧 fix: Chat Middleware, Zod Conversion, Auto-Save and S3 URL Refresh by **@danny-avila** in [#6720](https://github.com/danny-avila/LibreChat/pull/6720) +- 🔧 fix: Agent Capability Checks & DocumentDB Compatibility for Agent Resource Removal by **@danny-avila** in [#6726](https://github.com/danny-avila/LibreChat/pull/6726) +- 🔄 fix: Improve audio MIME type detection and handling by **@berry-13** in [#6707](https://github.com/danny-avila/LibreChat/pull/6707) +- 🪺 fix: Update Role Handling due to New Schema Shape by **@danny-avila** in [#6774](https://github.com/danny-avila/LibreChat/pull/6774) +- 🗨️ fix: Show ModelSpec Greeting by **@berry-13** in [#6770](https://github.com/danny-avila/LibreChat/pull/6770) +- 🔧 fix: Keyv and Proxy Issues, and More Memory Optimizations by **@danny-avila** in [#6867](https://github.com/danny-avila/LibreChat/pull/6867) +- ✨ fix: Implement dynamic text sizing for greeting and name display by **@berry-13** in [#6833](https://github.com/danny-avila/LibreChat/pull/6833) +- 📝 fix: Mistral OCR Image Support and Azure Agent Titles by **@danny-avila** in [#6901](https://github.com/danny-avila/LibreChat/pull/6901) +- 📢 fix: Invalid `engineTTS` and Conversation State on Navigation by **@berry-13** in [#6904](https://github.com/danny-avila/LibreChat/pull/6904) +- 🛠️ fix: Improve Accessibility and Display of Conversation Menu by **@danny-avila** in [#6913](https://github.com/danny-avila/LibreChat/pull/6913) +- 🔧 fix: Agent Resource Form, Convo Menu Style, Ensure Draft Clears on Submission by **@danny-avila** in [#6925](https://github.com/danny-avila/LibreChat/pull/6925) +- 🔀 fix: MCP Improvements, Auto-Save Drafts, Artifact Markup by **@danny-avila** in [#7040](https://github.com/danny-avila/LibreChat/pull/7040) +- 🐋 fix: Improve Deepseek Compatbility by **@danny-avila** in [#7132](https://github.com/danny-avila/LibreChat/pull/7132) +- 🐙 fix: Add Redis Ping Interval to Prevent Connection Drops by **@peeeteeer** in [#7127](https://github.com/danny-avila/LibreChat/pull/7127) + +### ⚙️ Other Changes + +- 📦 refactor: Move DB Models to `@librechat/data-schemas` by **@rubentalstra** in [#6210](https://github.com/danny-avila/LibreChat/pull/6210) +- 📦 chore: Patch `axios` to address CVE-2025-27152 by **@danny-avila** in [#6222](https://github.com/danny-avila/LibreChat/pull/6222) +- ⚠️ refactor: Use Error Content Part Instead Of Throwing Error for Agents by **@danny-avila** in [#6262](https://github.com/danny-avila/LibreChat/pull/6262) +- 🏃‍♂️ refactor: Improve Agent Run Context & Misc. 
Changes by **@danny-avila** in [#6448](https://github.com/danny-avila/LibreChat/pull/6448) +- 📝 docs: librechat.example.yaml by **@ineiti** in [#6442](https://github.com/danny-avila/LibreChat/pull/6442) +- 🏃‍♂️ refactor: More Agent Context Improvements during Run by **@danny-avila** in [#6477](https://github.com/danny-avila/LibreChat/pull/6477) +- 🔃 refactor: Allow streaming for `o1` models by **@danny-avila** in [#6509](https://github.com/danny-avila/LibreChat/pull/6509) +- 🔧 chore: `Vite` Plugin Upgrades & Config Optimizations by **@rubentalstra** in [#6547](https://github.com/danny-avila/LibreChat/pull/6547) +- 🔧 refactor: Consolidate Logging, Model Selection & Actions Optimizations, Minor Fixes by **@danny-avila** in [#6553](https://github.com/danny-avila/LibreChat/pull/6553) +- 🎨 style: Address Minor UI Refresh Issues by **@berry-13** in [#6552](https://github.com/danny-avila/LibreChat/pull/6552) +- 🔧 refactor: Enhance Model & Endpoint Configurations with Global Indicators 🌍 by **@berry-13** in [#6578](https://github.com/danny-avila/LibreChat/pull/6578) +- 💬 style: Chat UI, Greeting, and Message adjustments by **@berry-13** in [#6612](https://github.com/danny-avila/LibreChat/pull/6612) +- ⚡ refactor: DocumentDB Compatibility for Balance Updates by **@danny-avila** in [#6673](https://github.com/danny-avila/LibreChat/pull/6673) +- 🧹 chore: Update ESLint rules for React hooks by **@rubentalstra** in [#6685](https://github.com/danny-avila/LibreChat/pull/6685) +- 🪙 chore: Update Gemini Pricing by **@RedwindA** in [#6731](https://github.com/danny-avila/LibreChat/pull/6731) +- 🪺 refactor: Nest Permission fields for Roles by **@rubentalstra** in [#6487](https://github.com/danny-avila/LibreChat/pull/6487) +- 📦 chore: Update `caniuse-lite` dependency to version 1.0.30001706 by **@rubentalstra** in [#6482](https://github.com/danny-avila/LibreChat/pull/6482) +- ⚙️ refactor: OAuth Flow Signal, Type Safety, Tool Progress & Updated Packages by **@danny-avila** in [#6752](https://github.com/danny-avila/LibreChat/pull/6752) +- 📦 chore: bump vite from 6.2.3 to 6.2.5 by **@dependabot[bot]** in [#6745](https://github.com/danny-avila/LibreChat/pull/6745) +- 💾 chore: Enhance Local Storage Handling and Update MCP SDK by **@danny-avila** in [#6809](https://github.com/danny-avila/LibreChat/pull/6809) +- 🤖 refactor: Improve Agents Memory Usage, Bump Keyv, Grok 3 by **@danny-avila** in [#6850](https://github.com/danny-avila/LibreChat/pull/6850) +- 💾 refactor: Enhance Memory In Image Encodings & Client Disposal by **@danny-avila** in [#6852](https://github.com/danny-avila/LibreChat/pull/6852) +- 🔁 refactor: Token Event Handler and Standardize `maxTokens` Key by **@danny-avila** in [#6886](https://github.com/danny-avila/LibreChat/pull/6886) +- 🔍 refactor: Search & Message Retrieval by **@berry-13** in [#6903](https://github.com/danny-avila/LibreChat/pull/6903) +- 🎨 style: standardize dropdown styling & fix z-Index layering by **@berry-13** in [#6939](https://github.com/danny-avila/LibreChat/pull/6939) +- 📙 docs: CONTRIBUTING.md by **@dblock** in [#6831](https://github.com/danny-avila/LibreChat/pull/6831) +- 🧭 refactor: Modernize Nav/Header by **@danny-avila** in [#7094](https://github.com/danny-avila/LibreChat/pull/7094) +- 🪶 refactor: Chat Input Focus for Conversation Navigations & ChatForm Optimizations by **@danny-avila** in [#7100](https://github.com/danny-avila/LibreChat/pull/7100) +- 🔃 refactor: Streamline Navigation, Message Loading UX by **@danny-avila** in 
[#7118](https://github.com/danny-avila/LibreChat/pull/7118) +- 📜 docs: Unreleased changelog by **@github-actions[bot]** in [#6265](https://github.com/danny-avila/LibreChat/pull/6265) + + + +[See full release details][release-v0.7.8-rc1] + +[release-v0.7.8-rc1]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8-rc1 + +--- diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..b5e1b73819 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,57 @@ +# v0.8.2-rc1 + +# Base node image +FROM node:20-alpine AS node + +# Install jemalloc +RUN apk add --no-cache jemalloc +RUN apk add --no-cache python3 py3-pip uv + +# Set environment variable to use jemalloc +ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2 + +# Add `uv` for extended MCP support +COPY --from=ghcr.io/astral-sh/uv:0.9.5-python3.12-alpine /usr/local/bin/uv /usr/local/bin/uvx /bin/ +RUN uv --version + +RUN mkdir -p /app && chown node:node /app +WORKDIR /app + +USER node + +COPY --chown=node:node package.json package-lock.json ./ +COPY --chown=node:node api/package.json ./api/package.json +COPY --chown=node:node client/package.json ./client/package.json +COPY --chown=node:node packages/data-provider/package.json ./packages/data-provider/package.json +COPY --chown=node:node packages/data-schemas/package.json ./packages/data-schemas/package.json +COPY --chown=node:node packages/api/package.json ./packages/api/package.json + +RUN \ + # Allow mounting of these files, which have no default + touch .env ; \ + # Create directories for the volumes to inherit the correct permissions + mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \ + npm config set fetch-retry-maxtimeout 600000 ; \ + npm config set fetch-retries 5 ; \ + npm config set fetch-retry-mintimeout 15000 ; \ + npm ci --no-audit + +COPY --chown=node:node . . 
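+# Source is copied after the dependency install above so the cached npm ci layer is reused on rebuilds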
+ +RUN \ + # React client build + NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \ + npm prune --production; \ + npm cache clean --force + +# Node API setup +EXPOSE 3080 +ENV HOST=0.0.0.0 +CMD ["npm", "run", "backend"] + +# Optional: for client with nginx routing +# FROM nginx:stable-alpine AS nginx-client +# WORKDIR /usr/share/nginx/html +# COPY --from=node /app/client/dist /usr/share/nginx/html +# COPY client/nginx.conf /etc/nginx/conf.d/default.conf +# ENTRYPOINT ["nginx", "-g", "daemon off;"] diff --git a/Dockerfile.multi b/Dockerfile.multi new file mode 100644 index 0000000000..89a07087f0 --- /dev/null +++ b/Dockerfile.multi @@ -0,0 +1,82 @@ +# Dockerfile.multi +# v0.8.2-rc1 + +# Base for all builds +FROM node:20-alpine AS base-min +# Install jemalloc +RUN apk add --no-cache jemalloc +# Set environment variable to use jemalloc +ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2 +WORKDIR /app +RUN apk --no-cache add curl +RUN npm config set fetch-retry-maxtimeout 600000 && \ + npm config set fetch-retries 5 && \ + npm config set fetch-retry-mintimeout 15000 +COPY package*.json ./ +COPY packages/data-provider/package*.json ./packages/data-provider/ +COPY packages/api/package*.json ./packages/api/ +COPY packages/data-schemas/package*.json ./packages/data-schemas/ +COPY packages/client/package*.json ./packages/client/ +COPY client/package*.json ./client/ +COPY api/package*.json ./api/ + +# Install all dependencies for every build +FROM base-min AS base +WORKDIR /app +RUN npm ci + +# Build `data-provider` package +FROM base AS data-provider-build +WORKDIR /app/packages/data-provider +COPY packages/data-provider ./ +RUN npm run build + +# Build `data-schemas` package +FROM base AS data-schemas-build +WORKDIR /app/packages/data-schemas +COPY packages/data-schemas ./ +COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist +RUN npm run build + +# Build `api` package +FROM base AS api-package-build +WORKDIR /app/packages/api +COPY packages/api ./ +COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist +COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist +RUN npm run build + +# Build `client` package +FROM base AS client-package-build +WORKDIR /app/packages/client +COPY packages/client ./ +RUN npm run build + +# Client build +FROM base AS client-build +WORKDIR /app/client +COPY client ./ +COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist +COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist +COPY --from=client-package-build /app/packages/client/src /app/packages/client/src +ENV NODE_OPTIONS="--max-old-space-size=2048" +RUN npm run build + +# API setup (including client dist) +FROM base-min AS api-build +# Add `uv` for extended MCP support +COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/ +RUN uv --version +WORKDIR /app +# Install only production deps +RUN npm ci --omit=dev +COPY api ./api +COPY config ./config +COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist +COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist +COPY --from=api-package-build /app/packages/api/dist ./packages/api/dist +COPY --from=client-build /app/client/dist ./client/dist +WORKDIR /app/api +EXPOSE 3080 +ENV HOST=0.0.0.0 +CMD ["node", "server/index.js"] \ No newline at end of file diff --git a/LICENSE.md b/LICENSE similarity index 
97% rename from LICENSE.md rename to LICENSE index ee1ca3d9b4..535850a920 100644 --- a/LICENSE.md +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Danny Avila +Copyright (c) 2025 LibreChat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index d0ca06a9ca..a96e47f70f 100644 --- a/README.md +++ b/README.md @@ -1,224 +1,217 @@ -# ChatGPT Clone # -https://user-images.githubusercontent.com/110412045/223754183-8b7f45ce-6517-4bd5-9b39-c624745bf399.mp4 +

+ LibreChat

-## All AI Conversations under One Roof. ## - Assistant AIs are the future and OpenAI revolutionized this movement with ChatGPT. While numerous methods exist to integrate them, this app commemorates the original styling of ChatGPT, with the ability to integrate any current/future AI models, while improving upon original client features, such as conversation search and prompt templates (currently WIP). +


- This project was started early in Feb '23, anticipating the release of the official ChatGPT API from OpenAI, and now uses it. Through this clone, you can avoid ChatGPT Plus in favor of free or pay-per-call APIs. I will soon deploy a demo of this app. Feel free to contribute, clone, or fork. Currently dockerized. +

+ Deploy on Railway
+ Deploy on Zeabur
+ Deploy on Sealos

-## Updates -
-2023-03-09 -Released v.0.0.2 - -Adds Sydney (jailbroken Bing AI) to the model menu. Thank you [DavesDevFails](https://github.com/DavesDevFails) for bringing it to my attention in this [issue](https://github.com/danny-avila/chatgpt-clone/issues/13). Bing/Sydney now correctly cite links, more styling to come. Fix some overlooked bugs, and model menu doesn't close upon deleting a customGpt. +

+ Translation Progress

-I've re-enabled the ChatGPT browser client (free version) since it might be working for most people, it no longer works for me. Sydney is the best free route anyway. -
+# ✨ Features -
-
-2023-03-07 -Due to increased interest in the repo, I've dockerized the app as of this update for quick setup! See setup instructions below. I realize this still takes some time with installing docker dependencies, so it's on the roadmap to have a deployed demo. Besides this, I've made major improvements for a lot of the existing features across the board, mainly UI/UX. +- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features +- 🤖 **AI Model Selection**: + - Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Responses API (incl. Azure) + - [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required + - Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints): + - Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai, + - OpenRouter, Helicone, Perplexity, ShuttleAI, Deepseek, Qwen, and more -Also worth noting, the method to access the Free Version is no longer working, so I've removed it from model selection until further notice. -
-Previous Updates +- 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**: + - Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran + - Seamless File Handling: Upload, process, and download files directly + - No Privacy Concerns: Fully isolated and secure execution -
-2023-03-04 -Custom prompt prefixing and labeling is now supported through the official API. This nets some interesting results when you need ChatGPT for specific uses or entertainment. Select 'CustomGPT' in the model menu to configure this, and you can choose to save the configuration or reference it by conversation. Model selection will change by conversation. -
-
-2023-03-01 -Official ChatGPT API is out! Removed davinci since the official API is extremely fast and 10x less expensive. Since user labeling and prompt prefixing is officially supported, I will add a View feature so you can set this within chat, which gives the UI an added use case. I've kept the BrowserClient, since it's free to use like the official site. +- 🔦 **Agents & Tools Integration**: + - **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**: + - No-Code Custom Assistants: Build specialized, AI-driven helpers + - Agent Marketplace: Discover and deploy community-built agents + - Collaborative Sharing: Share agents with specific users and groups + - Flexible & Extensible: Use MCP Servers, tools, file search, code execution, and more + - Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, Google, Vertex AI, Responses API, and more + - [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools -The Messages UI correctly mirrors code syntax highlighting. The exact replication of the cursor is not 1-to-1 yet, but pretty close. Later on in the project, I'll implement tests for code edge cases and explore the possibility of running code in-browser. Right now, unknown code defaults to javascript, but will detect language as close as possible. -
-
-2023-02-21 -BingAI is integrated (although sadly limited by Microsoft with the 5 msg/convo limit, 50 msgs/day). I will need to handle the case when Bing refuses to give more answers on top of the other styling features I have in mind. Official ChatGPT use is back with the new BrowserClient. Brainstorming how to handle the UI when the Ai model changes, since conversations can't be persisted between them (or perhaps build a way to achieve this at some level). -
-
-2023-02-15 -Just got access to Bing AI so I'll be focusing on integrating that through waylaidwanderer's 'experimental' BingAIClient. -
-
-2023-02-14 +- 🔍 **Web Search**: + - Search the internet and retrieve relevant information to enhance your AI context + - Combines search providers, content scrapers, and result rerankers for optimal results + - **Customizable Jina Reranking**: Configure custom Jina API URLs for reranking services + - **[Learn More →](https://www.librechat.ai/docs/features/web_search)** -Official ChatGPT use is no longer possible though I recently used it with waylaidwanderer's [reverse proxy method](https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/README.md#using-a-reverse-proxy), and before that, through leaked models he also discovered. +- 🪄 **Generative UI with Code Artifacts**: + - [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat -Currently, this project is only functional with the `text-davinci-003` model. -
-
+- 🎨 **Image Generation & Editing** + - Text-to-image and image-to-image with [GPT-Image-1](https://www.librechat.ai/docs/features/image_gen#1--openai-image-tools-recommended) + - Text-to-image with [DALL-E (3/2)](https://www.librechat.ai/docs/features/image_gen#2--dalle-legacy), [Stable Diffusion](https://www.librechat.ai/docs/features/image_gen#3--stable-diffusion-local), [Flux](https://www.librechat.ai/docs/features/image_gen#4--flux), or any [MCP server](https://www.librechat.ai/docs/features/image_gen#5--model-context-protocol-mcp) + - Produce stunning visuals from prompts or refine existing images with a single instruction -# Table of Contents - * [Roadmap](#roadmap) - * [Features](#features) - * [Tech Stack](#tech-stack) - * [Getting Started](#getting-started) - * [Prerequisites](#prerequisites) - * [Usage](#usage) - * [Local (npm)](#npm) - * [Docker](#docker) - * [Access Tokens](#access-tokens) - * [Updating](#updating) - * [Use Cases](#use-cases) - * [Origin](#origin) - * [Caveats](#caveats) - * [Contributing](#contributing) - * [License](#license) +- 💾 **Presets & Context Management**: + - Create, Save, & Share Custom Presets + - Switch between AI Endpoints and Presets mid-chat + - Edit, Resubmit, and Continue Messages with Conversation branching + - Create and share prompts with specific users and groups + - [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control -## Roadmap +- 💬 **Multimodal & File Interactions**: + - Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini 📸 + - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️ -> **Warning** +- 🌎 **Multilingual UI**: + - English, 中文 (简体), 中文 (繁體), العربية, Deutsch, Español, Français, Italiano + - Polski, Português (PT), Português (BR), Русский, 日本語, Svenska, 한국어, Tiếng Việt + - Türkçe, Nederlands, עברית, Català, Čeština, Dansk, Eesti, فارسی + - Suomi, Magyar, Հայերեն, Bahasa Indonesia, ქართული, Latviešu, ไทย, ئۇيغۇرچە -> This is a work in progress. I'm building this in public. FYI there is still a lot of tech debt to cleanup. You can follow the progress here or on my [Linkedin](https://www.linkedin.com/in/danny-avila). +- 🧠 **Reasoning UI**: + - Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1 -Here are my recently completed and planned features: +- 🎨 **Customizable Interface**: + - Customizable Dropdown & Interface that adapts to both power users and newcomers -- [x] Persistent conversation -- [x] Rename, delete conversations -- [x] UI Error handling -- [x] Bing AI integration -- [x] AI model change handling (start new convos within existing, remembers last selected) -- [x] Code block handling (highlighting, markdown, clipboard, language detection) -- [x] Markdown handling -- [x] Customize prompt prefix/label (custom ChatGPT using official API) -- [x] Server convo pagination (limit fetch and load more with 'show more' button) -- [x] Config file for easy startup (docker compose) -- [ ] Bing AI Styling (for suggested responses, convo end, etc.) 
- **In progress** -- [ ] Add warning before clearing convos -- [ ] Build test suite for CI/CD -- [ ] Conversation Search (by title) -- [ ] Resubmit/edit sent messages -- [ ] Semantic Search Option (requires more tokens) -- [ ] Prompt Templates/Search -- [ ] Refactor/clean up code (tech debt) -- [ ] Optional use of local storage for credentials -- [ ] Mobile styling (half-finished) -- [ ] Deploy demo +- 🗣️ **Speech & Audio**: + - Chat hands-free with Speech-to-Text and Text-to-Speech + - Automatically send and play Audio + - Supports OpenAI, Azure OpenAI, and Elevenlabs -### Features +- 📥 **Import & Export Conversations**: + - Import Conversations from LibreChat, ChatGPT, Chatbot UI + - Export conversations as screenshots, markdown, text, json -- Response streaming identical to ChatGPT through server-sent events -- UI from original ChatGPT, including Dark mode -- AI model selection (official ChatGPT API, BingAI, ChatGPT Free) -- Create and Save custom ChatGPTs* +- 🔍 **Search & Discovery**: + - Search all messages/conversations -^* ChatGPT can be 'customized' by setting a system message or prompt prefix and alternate 'role' to the API request +- 👥 **Multi-User & Secure Access**: + - Multi-User, Secure Authentication with OAuth2, LDAP, & Email Login Support + - Built-in Moderation, and Token spend tools -[More info here](https://platform.openai.com/docs/guides/chat/instructing-chat-models). Here's an [example from this app.]() +- ⚙️ **Configuration & Deployment**: + - Configure Proxy, Reverse Proxy, Docker, & many Deployment options + - Use completely local or deploy on the cloud -### Tech Stack +- 📖 **Open-Source & Community**: + - Completely Open-Source & Built in Public + - Community-driven development, support, and feedback -- Utilizes [node-chatgpt-api](https://github.com/waylaidwanderer/node-chatgpt-api) -- No React boilerplate/toolchain/clone tutorials, created from scratch with react@latest -- Use of Tailwind CSS and [shadcn/ui](https://github.com/shadcn/ui) components -- Docker, useSWR, Redux, Express, MongoDB, [Keyv](https://www.npmjs.com/package/keyv) +[For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚 -## Getting Started +## 🪶 All-In-One AI Conversations with LibreChat -### Prerequisites -- npm -- Node.js >= 19.0.0 -- MongoDB installed or [MongoDB Atlas](https://account.mongodb.com/account/login) (required if not using Docker) -- [Docker (optional)](https://www.docker.com/get-started/) -- [OpenAI API key](https://platform.openai.com/account/api-keys) -- BingAI, ChatGPT access tokens (optional, free AIs) +LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins. -## Usage +With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform. -- **Clone/download** the repo down where desired -```bash - git clone https://github.com/danny-avila/chatgpt-clone.git -``` -- If using MongoDB Atlas, remove `&w=majority` from default connection string. 
+[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI) -### Local -- **Run npm** install in both the api and client directories -- **Provide** all credentials, (API keys, access tokens, and Mongo Connection String) in api/.env [(see .env example)](api/.env.example) -- **Run** `npm run build` in /client/ dir, `npm start` in /api/ dir -- **Visit** http://localhost:3080 (default port) & enjoy +Click on the thumbnail to open the video☝️ -### Docker +--- -- **Provide** all credentials, (API keys, access tokens, and Mongo Connection String) in [docker-compose.yml](docker-compose.yml) under api service -- **Build images** in both /api/ and /client/ directories (will eventually share through docker hub) - - `api/` - ```bash - docker build -t node-api . - ``` - - `client/` - ```bash - docker build -t react-client . - ``` -- **Run** `docker-compose build` in project root dir and then `docker-compose up` to start the app +## 🌐 Resources -### Access Tokens +**GitHub Repo:** + - **RAG API:** [github.com/danny-avila/rag_api](https://github.com/danny-avila/rag_api) + - **Website:** [github.com/LibreChat-AI/librechat.ai](https://github.com/LibreChat-AI/librechat.ai) -
-ChatGPT Free Instructions +**Other:** + - **Website:** [librechat.ai](https://librechat.ai) + - **Documentation:** [librechat.ai/docs](https://librechat.ai/docs) + - **Blog:** [librechat.ai/blog](https://librechat.ai/blog) +--- -**This has been disabled as is no longer working as of 3-07-23** +## 📝 Changelog +Keep up with the latest updates by visiting the releases page and notes: +- [Releases](https://github.com/danny-avila/LibreChat/releases) +- [Changelog](https://www.librechat.ai/changelog) -To get your Access token For ChatGPT 'Free Version', login to chat.openai.com, then visit https://chat.openai.com/api/auth/session. +**⚠️ Please consult the [changelog](https://www.librechat.ai/changelog) for breaking changes before updating.** +--- -**Warning:** There may be a high chance of your account being banned with this method. Continue doing so at your own risk. +## ⭐ Star History -
+

+ Star History Chart
+ danny-avila/LibreChat | Trendshift
+ ROSS Index - Fastest Growing Open-Source Startups in Q1 2024 | Runa Capital

-
-BingAI Instructions -The Bing Access Token is the "_U" cookie from bing.com. Use dev tools or an extension while logged into the site to view it. +--- -**Note:** Specific error handling and styling for this model is still in progress. -
+## ✨ Contributions -### Updating -- As the project is still a work-in-progress, you should pull the latest and run the steps over. Reset your browser cache/clear site data. +Contributions, suggestions, bug reports and fixes are welcome! -## Use Cases ## +For new features, components, or extensions, please open an issue and discuss before sending a PR. - - One stop shop for all conversational AIs, with the added bonus of searching past conversations. - - Using the official API, you'd have to generate 7.5 million words to expense the same cost as ChatGPT Plus ($20). - - ChatGPT/Google Bard/Bing AI conversations are lost in space or - cannot be searched past a certain timeframe. - - **Customize ChatGPT** +If you'd like to help translate LibreChat into your language, we'd love your contribution! Improving our translations not only makes LibreChat more accessible to users around the world but also enhances the overall user experience. Please check out our [Translation Guide](https://www.librechat.ai/docs/translation). - ![use case example](./images/use_case3.png "Make a Custom GPT") +--- - - **API is not as limited as ChatGPT Free (at [chat.openai.com](https://chat.openai.com/chat))** +## 💖 This project exists in its current state thanks to all the people who contribute - ![use case example](./images/use_case2.png "chat.openai.com is getting more limited by the day!") + + + - - **ChatGPT Free is down.** +--- - ![use case example](./images/use_case.png "GPT is down! Plus is too expensive!") +## 🎉 Special Thanks +We thank [Locize](https://locize.com) for their translation management tools that support multiple languages in LibreChat. -## Origin ## - This project was originally created as a Minimum Viable Product (or MVP) for the [@HackReactor](https://github.com/hackreactor/) Bootcamp. It was built with OpenAI response streaming and most of the UI completed in under 20 hours. During the end of that time, I had most of the UI and basic functionality done. This was created without using any boilerplates or templates, including create-react-app and other toolchains. I didn't follow any 'un-official chatgpt' video tutorials, and simply referenced the official site for the UI. The purpose of the exercise was to learn setting up a full stack project from scratch. Please feel free to give feedback, suggestions, or fork the project for your own use. - - -## Caveats -### Regarding use of Official ChatGPT API -From [@waylaidwanderer](https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/README.md#caveats): - -Since `gpt-3.5-turbo` is ChatGPT's underlying model, I had to do my best to replicate the way the official ChatGPT website uses it. -This means my implementation or the underlying model may not behave exactly the same in some ways: -- Conversations are not tied to any user IDs, so if that's important to you, you should implement your own user ID system. -- ChatGPT's model parameters (temperature, frequency penalty, etc.) are unknown, so I set some defaults that I thought would be reasonable. -- Conversations are limited to roughly the last 3000 tokens, so earlier messages may be forgotten during longer conversations. - - This works in a similar way to ChatGPT, except I'm pretty sure they have some additional way of retrieving context from earlier messages when needed (which can probably be achieved with embeddings, but I consider that out-of-scope for now). - -## Contributing -If you'd like to contribute, please create a pull request with a detailed description of your changes. 
- -## License -This project is licensed under the MIT License. +

+ Locize Logo

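The patch also introduces two container builds, the single-stage `Dockerfile` and the multi-stage `Dockerfile.multi` shown above. A minimal sketch of how they might be built and run locally, assuming a populated `.env` and a reachable MongoDB; the image tags here are illustrative and not part of the patch:

```bash
# Single-stage image: installs the workspaces, builds the client, and serves the API on port 3080
docker build -t librechat:local .

# Multi-stage build: the api-build target bundles the API with the prebuilt client dist
docker build -f Dockerfile.multi --target api-build -t librechat-api:local .

# Run either image; 3080 is the port exposed by both Dockerfiles
docker run --rm -p 3080:3080 --env-file .env librechat:local
```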
diff --git a/api/.dockerignore b/api/.dockerignore deleted file mode 100644 index 7af7f04757..0000000000 --- a/api/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -/node_modules -.env \ No newline at end of file diff --git a/api/.env.example b/api/.env.example deleted file mode 100644 index 47576c119a..0000000000 --- a/api/.env.example +++ /dev/null @@ -1,7 +0,0 @@ -OPENAI_KEY= -PORT=3080 -NODE_ENV=development -# Change this to your MongoDB URI if different and I recommend appending chatgpt-clone -MONGO_URI="mongodb://127.0.0.1:27017/chatgpt-clone" -CHATGPT_TOKEN="" -BING_TOKEN="" \ No newline at end of file diff --git a/api/Dockerfile b/api/Dockerfile deleted file mode 100644 index fd21cf4d1a..0000000000 --- a/api/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM node:19-alpine -WORKDIR /api -# copy package.json into the container at /api -COPY package*.json /api/ -# install dependencies -RUN npm install -# Copy the current directory contents into the container at /api -COPY . /api/ -# Make port 3080 available to the world outside this container -EXPOSE 3080 -# Run the app when the container launches -CMD ["npm", "start"] - -# docker build -t node-api . \ No newline at end of file diff --git a/api/app/bingai.js b/api/app/bingai.js deleted file mode 100644 index 4dfa8a71d5..0000000000 --- a/api/app/bingai.js +++ /dev/null @@ -1,55 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); - -const askBing = async ({ text, progressCallback, convo }) => { - const { BingAIClient } = (await import('@waylaidwanderer/chatgpt-api')); - - const bingAIClient = new BingAIClient({ - // "_U" cookie from bing.com - userToken: process.env.BING_TOKEN, - // If the above doesn't work, provide all your cookies as a string instead - // cookies: '', - debug: false, - cache: { store: new KeyvFile({ filename: './data/cache.json' }) } - }); - - let options = { - onProgress: async (partialRes) => await progressCallback(partialRes), - }; - - if (convo) { - options = { ...options, ...convo }; - } - - const res = await bingAIClient.sendMessage(text, options - ); - - return res; - - // Example response for reference - // { - // conversationSignature: 'wwZ2GC/qRgEqP3VSNIhbPGwtno5RcuBhzZFASOM+Sxg=', - // conversationId: '51D|BingProd|026D3A4017554DE6C446798144B6337F4D47D5B76E62A31F31D0B1D0A95ED868', - // clientId: '914800201536527', - // invocationId: 1, - // conversationExpiryTime: '2023-02-15T21:48:46.2892088Z', - // response: 'Hello, this is Bing. Nice to meet you. 😊', - // details: { - // text: 'Hello, this is Bing. Nice to meet you. 😊', - // author: 'bot', - // createdAt: '2023-02-15T15:48:43.0631898+00:00', - // timestamp: '2023-02-15T15:48:43.0631898+00:00', - // messageId: '9d0c9a80-91b1-49ab-b9b1-b457dc3fe247', - // requestId: '5b252ef8-4f09-4c08-b6f5-4499d2e12fba', - // offense: 'None', - // adaptiveCards: [ [Object] ], - // sourceAttributions: [], - // feedback: { tag: null, updatedOn: null, type: 'None' }, - // contentOrigin: 'DeepLeo', - // privacy: null, - // suggestedResponses: [ [Object], [Object], [Object] ] - // } - // } -}; - -module.exports = { askBing }; diff --git a/api/app/chatgpt-browser.js b/api/app/chatgpt-browser.js deleted file mode 100644 index 442d2a731d..0000000000 --- a/api/app/chatgpt-browser.js +++ /dev/null @@ -1,33 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); - -const clientOptions = { - // Warning: This will expose your access token to a third party. Consider the risks before using this. 
- reverseProxyUrl: 'https://chatgpt.duti.tech/api/conversation', - // Access token from https://chat.openai.com/api/auth/session - accessToken: process.env.CHATGPT_TOKEN, - // debug: true -}; - -const browserClient = async ({ text, progressCallback, convo }) => { - const { ChatGPTBrowserClient } = await import('@waylaidwanderer/chatgpt-api'); - - const store = { - store: new KeyvFile({ filename: './data/cache.json' }) - }; - - const client = new ChatGPTBrowserClient(clientOptions, store); - - let options = { - onProgress: async (partialRes) => await progressCallback(partialRes) - }; - - if (!!convo.parentMessageId && !!convo.conversationId) { - options = { ...options, ...convo }; - } - - const res = await client.sendMessage(text, options); - return res; -}; - -module.exports = { browserClient }; diff --git a/api/app/chatgpt-client.js b/api/app/chatgpt-client.js deleted file mode 100644 index ce3c0e2271..0000000000 --- a/api/app/chatgpt-client.js +++ /dev/null @@ -1,31 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); - -const clientOptions = { - modelOptions: { - model: 'gpt-3.5-turbo' - }, - debug: false -}; - -const askClient = async ({ text, progressCallback, convo }) => { - const ChatGPTClient = (await import('@waylaidwanderer/chatgpt-api')).default; - const store = { - store: new KeyvFile({ filename: './data/cache.json' }) - }; - - const client = new ChatGPTClient(process.env.OPENAI_KEY, clientOptions, store); - - let options = { - onProgress: async (partialRes) => await progressCallback(partialRes) - }; - - if (!!convo.parentMessageId && !!convo.conversationId) { - options = { ...options, ...convo }; - } - - const res = await client.sendMessage(text, options); - return res; -}; - -module.exports = { askClient }; diff --git a/api/app/chatgpt-custom.js b/api/app/chatgpt-custom.js deleted file mode 100644 index d31901c75f..0000000000 --- a/api/app/chatgpt-custom.js +++ /dev/null @@ -1,37 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); - -const clientOptions = { - modelOptions: { - model: 'gpt-3.5-turbo' - }, - debug: false -}; - -const customClient = async ({ text, progressCallback, convo, promptPrefix, chatGptLabel }) => { - const ChatGPTClient = (await import('@waylaidwanderer/chatgpt-api')).default; - const store = { - store: new KeyvFile({ filename: './data/cache.json' }) - }; - - clientOptions.chatGptLabel = chatGptLabel; - - if (promptPrefix.length > 0) { - clientOptions.promptPrefix = promptPrefix; - } - - const client = new ChatGPTClient(process.env.OPENAI_KEY, clientOptions, store); - - let options = { - onProgress: async (partialRes) => await progressCallback(partialRes) - }; - - if (!!convo.parentMessageId && !!convo.conversationId) { - options = { ...options, ...convo }; - } - - const res = await client.sendMessage(text, options); - return res; -}; - -module.exports = customClient; diff --git a/api/app/chatgpt.js b/api/app/chatgpt.js deleted file mode 100644 index 18edcfca83..0000000000 --- a/api/app/chatgpt.js +++ /dev/null @@ -1,38 +0,0 @@ -require('dotenv').config(); -const Keyv = require('keyv'); -const { Configuration, OpenAIApi } = require('openai'); -const messageStore = new Keyv(process.env.MONGODB_URI, { namespace: 'chatgpt' }); - -const ask = async (question, progressCallback, convo) => { - const { ChatGPTAPI } = await import('chatgpt'); - const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_KEY, messageStore }); - let options = { - onProgress: async (partialRes) => { - if (partialRes.text.length > 
0) { - await progressCallback(partialRes); - } - } - }; - - if (!!convo.parentMessageId && !!convo.conversationId) { - options = { ...options, ...convo }; - } - - const res = await api.sendMessage(question, options); - return res; -}; - -const titleConvo = async (message, response, model) => { - const configuration = new Configuration({ - apiKey: process.env.OPENAI_KEY - }); - const openai = new OpenAIApi(configuration); - const completion = await openai.createCompletion({ - model: 'text-davinci-002', - prompt: `Write a short title in title case, ideally in 5 words or less, and do not refer to the user or ${model}, that summarizes this conversation:\nUser:"${message}"\n${model}:"${response}"\nTitle: ` - }); - - return completion.data.choices[0].text.replace(/\n/g, ''); -}; - -module.exports = { ask, titleConvo }; diff --git a/api/app/citeText.js b/api/app/citeText.js deleted file mode 100644 index 536c981d4e..0000000000 --- a/api/app/citeText.js +++ /dev/null @@ -1,29 +0,0 @@ -const citationRegex = /\[\^\d+?\^]/g; - -const citeText = (res, noLinks = false) => { - let result = res.text || res; - const citations = Array.from(new Set(result.match(citationRegex))); - if (citations?.length === 0) return result; - - if (noLinks) { - citations.forEach((citation) => { - const digit = citation.match(/\d+?/g)[0]; - result = result.replaceAll(citation, `[${digit}](#) `); - }); - - return result; - } - - let sources = res.details.sourceAttributions; - if (sources?.length === 0) return result; - sources = sources.map((source) => source.seeMoreUrl); - - citations.forEach((citation) => { - const digit = citation.match(/\d+?/g)[0]; - result = result.replaceAll(citation, `[${digit}](${sources[digit - 1]}) `); - }); - - return result; -}; - -module.exports = citeText; diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js new file mode 100644 index 0000000000..e85a550e26 --- /dev/null +++ b/api/app/clients/BaseClient.js @@ -0,0 +1,1433 @@ +const crypto = require('crypto'); +const fetch = require('node-fetch'); +const { logger } = require('@librechat/data-schemas'); +const { + countTokens, + getBalanceConfig, + extractFileContext, + encodeAndFormatAudios, + encodeAndFormatVideos, + encodeAndFormatDocuments, +} = require('@librechat/api'); +const { + Constants, + ErrorTypes, + FileSources, + ContentTypes, + excludedKeys, + EModelEndpoint, + isParamEndpoint, + isAgentsEndpoint, + supportsBalanceCheck, +} = require('librechat-data-provider'); +const { + updateMessage, + getMessages, + saveMessage, + saveConvo, + getConvo, + getFiles, +} = require('~/models'); +const { getStrategyFunctions } = require('~/server/services/Files/strategies'); +const { checkBalance } = require('~/models/balanceMethods'); +const { truncateToolCallOutputs } = require('./prompts'); +const TextStream = require('./TextStream'); + +class BaseClient { + constructor(apiKey, options = {}) { + this.apiKey = apiKey; + this.sender = options.sender ?? 
'AI'; + this.contextStrategy = null; + this.currentDateString = new Date().toLocaleDateString('en-us', { + year: 'numeric', + month: 'long', + day: 'numeric', + }); + /** @type {boolean} */ + this.skipSaveConvo = false; + /** @type {boolean} */ + this.skipSaveUserMessage = false; + /** @type {string} */ + this.user; + /** @type {string} */ + this.conversationId; + /** @type {string} */ + this.responseMessageId; + /** @type {string} */ + this.parentMessageId; + /** @type {TAttachment[]} */ + this.attachments; + /** The key for the usage object's input tokens + * @type {string} */ + this.inputTokensKey = 'prompt_tokens'; + /** The key for the usage object's output tokens + * @type {string} */ + this.outputTokensKey = 'completion_tokens'; + /** @type {Set} */ + this.savedMessageIds = new Set(); + /** + * Flag to determine if the client re-submitted the latest assistant message. + * @type {boolean | undefined} */ + this.continued; + /** + * Flag to determine if the client has already fetched the conversation while saving new messages. + * @type {boolean | undefined} */ + this.fetchedConvo; + /** @type {TMessage[]} */ + this.currentMessages = []; + /** @type {import('librechat-data-provider').VisionModes | undefined} */ + this.visionMode; + } + + setOptions() { + throw new Error("Method 'setOptions' must be implemented."); + } + + async getCompletion() { + throw new Error("Method 'getCompletion' must be implemented."); + } + + /** @type {sendCompletion} */ + async sendCompletion() { + throw new Error("Method 'sendCompletion' must be implemented."); + } + + getSaveOptions() { + throw new Error('Subclasses must implement getSaveOptions'); + } + + async buildMessages() { + throw new Error('Subclasses must implement buildMessages'); + } + + async summarizeMessages() { + throw new Error('Subclasses attempted to call summarizeMessages without implementing it'); + } + + /** + * @returns {string} + */ + getResponseModel() { + if (isAgentsEndpoint(this.options.endpoint) && this.options.agent && this.options.agent.id) { + return this.options.agent.id; + } + + return this.modelOptions?.model ?? this.model; + } + + /** + * Abstract method to get the token count for a message. Subclasses must implement this method. + * @param {TMessage} responseMessage + * @returns {number} + */ + getTokenCountForResponse(responseMessage) { + logger.debug('[BaseClient] `recordTokenUsage` not implemented.', responseMessage); + } + + /** + * Abstract method to record token usage. Subclasses must implement this method. + * If a correction to the token usage is needed, the method should return an object with the corrected token counts. + * Should only be used if `recordCollectedUsage` was not used instead. + * @param {string} [model] + * @param {AppConfig['balance']} [balance] + * @param {number} promptTokens + * @param {number} completionTokens + * @returns {Promise} + */ + async recordTokenUsage({ model, balance, promptTokens, completionTokens }) { + logger.debug('[BaseClient] `recordTokenUsage` not implemented.', { + model, + balance, + promptTokens, + completionTokens, + }); + } + + /** + * Makes an HTTP request and logs the process. + * + * @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object. + * @param {RequestInit} [init] - Optional init options for the request. + * @returns {Promise} - A promise that resolves to the response of the fetch request. 
+ */ + async fetch(_url, init) { + let url = _url; + if (this.options.directEndpoint) { + url = this.options.reverseProxyUrl; + } + logger.debug(`Making request to ${url}`); + if (typeof Bun !== 'undefined') { + return await fetch(url, init); + } + return await fetch(url, init); + } + + getBuildMessagesOptions() { + throw new Error('Subclasses must implement getBuildMessagesOptions'); + } + + async generateTextStream(text, onProgress, options = {}) { + const stream = new TextStream(text, options); + await stream.processTextStream(onProgress); + } + + /** + * @returns {[string|undefined, string|undefined]} + */ + processOverideIds() { + /** @type {Record} */ + let { overrideConvoId, overrideUserMessageId } = this.options?.req?.body ?? {}; + if (overrideConvoId) { + const [conversationId, index] = overrideConvoId.split(Constants.COMMON_DIVIDER); + overrideConvoId = conversationId; + if (index !== '0') { + this.skipSaveConvo = true; + } + } + if (overrideUserMessageId) { + const [userMessageId, index] = overrideUserMessageId.split(Constants.COMMON_DIVIDER); + overrideUserMessageId = userMessageId; + if (index !== '0') { + this.skipSaveUserMessage = true; + } + } + + return [overrideConvoId, overrideUserMessageId]; + } + + async setMessageOptions(opts = {}) { + if (opts && opts.replaceOptions) { + this.setOptions(opts); + } + + const [overrideConvoId, overrideUserMessageId] = this.processOverideIds(); + const { isEdited, isContinued } = opts; + const user = opts.user ?? null; + this.user = user; + const saveOptions = this.getSaveOptions(); + this.abortController = opts.abortController ?? new AbortController(); + const requestConvoId = overrideConvoId ?? opts.conversationId; + const conversationId = requestConvoId ?? crypto.randomUUID(); + const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT; + const userMessageId = + overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID(); + let responseMessageId = opts.responseMessageId ?? crypto.randomUUID(); + let head = isEdited ? responseMessageId : parentMessageId; + this.currentMessages = (await this.loadHistory(conversationId, head)) ?? []; + this.conversationId = conversationId; + + if (isEdited && !isContinued) { + responseMessageId = crypto.randomUUID(); + head = responseMessageId; + this.currentMessages[this.currentMessages.length - 1].messageId = head; + } + + if (opts.isRegenerate && responseMessageId.endsWith('_')) { + responseMessageId = crypto.randomUUID(); + } + + this.responseMessageId = responseMessageId; + + return { + ...opts, + user, + head, + saveOptions, + userMessageId, + requestConvoId, + conversationId, + parentMessageId, + responseMessageId, + }; + } + + createUserMessage({ messageId, parentMessageId, conversationId, text }) { + return { + messageId, + parentMessageId, + conversationId, + sender: 'User', + text, + isCreatedByUser: true, + }; + } + + async handleStartMethods(message, opts) { + const { + user, + head, + saveOptions, + userMessageId, + requestConvoId, + conversationId, + parentMessageId, + responseMessageId, + } = await this.setMessageOptions(opts); + + const userMessage = opts.isEdited + ? 
this.currentMessages[this.currentMessages.length - 2] + : this.createUserMessage({ + messageId: userMessageId, + parentMessageId, + conversationId, + text: message, + }); + + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessage, + conversationId, + responseMessageId, + sender: this.sender, + }); + } + + if (typeof opts?.onStart === 'function') { + const isNewConvo = !requestConvoId && parentMessageId === Constants.NO_PARENT; + opts.onStart(userMessage, responseMessageId, isNewConvo); + } + + return { + ...opts, + user, + head, + conversationId, + responseMessageId, + saveOptions, + userMessage, + }; + } + + /** + * Adds instructions to the messages array. If the instructions object is empty or undefined, + * the original messages array is returned. Otherwise, the instructions are added to the messages + * array either at the beginning (default) or preserving the last message at the end. + * + * @param {Array} messages - An array of messages. + * @param {Object} instructions - An object containing instructions to be added to the messages. + * @param {boolean} [beforeLast=false] - If true, adds instructions before the last message; if false, adds at the beginning. + * @returns {Array} An array containing messages and instructions, or the original messages if instructions are empty. + */ + addInstructions(messages, instructions, beforeLast = false) { + if (!instructions || Object.keys(instructions).length === 0) { + return messages; + } + + if (!beforeLast) { + return [instructions, ...messages]; + } + + // Legacy behavior: add instructions before the last message + const payload = []; + if (messages.length > 1) { + payload.push(...messages.slice(0, -1)); + } + + payload.push(instructions); + + if (messages.length > 0) { + payload.push(messages[messages.length - 1]); + } + + return payload; + } + + async handleTokenCountMap(tokenCountMap) { + if (this.clientName === EModelEndpoint.agents) { + return; + } + if (this.currentMessages.length === 0) { + return; + } + + for (let i = 0; i < this.currentMessages.length; i++) { + // Skip the last message, which is the user message. + if (i === this.currentMessages.length - 1) { + break; + } + + const message = this.currentMessages[i]; + const { messageId } = message; + const update = {}; + + if (messageId === tokenCountMap.summaryMessage?.messageId) { + logger.debug(`[BaseClient] Adding summary props to ${messageId}.`); + + update.summary = tokenCountMap.summaryMessage.content; + update.summaryTokenCount = tokenCountMap.summaryMessage.tokenCount; + } + + if (message.tokenCount && !update.summaryTokenCount) { + logger.debug(`[BaseClient] Skipping ${messageId}: already had a token count.`); + continue; + } + + const tokenCount = tokenCountMap[messageId]; + if (tokenCount) { + message.tokenCount = tokenCount; + update.tokenCount = tokenCount; + await this.updateMessageInDatabase({ messageId, ...update }); + } + } + } + + concatenateMessages(messages) { + return messages.reduce((acc, message) => { + const nameOrRole = message.name ?? message.role; + return acc + `${nameOrRole}:\n${message.content}\n\n`; + }, ''); + } + + /** + * This method processes an array of messages and returns a context of messages that fit within a specified token limit. + * It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached. + * If the token limit would be exceeded by adding a message, that message is not added to the context and remains in the original array. 
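+   * For example (hypothetical counts, not taken from this codebase): with a `maxContextTokens` of 10, no
+   * instructions, and three messages with token counts [4, 4, 4] ordered oldest to newest, the newest
+   * message fits (3 priming tokens + 4 = 7), but adding the next would exceed the limit, so `context`
+   * holds only the newest message and the two older messages are returned in `messagesToRefine`.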
+ * The method uses `push` and `pop` operations for efficient array manipulation, and reverses the context array at the end to maintain the original order of the messages. + * + * @param {Object} params + * @param {TMessage[]} params.messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest. + * @param {number} [params.maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`. + * @param {{ role: 'system', content: text, tokenCount: number }} [params.instructions] - Instructions already added to the context at index 0. + * @returns {Promise<{ + * context: TMessage[], + * remainingContextTokens: number, + * messagesToRefine: TMessage[], + * }>} An object with three properties: `context`, `remainingContextTokens`, and `messagesToRefine`. + * `context` is an array of messages that fit within the token limit. + * `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context. + * `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit. + */ + async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) { + // Every reply is primed with <|start|>assistant<|message|>, so we + // start with 3 tokens for the label after all messages have been counted. + let currentTokenCount = 3; + const instructionsTokenCount = instructions?.tokenCount ?? 0; + let remainingContextTokens = + (maxContextTokens ?? this.maxContextTokens) - instructionsTokenCount; + const messages = [..._messages]; + + const context = []; + + if (currentTokenCount < remainingContextTokens) { + while (messages.length > 0 && currentTokenCount < remainingContextTokens) { + if (messages.length === 1 && instructions) { + break; + } + const poppedMessage = messages.pop(); + const { tokenCount } = poppedMessage; + + if (poppedMessage && currentTokenCount + tokenCount <= remainingContextTokens) { + context.push(poppedMessage); + currentTokenCount += tokenCount; + } else { + messages.push(poppedMessage); + break; + } + } + } + + if (instructions) { + context.push(_messages[0]); + messages.shift(); + } + + const prunedMemory = messages; + remainingContextTokens -= currentTokenCount; + + return { + context: context.reverse(), + remainingContextTokens, + messagesToRefine: prunedMemory, + }; + } + + async handleContextStrategy({ + instructions, + orderedMessages, + formattedMessages, + buildTokenMap = true, + }) { + let _instructions; + let tokenCount; + + if (instructions) { + ({ tokenCount, ..._instructions } = instructions); + } + + _instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount); + if (tokenCount && tokenCount > this.maxContextTokens) { + const info = `${tokenCount} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn(`Instructions token count exceeds max token count (${info}).`); + throw new Error(errorMessage); + } + + if (this.clientName === EModelEndpoint.agents) { + const { dbMessages, editedIndices } = truncateToolCallOutputs( + orderedMessages, + this.maxContextTokens, + this.getTokenCountForMessage.bind(this), + ); + + if (editedIndices.length > 0) { + logger.debug('[BaseClient] Truncated tool call outputs:', editedIndices); + for (const index of editedIndices) { + formattedMessages[index].content = dbMessages[index].content; + } + orderedMessages 
= dbMessages; + } + } + + let orderedWithInstructions = this.addInstructions(orderedMessages, instructions); + + let { context, remainingContextTokens, messagesToRefine } = + await this.getMessagesWithinTokenLimit({ + messages: orderedWithInstructions, + instructions, + }); + + logger.debug('[BaseClient] Context Count (1/2)', { + remainingContextTokens, + maxContextTokens: this.maxContextTokens, + }); + + let summaryMessage; + let summaryTokenCount; + let { shouldSummarize } = this; + + // Calculate the difference in length to determine how many messages were discarded if any + let payload; + let { length } = formattedMessages; + length += instructions != null ? 1 : 0; + const diff = length - context.length; + const firstMessage = orderedWithInstructions[0]; + const usePrevSummary = + shouldSummarize && + diff === 1 && + firstMessage?.summary && + this.previous_summary.messageId === firstMessage.messageId; + + if (diff > 0) { + payload = formattedMessages.slice(diff); + logger.debug( + `[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`, + ); + } + + payload = this.addInstructions(payload ?? formattedMessages, _instructions); + + const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1]; + if (payload.length === 0 && !shouldSummarize && latestMessage) { + const info = `${latestMessage.tokenCount} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn(`Prompt token count exceeds max token count (${info}).`); + throw new Error(errorMessage); + } else if ( + _instructions && + payload.length === 1 && + payload[0].content === _instructions.content + ) { + const info = `${tokenCount + 3} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn( + `Including instructions, the prompt token count exceeds remaining max token count (${info}).`, + ); + throw new Error(errorMessage); + } + + if (usePrevSummary) { + summaryMessage = { role: 'system', content: firstMessage.summary }; + summaryTokenCount = firstMessage.summaryTokenCount; + payload.unshift(summaryMessage); + remainingContextTokens -= summaryTokenCount; + } else if (shouldSummarize && messagesToRefine.length > 0) { + ({ summaryMessage, summaryTokenCount } = await this.summarizeMessages({ + messagesToRefine, + remainingContextTokens, + })); + summaryMessage && payload.unshift(summaryMessage); + remainingContextTokens -= summaryTokenCount; + } + + // Make sure to only continue summarization logic if the summary message was generated + shouldSummarize = summaryMessage != null && shouldSummarize === true; + + logger.debug('[BaseClient] Context Count (2/2)', { + remainingContextTokens, + maxContextTokens: this.maxContextTokens, + }); + + /** @type {Record | undefined} */ + let tokenCountMap; + if (buildTokenMap) { + const currentPayload = shouldSummarize ? 
orderedWithInstructions : context; + tokenCountMap = currentPayload.reduce((map, message, index) => { + const { messageId } = message; + if (!messageId) { + return map; + } + + if (shouldSummarize && index === messagesToRefine.length - 1 && !usePrevSummary) { + map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount }; + } + + map[messageId] = currentPayload[index].tokenCount; + return map; + }, {}); + } + + const promptTokens = this.maxContextTokens - remainingContextTokens; + + logger.debug('[BaseClient] tokenCountMap:', tokenCountMap); + logger.debug('[BaseClient]', { + promptTokens, + remainingContextTokens, + payloadSize: payload.length, + maxContextTokens: this.maxContextTokens, + }); + + return { payload, tokenCountMap, promptTokens, messages: orderedWithInstructions }; + } + + async sendMessage(message, opts = {}) { + const appConfig = this.options.req?.config; + /** @type {Promise} */ + let userMessagePromise; + const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } = + await this.handleStartMethods(message, opts); + + if (opts.progressCallback) { + opts.onProgress = opts.progressCallback.call(null, { + ...(opts.progressOptions ?? {}), + parentMessageId: userMessage.messageId, + messageId: responseMessageId, + }); + } + + const { editedContent } = opts; + + // It's not necessary to push to currentMessages + // depending on subclass implementation of handling messages + // When this is an edit, all messages are already in currentMessages, both user and response + if (isEdited) { + let latestMessage = this.currentMessages[this.currentMessages.length - 1]; + if (!latestMessage) { + latestMessage = { + messageId: responseMessageId, + conversationId, + parentMessageId: userMessage.messageId, + isCreatedByUser: false, + model: this.modelOptions?.model ?? this.model, + sender: this.sender, + }; + this.currentMessages.push(userMessage, latestMessage); + } else if (editedContent != null) { + // Handle editedContent for content parts + if (editedContent && latestMessage.content && Array.isArray(latestMessage.content)) { + const { index, text, type } = editedContent; + if (index >= 0 && index < latestMessage.content.length) { + const contentPart = latestMessage.content[index]; + if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) { + contentPart[ContentTypes.THINK] = text; + } else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) { + contentPart[ContentTypes.TEXT] = text; + } + } + } + } + this.continued = true; + } else { + this.currentMessages.push(userMessage); + } + + /** + * When the userMessage is pushed to currentMessages, the parentMessage is the userMessageId. + * this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation + */ + const parentMessageId = isEdited ? 
head : userMessage.messageId; + this.parentMessageId = parentMessageId; + let { + prompt: payload, + tokenCountMap, + promptTokens, + } = await this.buildMessages( + this.currentMessages, + parentMessageId, + this.getBuildMessagesOptions(opts), + opts, + ); + + if (tokenCountMap) { + logger.debug('[BaseClient] tokenCountMap', tokenCountMap); + if (tokenCountMap[userMessage.messageId]) { + userMessage.tokenCount = tokenCountMap[userMessage.messageId]; + logger.debug('[BaseClient] userMessage', userMessage); + } + + this.handleTokenCountMap(tokenCountMap); + } + + if (!isEdited && !this.skipSaveUserMessage) { + userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); + this.savedMessageIds.add(userMessage.messageId); + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessagePromise, + }); + } + } + + const balanceConfig = getBalanceConfig(appConfig); + if ( + balanceConfig?.enabled && + supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint] + ) { + await checkBalance({ + req: this.options.req, + res: this.options.res, + txData: { + user: this.user, + tokenType: 'prompt', + amount: promptTokens, + endpoint: this.options.endpoint, + model: this.modelOptions?.model ?? this.model, + endpointTokenConfig: this.options.endpointTokenConfig, + }, + }); + } + + const { completion, metadata } = await this.sendCompletion(payload, opts); + if (this.abortController) { + this.abortController.requestCompleted = true; + } + + /** @type {TMessage} */ + const responseMessage = { + messageId: responseMessageId, + conversationId, + parentMessageId: userMessage.messageId, + isCreatedByUser: false, + isEdited, + model: this.getResponseModel(), + sender: this.sender, + promptTokens, + iconURL: this.options.iconURL, + endpoint: this.options.endpoint, + ...(this.metadata ?? {}), + metadata, + }; + + if (typeof completion === 'string') { + responseMessage.text = completion; + } else if ( + Array.isArray(completion) && + (this.clientName === EModelEndpoint.agents || + isParamEndpoint(this.options.endpoint, this.options.endpointType)) + ) { + responseMessage.text = ''; + + if (!opts.editedContent || this.currentMessages.length === 0) { + responseMessage.content = completion; + } else { + const latestMessage = this.currentMessages[this.currentMessages.length - 1]; + if (!latestMessage?.content) { + responseMessage.content = completion; + } else { + const existingContent = [...latestMessage.content]; + const { type: editedType } = opts.editedContent; + responseMessage.content = this.mergeEditedContent( + existingContent, + completion, + editedType, + ); + } + } + } else if (Array.isArray(completion)) { + responseMessage.text = completion.join(''); + } + + if ( + tokenCountMap && + this.recordTokenUsage && + this.getTokenCountForResponse && + this.getTokenCount + ) { + let completionTokens; + + /** + * Metadata about input/output costs for the current message. The client + * should provide a function to get the current stream usage metadata; if not, + * use the legacy token estimations. + * @type {StreamUsage | null} */ + const usage = this.getStreamUsage != null ? 
this.getStreamUsage() : null; + + if (usage != null && Number(usage[this.outputTokensKey]) > 0) { + responseMessage.tokenCount = usage[this.outputTokensKey]; + completionTokens = responseMessage.tokenCount; + await this.updateUserMessageTokenCount({ + usage, + tokenCountMap, + userMessage, + userMessagePromise, + opts, + }); + } else { + responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage); + completionTokens = responseMessage.tokenCount; + await this.recordTokenUsage({ + usage, + promptTokens, + completionTokens, + balance: balanceConfig, + model: responseMessage.model, + }); + } + } + + if (userMessagePromise) { + await userMessagePromise; + } + + if (this.artifactPromises) { + responseMessage.attachments = (await Promise.all(this.artifactPromises)).filter((a) => a); + } + + if (this.options.attachments) { + try { + saveOptions.files = this.options.attachments.map((attachments) => attachments.file_id); + } catch (error) { + logger.error('[BaseClient] Error mapping attachments for conversation', error); + } + } + + responseMessage.databasePromise = this.saveMessageToDatabase( + responseMessage, + saveOptions, + user, + ); + this.savedMessageIds.add(responseMessage.messageId); + delete responseMessage.tokenCount; + return responseMessage; + } + + /** + * Stream usage should only be used for user message token count re-calculation if: + * - The stream usage is available, with input tokens greater than 0, + * - the client provides a function to calculate the current token count, + * - files are being resent with every message (default behavior; or if `false`, with no attachments), + * - the `promptPrefix` (custom instructions) is not set. + * + * In these cases, the legacy token estimations would be more accurate. + * + * TODO: included system messages in the `orderedMessages` accounting, potentially as a + * separate message in the UI. ChatGPT does this through "hidden" system messages. 
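+   *
+   * For example (illustrative values): if the provider reports `usage = { prompt_tokens: 120 }`, the client
+   * implements `calculateCurrentTokenCount`, `resendFiles` is enabled, and no `promptPrefix` is set, the saved
+   * user message's `tokenCount` is recalculated from the reported usage; otherwise the original client-side
+   * estimate is kept.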
+ * @param {object} params + * @param {StreamUsage} params.usage + * @param {Record} params.tokenCountMap + * @param {TMessage} params.userMessage + * @param {Promise} params.userMessagePromise + * @param {object} params.opts + */ + async updateUserMessageTokenCount({ + usage, + tokenCountMap, + userMessage, + userMessagePromise, + opts, + }) { + /** @type {boolean} */ + const shouldUpdateCount = + this.calculateCurrentTokenCount != null && + Number(usage[this.inputTokensKey]) > 0 && + (this.options.resendFiles || + (!this.options.resendFiles && !this.options.attachments?.length)) && + !this.options.promptPrefix; + + if (!shouldUpdateCount) { + return; + } + + const userMessageTokenCount = this.calculateCurrentTokenCount({ + currentMessageId: userMessage.messageId, + tokenCountMap, + usage, + }); + + if (userMessageTokenCount === userMessage.tokenCount) { + return; + } + + userMessage.tokenCount = userMessageTokenCount; + /* + Note: `AgentController` saves the user message if not saved here + (noted by `savedMessageIds`), so we update the count of its `userMessage` reference + */ + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessage, + }); + } + /* + Note: we update the user message to be sure it gets the calculated token count; + though `AgentController` saves the user message if not saved here + (noted by `savedMessageIds`), EditController does not + */ + await userMessagePromise; + await this.updateMessageInDatabase({ + messageId: userMessage.messageId, + tokenCount: userMessageTokenCount, + }); + } + + async loadHistory(conversationId, parentMessageId = null) { + logger.debug('[BaseClient] Loading history:', { conversationId, parentMessageId }); + + const messages = (await getMessages({ conversationId })) ?? []; + + if (messages.length === 0) { + return []; + } + + let mapMethod = null; + if (this.getMessageMapMethod) { + mapMethod = this.getMessageMapMethod(); + } + + let _messages = this.constructor.getMessagesForConversation({ + messages, + parentMessageId, + mapMethod, + }); + + _messages = await this.addPreviousAttachments(_messages); + + if (!this.shouldSummarize) { + return _messages; + } + + // Find the latest message with a 'summary' property + for (let i = _messages.length - 1; i >= 0; i--) { + if (_messages[i]?.summary) { + this.previous_summary = _messages[i]; + break; + } + } + + if (this.previous_summary) { + const { messageId, summary, tokenCount, summaryTokenCount } = this.previous_summary; + logger.debug('[BaseClient] Previous summary:', { + messageId, + summary, + tokenCount, + summaryTokenCount, + }); + } + + return _messages; + } + + /** + * Save a message to the database. + * @param {TMessage} message + * @param {Partial} endpointOptions + * @param {string | null} user + */ + async saveMessageToDatabase(message, endpointOptions, user = null) { + if (this.user && user !== this.user) { + throw new Error('User mismatch.'); + } + + const savedMessage = await saveMessage( + this.options?.req, + { + ...message, + endpoint: this.options.endpoint, + unfinished: false, + user, + }, + { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' }, + ); + + if (this.skipSaveConvo) { + return { message: savedMessage }; + } + + const fieldsToKeep = { + conversationId: message.conversationId, + endpoint: this.options.endpoint, + endpointType: this.options.endpointType, + ...endpointOptions, + }; + + const existingConvo = + this.fetchedConvo === true + ? 
null + : await getConvo(this.options?.req?.user?.id, message.conversationId); + + const unsetFields = {}; + const exceptions = new Set(['spec', 'iconURL']); + const hasNonEphemeralAgent = + isAgentsEndpoint(this.options.endpoint) && + endpointOptions?.agent_id && + endpointOptions.agent_id !== Constants.EPHEMERAL_AGENT_ID; + if (hasNonEphemeralAgent) { + exceptions.add('model'); + } + if (existingConvo != null) { + this.fetchedConvo = true; + for (const key in existingConvo) { + if (!key) { + continue; + } + if (excludedKeys.has(key) && !exceptions.has(key)) { + continue; + } + + if (endpointOptions?.[key] === undefined) { + unsetFields[key] = 1; + } + } + } + + const conversation = await saveConvo(this.options?.req, fieldsToKeep, { + context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo', + unsetFields, + }); + + return { message: savedMessage, conversation }; + } + + /** + * Update a message in the database. + * @param {Partial} message + */ + async updateMessageInDatabase(message) { + await updateMessage(this.options.req, message); + } + + /** + * Iterate through messages, building an array based on the parentMessageId. + * + * This function constructs a conversation thread by traversing messages from a given parentMessageId up to the root message. + * It handles cyclic references by ensuring that a message is not processed more than once. + * If the 'summary' option is set to true and a message has a 'summary' property: + * - The message's 'role' is set to 'system'. + * - The message's 'text' is set to its 'summary'. + * - If the message has a 'summaryTokenCount', the message's 'tokenCount' is set to 'summaryTokenCount'. + * The traversal stops at the message with the 'summary' property. + * + * Each message object should have an 'id' or 'messageId' property and may have a 'parentMessageId' property. + * The 'parentMessageId' is the ID of the message that the current message is a reply to. + * If 'parentMessageId' is not present, null, or is Constants.NO_PARENT, + * the message is considered a root message. + * + * @param {Object} options - The options for the function. + * @param {TMessage[]} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property. + * @param {string} options.parentMessageId - The ID of the parent message to start the traversal from. + * @param {Function} [options.mapMethod] - An optional function to map over the ordered messages. If provided, it will be applied to each message in the resulting array. + * @param {boolean} [options.summary=false] - If set to true, the traversal modifies messages with 'summary' and 'summaryTokenCount' properties and stops at the message with a 'summary' property. + * @returns {TMessage[]} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'. + */ + static getMessagesForConversation({ + messages, + parentMessageId, + mapMethod = null, + summary = false, + }) { + if (!messages || messages.length === 0) { + return []; + } + + const orderedMessages = []; + let currentMessageId = parentMessageId; + const visitedMessageIds = new Set(); + + while (currentMessageId) { + if (visitedMessageIds.has(currentMessageId)) { + break; + } + const message = messages.find((msg) => { + const messageId = msg.messageId ?? 
msg.id; + return messageId === currentMessageId; + }); + + visitedMessageIds.add(currentMessageId); + + if (!message) { + break; + } + + if (summary && message.summary) { + message.role = 'system'; + message.text = message.summary; + } + + if (summary && message.summaryTokenCount) { + message.tokenCount = message.summaryTokenCount; + } + + orderedMessages.push(message); + + if (summary && message.summary) { + break; + } + + currentMessageId = + message.parentMessageId === Constants.NO_PARENT ? null : message.parentMessageId; + } + + orderedMessages.reverse(); + + if (mapMethod) { + return orderedMessages.map(mapMethod); + } + + return orderedMessages; + } + + /** + * Algorithm adapted from "6. Counting tokens for chat API calls" of + * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb + * + * An additional 3 tokens need to be added for assistant label priming after all messages have been counted. + * In our implementation, this is accounted for in the getMessagesWithinTokenLimit method. + * + * The content parts example was adapted from the following example: + * https://github.com/openai/openai-cookbook/pull/881/files + * + * Note: image token calculation is to be done elsewhere where we have access to the image metadata + * + * @param {Object} message + */ + getTokenCountForMessage(message) { + // Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models + let tokensPerMessage = 3; + let tokensPerName = 1; + const model = this.modelOptions?.model ?? this.model; + + if (model === 'gpt-3.5-turbo-0301') { + tokensPerMessage = 4; + tokensPerName = -1; + } + + const processValue = (value) => { + if (Array.isArray(value)) { + for (let item of value) { + if ( + !item || + !item.type || + item.type === ContentTypes.THINK || + item.type === ContentTypes.ERROR || + item.type === ContentTypes.IMAGE_URL + ) { + continue; + } + + if (item.type === ContentTypes.TOOL_CALL && item.tool_call != null) { + const toolName = item.tool_call?.name || ''; + if (toolName != null && toolName && typeof toolName === 'string') { + numTokens += this.getTokenCount(toolName); + } + + const args = item.tool_call?.args || ''; + if (args != null && args && typeof args === 'string') { + numTokens += this.getTokenCount(args); + } + + const output = item.tool_call?.output || ''; + if (output != null && output && typeof output === 'string') { + numTokens += this.getTokenCount(output); + } + continue; + } + + const nestedValue = item[item.type]; + + if (!nestedValue) { + continue; + } + + processValue(nestedValue); + } + } else if (typeof value === 'string') { + numTokens += this.getTokenCount(value); + } else if (typeof value === 'number') { + numTokens += this.getTokenCount(value.toString()); + } else if (typeof value === 'boolean') { + numTokens += this.getTokenCount(value.toString()); + } + }; + + let numTokens = tokensPerMessage; + for (let [key, value] of Object.entries(message)) { + processValue(value); + + if (key === 'name') { + numTokens += tokensPerName; + } + } + return numTokens; + } + + /** + * Merges completion content with existing content when editing TEXT or THINK types + * @param {Array} existingContent - The existing content array + * @param {Array} newCompletion - The new completion content + * @param {string} editedType - The type of content being edited + * @returns {Array} The merged content array + */ + mergeEditedContent(existingContent, newCompletion, editedType) { + if (!newCompletion.length) { + return 
existingContent.concat(newCompletion); + } + + if (editedType !== ContentTypes.TEXT && editedType !== ContentTypes.THINK) { + return existingContent.concat(newCompletion); + } + + const lastIndex = existingContent.length - 1; + const lastExisting = existingContent[lastIndex]; + const firstNew = newCompletion[0]; + + if (lastExisting?.type !== firstNew?.type || firstNew?.type !== editedType) { + return existingContent.concat(newCompletion); + } + + const mergedContent = [...existingContent]; + if (editedType === ContentTypes.TEXT) { + mergedContent[lastIndex] = { + ...mergedContent[lastIndex], + [ContentTypes.TEXT]: + (mergedContent[lastIndex][ContentTypes.TEXT] || '') + (firstNew[ContentTypes.TEXT] || ''), + }; + } else { + mergedContent[lastIndex] = { + ...mergedContent[lastIndex], + [ContentTypes.THINK]: + (mergedContent[lastIndex][ContentTypes.THINK] || '') + + (firstNew[ContentTypes.THINK] || ''), + }; + } + + // Add remaining completion items + return mergedContent.concat(newCompletion.slice(1)); + } + + async sendPayload(payload, opts = {}) { + if (opts && typeof opts === 'object') { + this.setOptions(opts); + } + + return await this.sendCompletion(payload, opts); + } + + async addDocuments(message, attachments) { + const documentResult = await encodeAndFormatDocuments( + this.options.req, + attachments, + { + provider: this.options.agent?.provider ?? this.options.endpoint, + endpoint: this.options.agent?.endpoint ?? this.options.endpoint, + useResponsesApi: this.options.agent?.model_parameters?.useResponsesApi, + }, + getStrategyFunctions, + ); + message.documents = + documentResult.documents && documentResult.documents.length + ? documentResult.documents + : undefined; + return documentResult.files; + } + + async addVideos(message, attachments) { + const videoResult = await encodeAndFormatVideos( + this.options.req, + attachments, + { + provider: this.options.agent?.provider ?? this.options.endpoint, + endpoint: this.options.agent?.endpoint ?? this.options.endpoint, + }, + getStrategyFunctions, + ); + message.videos = + videoResult.videos && videoResult.videos.length ? videoResult.videos : undefined; + return videoResult.files; + } + + async addAudios(message, attachments) { + const audioResult = await encodeAndFormatAudios( + this.options.req, + attachments, + { + provider: this.options.agent?.provider ?? this.options.endpoint, + endpoint: this.options.agent?.endpoint ?? this.options.endpoint, + }, + getStrategyFunctions, + ); + message.audios = + audioResult.audios && audioResult.audios.length ? audioResult.audios : undefined; + return audioResult.files; + } + + /** + * Extracts text context from attachments and sets it on the message. + * This handles text that was already extracted from files (OCR, transcriptions, document text, etc.) + * @param {TMessage} message - The message to add context to + * @param {MongoFile[]} attachments - Array of file attachments + * @returns {Promise} + */ + async addFileContextToMessage(message, attachments) { + const fileContext = await extractFileContext({ + attachments, + req: this.options?.req, + tokenCountFn: (text) => countTokens(text), + }); + + if (fileContext) { + message.fileContext = fileContext; + } + } + + async processAttachments(message, attachments) { + const categorizedAttachments = { + images: [], + videos: [], + audios: [], + documents: [], + }; + + const allFiles = []; + + for (const file of attachments) { + /** @type {FileSources} */ + const source = file.source ?? 
FileSources.local; + if (source === FileSources.text) { + allFiles.push(file); + continue; + } + if (file.embedded === true || file.metadata?.fileIdentifier != null) { + allFiles.push(file); + continue; + } + + if (file.type.startsWith('image/')) { + categorizedAttachments.images.push(file); + } else if (file.type === 'application/pdf') { + categorizedAttachments.documents.push(file); + allFiles.push(file); + } else if (file.type.startsWith('video/')) { + categorizedAttachments.videos.push(file); + allFiles.push(file); + } else if (file.type.startsWith('audio/')) { + categorizedAttachments.audios.push(file); + allFiles.push(file); + } + } + + const [imageFiles] = await Promise.all([ + categorizedAttachments.images.length > 0 + ? this.addImageURLs(message, categorizedAttachments.images) + : Promise.resolve([]), + categorizedAttachments.documents.length > 0 + ? this.addDocuments(message, categorizedAttachments.documents) + : Promise.resolve([]), + categorizedAttachments.videos.length > 0 + ? this.addVideos(message, categorizedAttachments.videos) + : Promise.resolve([]), + categorizedAttachments.audios.length > 0 + ? this.addAudios(message, categorizedAttachments.audios) + : Promise.resolve([]), + ]); + + allFiles.push(...imageFiles); + + const seenFileIds = new Set(); + const uniqueFiles = []; + + for (const file of allFiles) { + if (file.file_id && !seenFileIds.has(file.file_id)) { + seenFileIds.add(file.file_id); + uniqueFiles.push(file); + } else if (!file.file_id) { + uniqueFiles.push(file); + } + } + + return uniqueFiles; + } + + /** + * @param {TMessage[]} _messages + * @returns {Promise} + */ + async addPreviousAttachments(_messages) { + if (!this.options.resendFiles) { + return _messages; + } + + const seen = new Set(); + const attachmentsProcessed = + this.options.attachments && !(this.options.attachments instanceof Promise); + if (attachmentsProcessed) { + for (const attachment of this.options.attachments) { + seen.add(attachment.file_id); + } + } + + /** + * + * @param {TMessage} message + */ + const processMessage = async (message) => { + if (!this.message_file_map) { + /** @type {Record */ + this.message_file_map = {}; + } + + const fileIds = []; + for (const file of message.files) { + if (seen.has(file.file_id)) { + continue; + } + fileIds.push(file.file_id); + seen.add(file.file_id); + } + + if (fileIds.length === 0) { + return message; + } + + const files = await getFiles( + { + file_id: { $in: fileIds }, + }, + {}, + {}, + ); + + await this.addFileContextToMessage(message, files); + await this.processAttachments(message, files); + + this.message_file_map[message.messageId] = files; + return message; + }; + + const promises = []; + + for (const message of _messages) { + if (!message.files) { + promises.push(message); + continue; + } + + promises.push(processMessage(message)); + } + + const messages = await Promise.all(promises); + + this.checkVisionRequest(Object.values(this.message_file_map ?? 
{}).flat()); + return messages; + } +} + +module.exports = BaseClient; diff --git a/api/app/clients/OllamaClient.js b/api/app/clients/OllamaClient.js new file mode 100644 index 0000000000..d0dda519fe --- /dev/null +++ b/api/app/clients/OllamaClient.js @@ -0,0 +1,167 @@ +const { z } = require('zod'); +const axios = require('axios'); +const { Ollama } = require('ollama'); +const { sleep } = require('@librechat/agents'); +const { logger } = require('@librechat/data-schemas'); +const { Constants } = require('librechat-data-provider'); +const { resolveHeaders, deriveBaseURL } = require('@librechat/api'); + +const ollamaPayloadSchema = z.object({ + mirostat: z.number().optional(), + mirostat_eta: z.number().optional(), + mirostat_tau: z.number().optional(), + num_ctx: z.number().optional(), + repeat_last_n: z.number().optional(), + repeat_penalty: z.number().optional(), + temperature: z.number().optional(), + seed: z.number().nullable().optional(), + stop: z.array(z.string()).optional(), + tfs_z: z.number().optional(), + num_predict: z.number().optional(), + top_k: z.number().optional(), + top_p: z.number().optional(), + stream: z.optional(z.boolean()), + model: z.string(), +}); + +/** + * @param {string} imageUrl + * @returns {string} + * @throws {Error} + */ +const getValidBase64 = (imageUrl) => { + const parts = imageUrl.split(';base64,'); + + if (parts.length === 2) { + return parts[1]; + } else { + logger.error('Invalid or no Base64 string found in URL.'); + } +}; + +class OllamaClient { + constructor(options = {}) { + const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434'); + this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE; + this.headers = options.headers ?? {}; + /** @type {Ollama} */ + this.client = new Ollama({ host }); + } + + /** + * Fetches Ollama models from the specified base API path. + * @param {string} baseURL + * @param {Object} [options] - Optional configuration + * @param {Partial} [options.user] - User object for header resolution + * @param {Record} [options.headers] - Headers to include in the request + * @returns {Promise} The Ollama models. 
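+   *
+   * @example
+   * // Illustrative sketch; the host below is the constructor's default and the model names are made up.
+   * const models = await OllamaClient.fetchModels('http://localhost:11434');
+   * // e.g. ['llama3:latest', 'mistral:latest']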
+   * @throws {Error} Throws if the Ollama API request fails
+   */
+  static async fetchModels(baseURL, options = {}) {
+    if (!baseURL) {
+      return [];
+    }
+
+    const ollamaEndpoint = deriveBaseURL(baseURL);
+
+    const resolvedHeaders = resolveHeaders({
+      headers: options.headers,
+      user: options.user,
+    });
+
+    /** @type {Promise<import('axios').AxiosResponse>} */
+    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+      headers: resolvedHeaders,
+      timeout: 5000,
+    });
+
+    const models = response.data.models.map((tag) => tag.name);
+    return models;
+  }
+
+  /**
+   * @param {ChatCompletionMessage[]} messages
+   * @returns {OllamaMessage[]}
+   */
+  static formatOpenAIMessages(messages) {
+    const ollamaMessages = [];
+
+    for (const message of messages) {
+      if (typeof message.content === 'string') {
+        ollamaMessages.push({
+          role: message.role,
+          content: message.content,
+        });
+        continue;
+      }
+
+      let aggregatedText = '';
+      let imageUrls = [];
+
+      for (const content of message.content) {
+        if (content.type === 'text') {
+          aggregatedText += content.text + ' ';
+        } else if (content.type === 'image_url') {
+          imageUrls.push(getValidBase64(content.image_url.url));
+        }
+      }
+
+      const ollamaMessage = {
+        role: message.role,
+        content: aggregatedText.trim(),
+      };
+
+      if (imageUrls.length > 0) {
+        ollamaMessage.images = imageUrls;
+      }
+
+      ollamaMessages.push(ollamaMessage);
+    }
+
+    return ollamaMessages;
+  }
+
+  /**
+   * @param {Object} params
+   * @param {ChatCompletionPayload} params.payload
+   * @param {onTokenProgress} params.onProgress
+   * @param {AbortController} params.abortController
+   */
+  async chatCompletion({ payload, onProgress, abortController = null }) {
+    let intermediateReply = '';
+
+    try {
+      const parameters = ollamaPayloadSchema.parse(payload);
+      const messages = OllamaClient.formatOpenAIMessages(payload.messages);
+
+      if (parameters.stream) {
+        const stream = await this.client.chat({
+          messages,
+          ...parameters,
+        });
+
+        for await (const chunk of stream) {
+          const token = chunk.message.content;
+          intermediateReply += token;
+          onProgress(token);
+          if (abortController?.signal.aborted) {
+            stream.controller.abort();
+            break;
+          }
+
+          await sleep(this.streamRate);
+        }
+      }
+      // TODO: regular completion
+      else {
+        // const generation = await this.client.generate(payload);
+      }
+    } catch (err) {
+      logger.error('[OllamaClient.chatCompletion]', err);
+      throw err;
+    }
+
+    return intermediateReply;
+  }
+}
+
+module.exports = { OllamaClient, ollamaPayloadSchema };
diff --git a/api/app/clients/TextStream.js b/api/app/clients/TextStream.js
new file mode 100644
index 0000000000..ea3d8980e9
--- /dev/null
+++ b/api/app/clients/TextStream.js
@@ -0,0 +1,60 @@
+const { Readable } = require('stream');
+const { logger } = require('@librechat/data-schemas');
+
+class TextStream extends Readable {
+  constructor(text, options = {}) {
+    super(options);
+    this.text = text;
+    this.currentIndex = 0;
+    this.minChunkSize = options.minChunkSize ?? 2;
+    this.maxChunkSize = options.maxChunkSize ?? 4;
+    this.delay = options.delay ?? 
20; // Time in milliseconds + } + + _read() { + const { delay, minChunkSize, maxChunkSize } = this; + + if (this.currentIndex < this.text.length) { + setTimeout(() => { + const remainingChars = this.text.length - this.currentIndex; + const chunkSize = Math.min(this.randomInt(minChunkSize, maxChunkSize + 1), remainingChars); + + const chunk = this.text.slice(this.currentIndex, this.currentIndex + chunkSize); + this.push(chunk); + this.currentIndex += chunkSize; + }, delay); + } else { + this.push(null); // signal end of data + } + } + + randomInt(min, max) { + return Math.floor(Math.random() * (max - min)) + min; + } + + async processTextStream(onProgressCallback) { + const streamPromise = new Promise((resolve, reject) => { + this.on('data', (chunk) => { + onProgressCallback(chunk.toString()); + }); + + this.on('end', () => { + // logger.debug('[processTextStream] Stream ended'); + resolve(); + }); + + this.on('error', (err) => { + reject(err); + }); + }); + + try { + await streamPromise; + } catch (err) { + logger.error('[processTextStream] Error in text stream:', err); + // Handle the error appropriately, e.g., return an error message or throw an error + } + } +} + +module.exports = TextStream; diff --git a/api/app/clients/index.js b/api/app/clients/index.js new file mode 100644 index 0000000000..3dbe397b31 --- /dev/null +++ b/api/app/clients/index.js @@ -0,0 +1,7 @@ +const TextStream = require('./TextStream'); +const toolUtils = require('./tools/util'); + +module.exports = { + TextStream, + ...toolUtils, +}; diff --git a/api/app/clients/prompts/artifacts.js b/api/app/clients/prompts/artifacts.js new file mode 100644 index 0000000000..915ccae629 --- /dev/null +++ b/api/app/clients/prompts/artifacts.js @@ -0,0 +1,537 @@ +const dedent = require('dedent'); +const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider'); +const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate'); +const { components } = require('~/app/clients/prompts/shadcn-docs/components'); + +/** @deprecated */ +// eslint-disable-next-line no-unused-vars +const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... +- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... +- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. 
Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. +- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. +- Always provide complete, specific, and fully functional content without any placeholders, ellipses, or 'remains the same' comments. + + + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. \`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.263.1 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. 
If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type. + + +Here are some examples of correct usage of artifacts: + + + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + + + Can you create a simple flow chart showing the process of making tea using Mermaid? + + + Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + + +`; + +const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... +- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... 
+- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. +- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. +- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments. +- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly. + + + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - SVG: "image/svg+xml" + - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags. + - The assistant should specify the viewbox of the SVG rather than defining a width/height + - Markdown: "text/markdown" or "text/md" + - The user interface will render Markdown content placed within the artifact tags. 
+ - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more. + - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content. + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. \`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The three.js library is available to be imported, e.g. \`import * as THREE from "three";\` + - The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\` + - The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses. + - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type. + + +Here are some examples of correct usage of artifacts: + + + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + + + Can you create a simple flow chart showing the process of making tea using Mermaid? + + + Sure! 
Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + + + + + Create a simple React counter component + + Here's a simple React counter component: + + :::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"} + \`\`\` + import { useState } from 'react'; + + export default function Counter() { + const [count, setCount] = useState(0); + return ( +
+ <div className="flex flex-col items-center justify-center gap-4 p-8">
+ <p className="text-4xl font-bold">Count: {count}</p>
+ <button
+ onClick={() => setCount(count + 1)}
+ className="rounded bg-blue-500 px-4 py-2 font-semibold text-white"
+ >
+ Increment
+ </button>
+ </div>
+ );
+ }
+ \`\`\`
+ :::
+
+ This component creates a simple counter with an increment button.
+
+
+ Create a basic HTML structure for a blog post
+
+ Here's a basic HTML structure for a blog post:
+
+ :::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
+ \`\`\`
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+ <meta charset="UTF-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>My Blog Post</title>
+ </head>
+ <body>
+ <header>
+ <h1>My First Blog Post</h1>
+ </header>
+ <main>
+ <article>
+ <p>This is the content of my blog post. It's short and sweet!</p>
+ </article>
+ </main>
+ <footer>
+ <p>© 2023 My Blog</p>
+ </footer>
+ </body>
+ </html>
+ \`\`\`
+ :::
+
+ This HTML structure provides a simple layout for a blog post.
+
+
`; + +const artifactsOpenAIPrompt = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... +- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... +- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. +- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. +- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments. +- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly. + +## Artifact Instructions + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following remark-directive markdown format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + a. Example of correct format: + + :::artifact{identifier="example-artifact" type="text/plain" title="Example Artifact"} + \`\`\` + This is the content of the artifact. + It can span multiple lines. + \`\`\` + ::: + + b. Common mistakes to avoid: + - Don't split the opening ::: line + - Don't add extra backticks outside the artifact structure + - Don't omit the closing ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). 
This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - SVG: "image/svg+xml" + - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags. + - The assistant should specify the viewbox of the SVG rather than defining a width/height + - Markdown: "text/markdown" or "text/md" + - The user interface will render Markdown content placed within the artifact tags. + - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more. + - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content. + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. \`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The three.js library is available to be imported, e.g. \`import * as THREE from "three";\` + - The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\` + - The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses. 
+ - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. NEVER use triple backticks to enclose the artifact, ONLY the content within the artifact. + +Here are some examples of correct usage of artifacts: + +## Examples + +### Example 1 + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + User: Can you create a simple flow chart showing the process of making tea using Mermaid? + + Assistant: Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + +--- + +### Example 2 + + User: Create a simple React counter component + + Assistant: Here's a simple React counter component: + + :::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"} + \`\`\` + import { useState } from 'react'; + + export default function Counter() { + const [count, setCount] = useState(0); + return ( +
+ <div className="flex flex-col items-center justify-center gap-4 p-8">
+ <p className="text-4xl font-bold">Count: {count}</p>
+ <button
+ onClick={() => setCount(count + 1)}
+ className="rounded bg-blue-500 px-4 py-2 font-semibold text-white"
+ >
+ Increment
+ </button>
+ </div>
+ );
+ }
+ \`\`\`
+ :::
+
+ This component creates a simple counter with an increment button.
+
+---
+
+### Example 3
+ User: Create a basic HTML structure for a blog post
+ Assistant: Here's a basic HTML structure for a blog post:
+
+ :::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
+ \`\`\`
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+ <meta charset="UTF-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>My Blog Post</title>
+ </head>
+ <body>
+ <header>
+ <h1>My First Blog Post</h1>
+ </header>
+ <main>
+ <article>
+ <p>This is the content of my blog post. It's short and sweet!</p>
+ </article>
+ </main>
+ <footer>
+ <p>© 2023 My Blog</p>
+ </footer>
+ </body>
+ </html>
+ + + \`\`\` + ::: + + This HTML structure provides a simple layout for a blog post. + +---`; + +/** + * + * @param {Object} params + * @param {EModelEndpoint | string} params.endpoint - The current endpoint + * @param {ArtifactModes} params.artifacts - The current artifact mode + * @returns + */ +const generateArtifactsPrompt = ({ endpoint, artifacts }) => { + if (artifacts === ArtifactModes.CUSTOM) { + return null; + } + + let prompt = artifactsPrompt; + if (endpoint !== EModelEndpoint.anthropic) { + prompt = artifactsOpenAIPrompt; + } + + if (artifacts === ArtifactModes.SHADCNUI) { + prompt += generateShadcnPrompt({ components, useXML: endpoint === EModelEndpoint.anthropic }); + } + + return prompt; +}; + +module.exports = generateArtifactsPrompt; diff --git a/api/app/clients/prompts/createContextHandlers.js b/api/app/clients/prompts/createContextHandlers.js new file mode 100644 index 0000000000..87c48bcf92 --- /dev/null +++ b/api/app/clients/prompts/createContextHandlers.js @@ -0,0 +1,160 @@ +const axios = require('axios'); +const { logger } = require('@librechat/data-schemas'); +const { isEnabled, generateShortLivedToken } = require('@librechat/api'); + +const footer = `Use the context as your learned knowledge to better answer the user. + +In your response, remember to follow these guidelines: +- If you don't know the answer, simply say that you don't know. +- If you are unsure how to answer, ask for clarification. +- Avoid mentioning that you obtained the information from the context. +`; + +function createContextHandlers(req, userMessageContent) { + if (!process.env.RAG_API_URL) { + return; + } + + const queryPromises = []; + const processedFiles = []; + const processedIds = new Set(); + const jwtToken = generateShortLivedToken(req.user.id); + const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT); + + const query = async (file) => { + if (useFullContext) { + return axios.get(`${process.env.RAG_API_URL}/documents/${file.file_id}/context`, { + headers: { + Authorization: `Bearer ${jwtToken}`, + }, + }); + } + + return axios.post( + `${process.env.RAG_API_URL}/query`, + { + file_id: file.file_id, + query: userMessageContent, + k: 4, + }, + { + headers: { + Authorization: `Bearer ${jwtToken}`, + 'Content-Type': 'application/json', + }, + }, + ); + }; + + const processFile = async (file) => { + if (file.embedded && !processedIds.has(file.file_id)) { + try { + const promise = query(file); + queryPromises.push(promise); + processedFiles.push(file); + processedIds.add(file.file_id); + } catch (error) { + logger.error(`Error processing file ${file.filename}:`, error); + } + } + }; + + const createContext = async () => { + try { + if (!queryPromises.length || !processedFiles.length) { + return ''; + } + + const oneFile = processedFiles.length === 1; + const header = `The user has attached ${oneFile ? 'a' : processedFiles.length} file${ + !oneFile ? 's' : '' + } to the conversation:`; + + const files = `${ + oneFile + ? '' + : ` + ` + }${processedFiles + .map( + (file) => ` + + ${file.filename} + ${file.type} + `, + ) + .join('')}${ + oneFile + ? '' + : ` + ` + }`; + + const resolvedQueries = await Promise.all(queryPromises); + + const context = + resolvedQueries.length === 0 + ? '\n\tThe semantic search did not return any results.' 
+ : resolvedQueries + .map((queryResult, index) => { + const file = processedFiles[index]; + let contextItems = queryResult.data; + + const generateContext = (currentContext) => + ` + + ${file.filename} + ${currentContext} + + `; + + if (useFullContext) { + return generateContext(`\n${contextItems}`); + } + + contextItems = queryResult.data + .map((item) => { + const pageContent = item[0].page_content; + return ` + + + `; + }) + .join(''); + + return generateContext(contextItems); + }) + .join(''); + + if (useFullContext) { + const prompt = `${header} + ${context} + ${footer}`; + + return prompt; + } + + const prompt = `${header} + ${files} + + A semantic search was executed with the user's message as the query, retrieving the following context inside XML tags. + + ${context} + + + ${footer}`; + + return prompt; + } catch (error) { + logger.error('Error creating context:', error); + throw error; + } + }; + + return { + processFile, + createContext, + }; +} + +module.exports = createContextHandlers; diff --git a/api/app/clients/prompts/createVisionPrompt.js b/api/app/clients/prompts/createVisionPrompt.js new file mode 100644 index 0000000000..5d8a7bbf51 --- /dev/null +++ b/api/app/clients/prompts/createVisionPrompt.js @@ -0,0 +1,34 @@ +/** + * Generates a prompt instructing the user to describe an image in detail, tailored to different types of visual content. + * @param {boolean} pluralized - Whether to pluralize the prompt for multiple images. + * @returns {string} - The generated vision prompt. + */ +const createVisionPrompt = (pluralized = false) => { + return `Please describe the image${ + pluralized ? 's' : '' + } in detail, covering relevant aspects such as: + + For photographs, illustrations, or artwork: + - The main subject(s) and their appearance, positioning, and actions + - The setting, background, and any notable objects or elements + - Colors, lighting, and overall mood or atmosphere + - Any interesting details, textures, or patterns + - The style, technique, or medium used (if discernible) + + For screenshots or images containing text: + - The content and purpose of the text + - The layout, formatting, and organization of the information + - Any notable visual elements, such as logos, icons, or graphics + - The overall context or message conveyed by the screenshot + + For graphs, charts, or data visualizations: + - The type of graph or chart (e.g., bar graph, line chart, pie chart) + - The variables being compared or analyzed + - Any trends, patterns, or outliers in the data + - The axis labels, scales, and units of measurement + - The title, legend, and any additional context provided + + Be as specific and descriptive as possible while maintaining clarity and concision.`; +}; + +module.exports = createVisionPrompt; diff --git a/api/app/clients/prompts/formatAgentMessages.spec.js b/api/app/clients/prompts/formatAgentMessages.spec.js new file mode 100644 index 0000000000..1aee3edf71 --- /dev/null +++ b/api/app/clients/prompts/formatAgentMessages.spec.js @@ -0,0 +1,361 @@ +const { ToolMessage } = require('@langchain/core/messages'); +const { ContentTypes } = require('librechat-data-provider'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); +const { formatAgentMessages } = require('./formatMessages'); + +describe('formatAgentMessages', () => { + it('should format simple user and AI messages', () => { + const payload = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there!' 
}, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[1]).toBeInstanceOf(AIMessage); + }); + + it('should handle system messages', () => { + const payload = [{ role: 'system', content: 'You are a helpful assistant.' }]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(SystemMessage); + }); + + it('should format messages with content arrays', () => { + const payload = [ + { + role: 'user', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' }], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(HumanMessage); + }); + + it('should handle tool calls and create ToolMessages', () => { + const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'Let me check that for you.', + tool_call_ids: ['123'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: '{"query":"weather"}', + output: 'The weather is sunny.', + }, + }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[1]).toBeInstanceOf(ToolMessage); + expect(result[0].tool_calls).toHaveLength(1); + expect(result[1].tool_call_id).toBe('123'); + }); + + it('should handle multiple content parts in assistant messages', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 1' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 2' }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toHaveLength(2); + }); + + it('should throw an error for invalid tool call structure', () => { + const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: '{"query":"weather"}', + output: 'The weather is sunny.', + }, + }, + ], + }, + ]; + expect(() => formatAgentMessages(payload)).toThrow('Invalid tool call structure'); + }); + + it('should handle tool calls with non-JSON args', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Checking...', tool_call_ids: ['123'] }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: 'non-json-string', + output: 'Result', + }, + }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0].tool_calls[0].args).toStrictEqual({ input: 'non-json-string' }); + }); + + it('should handle complex tool calls with multiple steps', () => { + const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: "I'll search for that information.", + tool_call_ids: ['search_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'search_1', + name: 'search', + args: '{"query":"weather in New York"}', + output: 'The weather in New York is currently sunny with a temperature of 75°F.', + }, + }, + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: "Now, I'll convert the temperature.", + tool_call_ids: ['convert_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'convert_1', + name: 
'convert_temperature', + args: '{"temperature": 75, "from": "F", "to": "C"}', + output: '23.89°C', + }, + }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's your answer." }, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + expect(result).toHaveLength(5); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[1]).toBeInstanceOf(ToolMessage); + expect(result[2]).toBeInstanceOf(AIMessage); + expect(result[3]).toBeInstanceOf(ToolMessage); + expect(result[4]).toBeInstanceOf(AIMessage); + + // Check first AIMessage + expect(result[0].content).toBe("I'll search for that information."); + expect(result[0].tool_calls).toHaveLength(1); + expect(result[0].tool_calls[0]).toEqual({ + id: 'search_1', + name: 'search', + args: { query: 'weather in New York' }, + }); + + // Check first ToolMessage + expect(result[1].tool_call_id).toBe('search_1'); + expect(result[1].name).toBe('search'); + expect(result[1].content).toBe( + 'The weather in New York is currently sunny with a temperature of 75°F.', + ); + + // Check second AIMessage + expect(result[2].content).toBe("Now, I'll convert the temperature."); + expect(result[2].tool_calls).toHaveLength(1); + expect(result[2].tool_calls[0]).toEqual({ + id: 'convert_1', + name: 'convert_temperature', + args: { temperature: 75, from: 'F', to: 'C' }, + }); + + // Check second ToolMessage + expect(result[3].tool_call_id).toBe('convert_1'); + expect(result[3].name).toBe('convert_temperature'); + expect(result[3].content).toBe('23.89°C'); + + // Check final AIMessage + expect(result[4].content).toStrictEqual([ + { [ContentTypes.TEXT]: "Here's your answer.", type: ContentTypes.TEXT }, + ]); + }); + + it.skip('should not produce two consecutive assistant messages and format content correctly', () => { + const payload = [ + { role: 'user', content: 'Hello' }, + { + role: 'assistant', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hi there!' }], + }, + { + role: 'assistant', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'How can I help you?' }], + }, + { role: 'user', content: "What's the weather?" }, + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'Let me check that for you.', + tool_call_ids: ['weather_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'weather_1', + name: 'check_weather', + args: '{"location":"New York"}', + output: 'Sunny, 75°F', + }, + }, + ], + }, + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's the weather information." 
}, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + // Check correct message count and types + expect(result).toHaveLength(6); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[1]).toBeInstanceOf(AIMessage); + expect(result[2]).toBeInstanceOf(HumanMessage); + expect(result[3]).toBeInstanceOf(AIMessage); + expect(result[4]).toBeInstanceOf(ToolMessage); + expect(result[5]).toBeInstanceOf(AIMessage); + + // Check content of messages + expect(result[0].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Hello', type: ContentTypes.TEXT }, + ]); + expect(result[1].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Hi there!', type: ContentTypes.TEXT }, + { [ContentTypes.TEXT]: 'How can I help you?', type: ContentTypes.TEXT }, + ]); + expect(result[2].content).toStrictEqual([ + { [ContentTypes.TEXT]: "What's the weather?", type: ContentTypes.TEXT }, + ]); + expect(result[3].content).toBe('Let me check that for you.'); + expect(result[4].content).toBe('Sunny, 75°F'); + expect(result[5].content).toStrictEqual([ + { [ContentTypes.TEXT]: "Here's the weather information.", type: ContentTypes.TEXT }, + ]); + + // Check that there are no consecutive AIMessages + const messageTypes = result.map((message) => message.constructor); + for (let i = 0; i < messageTypes.length - 1; i++) { + expect(messageTypes[i] === AIMessage && messageTypes[i + 1] === AIMessage).toBe(false); + } + + // Additional check to ensure the consecutive assistant messages were combined + expect(result[1].content).toHaveLength(2); + }); + + it('should skip THINK type content parts', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' }, + { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' }, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toEqual('Initial response\nFinal answer'); + }); + + it('should join TEXT content as string when THINK content type is present', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' 
}, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' }, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(typeof result[0].content).toBe('string'); + expect(result[0].content).toBe( + 'First part of response\nSecond part of response\nFinal part of response', + ); + expect(result[0].content).not.toContain('Analyzing the problem...'); + }); + + it('should exclude ERROR type content parts', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' }, + { + type: ContentTypes.ERROR, + [ContentTypes.ERROR]: + 'An error occurred while processing the request: Something went wrong', + }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' }, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toEqual([ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello there' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' }, + ]); + + // Make sure no error content exists in the result + const hasErrorContent = result[0].content.some( + (item) => + item.type === ContentTypes.ERROR || JSON.stringify(item).includes('An error occurred'), + ); + expect(hasErrorContent).toBe(false); + }); +}); diff --git a/api/app/clients/prompts/formatGoogleInputs.js b/api/app/clients/prompts/formatGoogleInputs.js new file mode 100644 index 0000000000..c929df8b51 --- /dev/null +++ b/api/app/clients/prompts/formatGoogleInputs.js @@ -0,0 +1,42 @@ +/** + * Formats an object to match the struct_val, list_val, string_val, float_val, and int_val format. + * + * @param {Object} obj - The object to be formatted. + * @returns {Object} The formatted object. + * + * Handles different types: + * - Arrays are wrapped in list_val and each element is processed. + * - Objects are recursively processed. + * - Strings are wrapped in string_val. + * - Numbers are wrapped in float_val or int_val depending on whether they are floating-point or integers. + */ +function formatGoogleInputs(obj) { + const formattedObj = {}; + + for (const key in obj) { + if (Object.prototype.hasOwnProperty.call(obj, key)) { + const value = obj[key]; + + // Handle arrays + if (Array.isArray(value)) { + formattedObj[key] = { list_val: value.map((item) => formatGoogleInputs(item)) }; + } + // Handle objects + else if (typeof value === 'object' && value !== null) { + formattedObj[key] = formatGoogleInputs(value); + } + // Handle numbers + else if (typeof value === 'number') { + formattedObj[key] = Number.isInteger(value) ? 
{ int_val: value } : { float_val: value }; + } + // Handle other types (e.g., strings) + else { + formattedObj[key] = { string_val: [value] }; + } + } + } + + return { struct_val: formattedObj }; +} + +module.exports = formatGoogleInputs; diff --git a/api/app/clients/prompts/formatGoogleInputs.spec.js b/api/app/clients/prompts/formatGoogleInputs.spec.js new file mode 100644 index 0000000000..8fef9dfb5f --- /dev/null +++ b/api/app/clients/prompts/formatGoogleInputs.spec.js @@ -0,0 +1,274 @@ +const formatGoogleInputs = require('./formatGoogleInputs'); + +describe('formatGoogleInputs', () => { + it('formats message correctly', () => { + const input = { + messages: [ + { + content: 'hi', + author: 'user', + }, + ], + context: 'context', + examples: [ + { + input: { + author: 'user', + content: 'user input', + }, + output: { + author: 'bot', + content: 'bot output', + }, + }, + ], + parameters: { + temperature: 0.2, + topP: 0.8, + topK: 40, + maxOutputTokens: 1024, + }, + }; + + const expectedOutput = { + struct_val: { + messages: { + list_val: [ + { + struct_val: { + content: { + string_val: ['hi'], + }, + author: { + string_val: ['user'], + }, + }, + }, + ], + }, + context: { + string_val: ['context'], + }, + examples: { + list_val: [ + { + struct_val: { + input: { + struct_val: { + author: { + string_val: ['user'], + }, + content: { + string_val: ['user input'], + }, + }, + }, + output: { + struct_val: { + author: { + string_val: ['bot'], + }, + content: { + string_val: ['bot output'], + }, + }, + }, + }, + }, + ], + }, + parameters: { + struct_val: { + temperature: { + float_val: 0.2, + }, + topP: { + float_val: 0.8, + }, + topK: { + int_val: 40, + }, + maxOutputTokens: { + int_val: 1024, + }, + }, + }, + }, + }; + + const result = formatGoogleInputs(input); + expect(JSON.stringify(result)).toEqual(JSON.stringify(expectedOutput)); + }); + + it('formats real payload parts', () => { + const input = { + instances: [ + { + context: 'context', + examples: [ + { + input: { + author: 'user', + content: 'user input', + }, + output: { + author: 'bot', + content: 'user output', + }, + }, + ], + messages: [ + { + author: 'user', + content: 'hi', + }, + ], + }, + ], + parameters: { + candidateCount: 1, + maxOutputTokens: 1024, + temperature: 0.2, + topP: 0.8, + topK: 40, + }, + }; + const expectedOutput = { + struct_val: { + instances: { + list_val: [ + { + struct_val: { + context: { string_val: ['context'] }, + examples: { + list_val: [ + { + struct_val: { + input: { + struct_val: { + author: { string_val: ['user'] }, + content: { string_val: ['user input'] }, + }, + }, + output: { + struct_val: { + author: { string_val: ['bot'] }, + content: { string_val: ['user output'] }, + }, + }, + }, + }, + ], + }, + messages: { + list_val: [ + { + struct_val: { + author: { string_val: ['user'] }, + content: { string_val: ['hi'] }, + }, + }, + ], + }, + }, + }, + ], + }, + parameters: { + struct_val: { + candidateCount: { int_val: 1 }, + maxOutputTokens: { int_val: 1024 }, + temperature: { float_val: 0.2 }, + topP: { float_val: 0.8 }, + topK: { int_val: 40 }, + }, + }, + }, + }; + + const result = formatGoogleInputs(input); + expect(JSON.stringify(result)).toEqual(JSON.stringify(expectedOutput)); + }); + + it('helps create valid payload parts', () => { + const instances = { + context: 'context', + examples: [ + { + input: { + author: 'user', + content: 'user input', + }, + output: { + author: 'bot', + content: 'user output', + }, + }, + ], + messages: [ + { + author: 'user', + content: 'hi', + }, + ], + }; + + 
const expectedInstances = { + struct_val: { + context: { string_val: ['context'] }, + examples: { + list_val: [ + { + struct_val: { + input: { + struct_val: { + author: { string_val: ['user'] }, + content: { string_val: ['user input'] }, + }, + }, + output: { + struct_val: { + author: { string_val: ['bot'] }, + content: { string_val: ['user output'] }, + }, + }, + }, + }, + ], + }, + messages: { + list_val: [ + { + struct_val: { + author: { string_val: ['user'] }, + content: { string_val: ['hi'] }, + }, + }, + ], + }, + }, + }; + + const parameters = { + candidateCount: 1, + maxOutputTokens: 1024, + temperature: 0.2, + topP: 0.8, + topK: 40, + }; + const expectedParameters = { + struct_val: { + candidateCount: { int_val: 1 }, + maxOutputTokens: { int_val: 1024 }, + temperature: { float_val: 0.2 }, + topP: { float_val: 0.8 }, + topK: { int_val: 40 }, + }, + }; + + const instancesResult = formatGoogleInputs(instances); + const parametersResult = formatGoogleInputs(parameters); + expect(JSON.stringify(instancesResult)).toEqual(JSON.stringify(expectedInstances)); + expect(JSON.stringify(parametersResult)).toEqual(JSON.stringify(expectedParameters)); + }); +}); diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js new file mode 100644 index 0000000000..376178b6be --- /dev/null +++ b/api/app/clients/prompts/formatMessages.js @@ -0,0 +1,245 @@ +const { ToolMessage } = require('@langchain/core/messages'); +const { EModelEndpoint, ContentTypes } = require('librechat-data-provider'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); + +/** + * Formats a message to OpenAI Vision API payload format. + * + * @param {Object} params - The parameters for formatting. + * @param {Object} params.message - The message object to format. + * @param {string} [params.message.role] - The role of the message sender (must be 'user'). + * @param {string} [params.message.content] - The text content of the message. + * @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling + * @param {Array} [params.image_urls] - The image_urls to attach to the message. + * @returns {(Object)} - The formatted message. + */ +const formatVisionMessage = ({ message, image_urls, endpoint }) => { + if (endpoint === EModelEndpoint.anthropic) { + message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }]; + return message; + } + + message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls]; + + return message; +}; + +/** + * Formats a message to OpenAI payload format based on the provided options. + * + * @param {Object} params - The parameters for formatting. + * @param {Object} params.message - The message object to format. + * @param {string} [params.message.role] - The role of the message sender (e.g., 'user', 'assistant'). + * @param {string} [params.message._name] - The name associated with the message. + * @param {string} [params.message.sender] - The sender of the message. + * @param {string} [params.message.text] - The text content of the message. + * @param {string} [params.message.content] - The content of the message. + * @param {Array} [params.message.image_urls] - The image_urls attached to the message for Vision API. + * @param {string} [params.userName] - The name of the user. + * @param {string} [params.assistantName] - The name of the assistant. 
+ * @param {string} [params.endpoint] - Identifier for specific endpoint handling + * @param {boolean} [params.langChain=false] - Whether to return a LangChain message object. + * @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message. + */ +const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => { + let { role: _role, _name, sender, text, content: _content, lc_id } = message; + if (lc_id && lc_id[2] && !langChain) { + const roleMapping = { + SystemMessage: 'system', + HumanMessage: 'user', + AIMessage: 'assistant', + }; + _role = roleMapping[lc_id[2]]; + } + const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant'); + const content = _content ?? text ?? ''; + const formattedMessage = { + role, + content, + }; + + const { image_urls } = message; + if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') { + return formatVisionMessage({ + message: formattedMessage, + image_urls: message.image_urls, + endpoint, + }); + } + + if (_name) { + formattedMessage.name = _name; + } + + if (userName && formattedMessage.role === 'user') { + formattedMessage.name = userName; + } + + if (assistantName && formattedMessage.role === 'assistant') { + formattedMessage.name = assistantName; + } + + if (formattedMessage.name) { + // Conform to API regex: ^[a-zA-Z0-9_-]{1,64}$ + // https://community.openai.com/t/the-format-of-the-name-field-in-the-documentation-is-incorrect/175684/2 + formattedMessage.name = formattedMessage.name.replace(/[^a-zA-Z0-9_-]/g, '_'); + + if (formattedMessage.name.length > 64) { + formattedMessage.name = formattedMessage.name.substring(0, 64); + } + } + + if (!langChain) { + return formattedMessage; + } + + if (role === 'user') { + return new HumanMessage(formattedMessage); + } else if (role === 'assistant') { + return new AIMessage(formattedMessage); + } else { + return new SystemMessage(formattedMessage); + } +}; + +/** + * Formats an array of messages for LangChain. + * + * @param {Array} messages - The array of messages to format. + * @param {Object} formatOptions - The options for formatting each message. + * @param {string} [formatOptions.userName] - The name of the user. + * @param {string} [formatOptions.assistantName] - The name of the assistant. + * @returns {Array<(HumanMessage|AIMessage|SystemMessage)>} - The array of formatted LangChain messages. + */ +const formatLangChainMessages = (messages, formatOptions) => + messages.map((msg) => formatMessage({ ...formatOptions, message: msg, langChain: true })); + +/** + * Formats a LangChain message object by merging properties from `lc_kwargs` or `kwargs` and `additional_kwargs`. + * + * @param {Object} message - The message object to format. + * @param {Object} [message.lc_kwargs] - Contains properties to be merged. Either this or `message.kwargs` should be provided. + * @param {Object} [message.kwargs] - Contains properties to be merged. Either this or `message.lc_kwargs` should be provided. + * @param {Object} [message.kwargs.additional_kwargs] - Additional properties to be merged. + * + * @returns {Object} The formatted LangChain message. + */ +const formatFromLangChain = (message) => { + const { additional_kwargs, ...message_kwargs } = message.lc_kwargs ?? message.kwargs; + return { + ...message_kwargs, + ...additional_kwargs, + }; +}; + +/** + * Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances. + * + * @param {Array>} payload - The array of messages to format. 
+ * @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls. + */ +const formatAgentMessages = (payload) => { + const messages = []; + + for (const message of payload) { + if (typeof message.content === 'string') { + message.content = [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content }]; + } + if (message.role !== 'assistant') { + messages.push(formatMessage({ message, langChain: true })); + continue; + } + + let currentContent = []; + let lastAIMessage = null; + + let hasReasoning = false; + for (const part of message.content) { + if (part.type === ContentTypes.TEXT && part.tool_call_ids) { + /* + If there's pending content, it needs to be aggregated as a single string to prepare for tool calls. + For Anthropic models, the "tool_calls" field on a message is only respected if content is a string. + */ + if (currentContent.length > 0) { + let content = currentContent.reduce((acc, curr) => { + if (curr.type === ContentTypes.TEXT) { + return `${acc}${curr[ContentTypes.TEXT]}\n`; + } + return acc; + }, ''); + content = `${content}\n${part[ContentTypes.TEXT] ?? ''}`.trim(); + lastAIMessage = new AIMessage({ content }); + messages.push(lastAIMessage); + currentContent = []; + continue; + } + + // Create a new AIMessage with this text and prepare for tool calls + lastAIMessage = new AIMessage({ + content: part.text || '', + }); + + messages.push(lastAIMessage); + } else if (part.type === ContentTypes.TOOL_CALL) { + if (!lastAIMessage) { + throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids'); + } + + // Note: `tool_calls` list is defined when constructed by `AIMessage` class, and outputs should be excluded from it + const { output, args: _args, ...tool_call } = part.tool_call; + // TODO: investigate; args as dictionary may need to be provider-or-tool-specific + let args = _args; + try { + args = JSON.parse(_args); + } catch (e) { + if (typeof _args === 'string') { + args = { input: _args }; + } + } + + tool_call.args = args; + lastAIMessage.tool_calls.push(tool_call); + + // Add the corresponding ToolMessage + messages.push( + new ToolMessage({ + tool_call_id: tool_call.id, + name: tool_call.name, + content: output || '', + }), + ); + } else if (part.type === ContentTypes.THINK) { + hasReasoning = true; + continue; + } else if (part.type === ContentTypes.ERROR || part.type === ContentTypes.AGENT_UPDATE) { + continue; + } else { + currentContent.push(part); + } + } + + if (hasReasoning) { + currentContent = currentContent + .reduce((acc, curr) => { + if (curr.type === ContentTypes.TEXT) { + return `${acc}${curr[ContentTypes.TEXT]}\n`; + } + return acc; + }, '') + .trim(); + } + + if (currentContent.length > 0) { + messages.push(new AIMessage({ content: currentContent })); + } + } + + return messages; +}; + +module.exports = { + formatMessage, + formatFromLangChain, + formatAgentMessages, + formatLangChainMessages, +}; diff --git a/api/app/clients/prompts/formatMessages.spec.js b/api/app/clients/prompts/formatMessages.spec.js new file mode 100644 index 0000000000..97e40b0caa --- /dev/null +++ b/api/app/clients/prompts/formatMessages.spec.js @@ -0,0 +1,276 @@ +const { Constants } = require('librechat-data-provider'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); +const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages'); + +describe('formatMessage', () => { + it('formats 
user message', () => { + const input = { + message: { + sender: 'user', + text: 'Hello', + }, + userName: 'John', + }; + const result = formatMessage(input); + expect(result).toEqual({ + role: 'user', + content: 'Hello', + name: 'John', + }); + }); + + it('sanitizes the name by replacing invalid characters (per OpenAI)', () => { + const input = { + message: { + sender: 'user', + text: 'Hello', + }, + userName: ' John$Doe@Example! ', + }; + const result = formatMessage(input); + expect(result).toEqual({ + role: 'user', + content: 'Hello', + name: '_John_Doe_Example__', + }); + }); + + it('trims the name to a maximum length of 64 characters', () => { + const longName = 'a'.repeat(100); + const input = { + message: { + sender: 'user', + text: 'Hello', + }, + userName: longName, + }; + const result = formatMessage(input); + expect(result.name.length).toBe(64); + expect(result.name).toBe('a'.repeat(64)); + }); + + it('formats a realistic user message', () => { + const input = { + message: { + _id: '6512cdfb92cbf69fea615331', + messageId: 'b620bf73-c5c3-4a38-b724-76886aac24c4', + __v: 0, + conversationId: '5c23d24f-941f-4aab-85df-127b596c8aa5', + createdAt: Date.now(), + error: false, + finish_reason: null, + isCreatedByUser: true, + model: null, + parentMessageId: Constants.NO_PARENT, + sender: 'User', + text: 'hi', + tokenCount: 5, + unfinished: false, + updatedAt: Date.now(), + user: '6512cdf475f05c86d44c31d2', + }, + userName: 'John', + }; + const result = formatMessage(input); + expect(result).toEqual({ + role: 'user', + content: 'hi', + name: 'John', + }); + }); + + it('formats assistant message', () => { + const input = { + message: { + sender: 'assistant', + text: 'Hi there', + }, + assistantName: 'Assistant', + }; + const result = formatMessage(input); + expect(result).toEqual({ + role: 'assistant', + content: 'Hi there', + name: 'Assistant', + }); + }); + + it('formats system message', () => { + const input = { + message: { + role: 'system', + text: 'Hi there', + }, + }; + const result = formatMessage(input); + expect(result).toEqual({ + role: 'system', + content: 'Hi there', + }); + }); + + it('formats user message with langChain', () => { + const input = { + message: { + sender: 'user', + text: 'Hello', + }, + userName: 'John', + langChain: true, + }; + const result = formatMessage(input); + expect(result).toBeInstanceOf(HumanMessage); + expect(result.lc_kwargs.content).toEqual(input.message.text); + expect(result.lc_kwargs.name).toEqual(input.userName); + }); + + it('formats assistant message with langChain', () => { + const input = { + message: { + sender: 'assistant', + text: 'Hi there', + }, + assistantName: 'Assistant', + langChain: true, + }; + const result = formatMessage(input); + expect(result).toBeInstanceOf(AIMessage); + expect(result.lc_kwargs.content).toEqual(input.message.text); + expect(result.lc_kwargs.name).toEqual(input.assistantName); + }); + + it('formats system message with langChain', () => { + const input = { + message: { + role: 'system', + text: 'This is a system message.', + }, + langChain: true, + }; + const result = formatMessage(input); + expect(result).toBeInstanceOf(SystemMessage); + expect(result.lc_kwargs.content).toEqual(input.message.text); + }); + + it('formats langChain messages into OpenAI payload format', () => { + const human = { + message: new HumanMessage({ + content: 'Hello', + }), + }; + const system = { + message: new SystemMessage({ + content: 'Hello', + }), + }; + const ai = { + message: new AIMessage({ + content: 'Hello', + }), + }; + 
const humanResult = formatMessage(human); + const systemResult = formatMessage(system); + const aiResult = formatMessage(ai); + expect(humanResult).toEqual({ + role: 'user', + content: 'Hello', + }); + expect(systemResult).toEqual({ + role: 'system', + content: 'Hello', + }); + expect(aiResult).toEqual({ + role: 'assistant', + content: 'Hello', + }); + }); +}); + +describe('formatLangChainMessages', () => { + it('formats an array of messages for LangChain', () => { + const messages = [ + { + role: 'system', + content: 'This is a system message', + }, + { + sender: 'user', + text: 'Hello', + }, + { + sender: 'assistant', + text: 'Hi there', + }, + ]; + const formatOptions = { + userName: 'John', + assistantName: 'Assistant', + }; + const result = formatLangChainMessages(messages, formatOptions); + expect(result).toHaveLength(3); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[1]).toBeInstanceOf(HumanMessage); + expect(result[2]).toBeInstanceOf(AIMessage); + + expect(result[0].lc_kwargs.content).toEqual(messages[0].content); + expect(result[1].lc_kwargs.content).toEqual(messages[1].text); + expect(result[2].lc_kwargs.content).toEqual(messages[2].text); + + expect(result[1].lc_kwargs.name).toEqual(formatOptions.userName); + expect(result[2].lc_kwargs.name).toEqual(formatOptions.assistantName); + }); + + describe('formatFromLangChain', () => { + it('should merge kwargs and additional_kwargs', () => { + const message = { + kwargs: { + content: 'some content', + name: 'dan', + additional_kwargs: { + function_call: { + name: 'dall-e', + arguments: '{\n "input": "Subject: hedgehog, Style: cute"\n}', + }, + }, + }, + }; + + const expected = { + content: 'some content', + name: 'dan', + function_call: { + name: 'dall-e', + arguments: '{\n "input": "Subject: hedgehog, Style: cute"\n}', + }, + }; + + expect(formatFromLangChain(message)).toEqual(expected); + }); + + it('should handle messages without additional_kwargs', () => { + const message = { + kwargs: { + content: 'some content', + name: 'dan', + }, + }; + + const expected = { + content: 'some content', + name: 'dan', + }; + + expect(formatFromLangChain(message)).toEqual(expected); + }); + + it('should handle empty messages', () => { + const message = { + kwargs: {}, + }; + + const expected = {}; + + expect(formatFromLangChain(message)).toEqual(expected); + }); + }); +}); diff --git a/api/app/clients/prompts/index.js b/api/app/clients/prompts/index.js new file mode 100644 index 0000000000..ba4859efe3 --- /dev/null +++ b/api/app/clients/prompts/index.js @@ -0,0 +1,13 @@ +const formatMessages = require('./formatMessages'); +const summaryPrompts = require('./summaryPrompts'); +const truncate = require('./truncate'); +const createVisionPrompt = require('./createVisionPrompt'); +const createContextHandlers = require('./createContextHandlers'); + +module.exports = { + ...formatMessages, + ...summaryPrompts, + ...truncate, + createVisionPrompt, + createContextHandlers, +}; diff --git a/api/app/clients/prompts/shadcn-docs/components.js b/api/app/clients/prompts/shadcn-docs/components.js new file mode 100644 index 0000000000..b67c47d50f --- /dev/null +++ b/api/app/clients/prompts/shadcn-docs/components.js @@ -0,0 +1,495 @@ +// Essential Components +const essentialComponents = { + avatar: { + componentName: 'Avatar', + importDocs: 'import { Avatar, AvatarFallback, AvatarImage } from "/components/ui/avatar"', + usageDocs: ` + + + CN +`, + }, + button: { + componentName: 'Button', + importDocs: 'import { Button } from 
"/components/ui/button"', + usageDocs: ` +`, + }, + card: { + componentName: 'Card', + importDocs: ` +import { + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, +} from "/components/ui/card"`, + usageDocs: ` + + + Card Title + Card Description + + +

Card Content

+
+ +

Card Footer

+
+
`, + }, + checkbox: { + componentName: 'Checkbox', + importDocs: 'import { Checkbox } from "/components/ui/checkbox"', + usageDocs: '', + }, + input: { + componentName: 'Input', + importDocs: 'import { Input } from "/components/ui/input"', + usageDocs: '', + }, + label: { + componentName: 'Label', + importDocs: 'import { Label } from "/components/ui/label"', + usageDocs: '', + }, + radioGroup: { + componentName: 'RadioGroup', + importDocs: ` +import { Label } from "/components/ui/label" +import { RadioGroup, RadioGroupItem } from "/components/ui/radio-group"`, + usageDocs: ` + +
+ + +
+
+ + +
+
`, + }, + select: { + componentName: 'Select', + importDocs: ` +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "/components/ui/select"`, + usageDocs: ` +`, + }, + textarea: { + componentName: 'Textarea', + importDocs: 'import { Textarea } from "/components/ui/textarea"', + usageDocs: '