diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 277ac84f85..e7c36c5535 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.8" - services: app: build: diff --git a/.env.example b/.env.example index c0537a0bc1..d87021ea4b 100644 --- a/.env.example +++ b/.env.example @@ -2,11 +2,9 @@ # LibreChat Configuration # #=====================================================================# # Please refer to the reference documentation for assistance # -# with configuring your LibreChat environment. The guide is # -# available both online and within your local LibreChat # -# directory: # -# Online: https://docs.librechat.ai/install/configuration/dotenv.html # -# Locally: ./docs/install/configuration/dotenv.md # +# with configuring your LibreChat environment. # +# # +# https://www.librechat.ai/docs/configuration/dotenv # #=====================================================================# #==================================================# @@ -23,6 +21,13 @@ DOMAIN_SERVER=http://localhost:3080 NO_INDEX=true +#===============# +# JSON Logging # +#===============# + +# Use when process console logs in cloud deployment like GCP/AWS +CONSOLE_JSON=false + #===============# # Debug Logging # #===============# @@ -40,6 +45,7 @@ DEBUG_CONSOLE=false #===============# # Configuration # #===============# +# Use an absolute path, a relative path, or a URL # CONFIG_PATH="/alternative/path/to/librechat.yaml" @@ -47,35 +53,43 @@ DEBUG_CONSOLE=false # Endpoints # #===================================================# -# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic +# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic PROXY= #===================================# # Known Endpoints - librechat.yaml # #===================================# -# https://docs.librechat.ai/install/configuration/ai_endpoints.html +# 
https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints +# ANYSCALE_API_KEY= +# APIPIE_API_KEY= +# COHERE_API_KEY= +# DEEPSEEK_API_KEY= +# DATABRICKS_API_KEY= +# FIREWORKS_API_KEY= # GROQ_API_KEY= +# HUGGINGFACE_TOKEN= # MISTRAL_API_KEY= # OPENROUTER_KEY= -# ANYSCALE_API_KEY= -# FIREWORKS_API_KEY= # PERPLEXITY_API_KEY= +# SHUTTLEAI_API_KEY= # TOGETHERAI_API_KEY= +# UNIFY_API_KEY= +# XAI_API_KEY= #============# # Anthropic # #============# ANTHROPIC_API_KEY=user_provided -# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k +# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k +# ANTHROPIC_REVERSE_PROXY= #============# # Azure # #============# - # Note: these variables are DEPRECATED # Use the `librechat.yaml` configuration for `azureOpenAI` instead # You may also continue to use them if you opt out of using the `librechat.yaml` configuration @@ -91,41 +105,86 @@ ANTHROPIC_API_KEY=user_provided # AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated # PLUGINS_USE_AZURE="true" # Deprecated -#============# -# BingAI # -#============# +#=================# +# AWS Bedrock # +#=================# -BINGAI_TOKEN=user_provided -# BINGAI_HOST=https://cn.bing.com +# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided +# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey +# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey +# BEDROCK_AWS_SESSION_TOKEN=someSessionToken + +# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you. 
+# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0 + +# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns + +# Notes on specific models: +# The following models are not support due to not supporting streaming: +# ai21.j2-mid-v1 + +# The following models are not support due to not supporting conversation history: +# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14 #============# # Google # #============# GOOGLE_KEY=user_provided -# GOOGLE_MODELS=gemini-pro,gemini-pro-vision,chat-bison,chat-bison-32k,codechat-bison,codechat-bison-32k,text-bison,text-bison-32k,text-unicorn,code-gecko,code-bison,code-bison-32k + # GOOGLE_REVERSE_PROXY= +# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead. +# GOOGLE_AUTH_HEADER=true + +# Gemini API (AI Studio) +# GOOGLE_MODELS=gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision + +# Vertex AI +# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro + +# GOOGLE_TITLE_MODEL=gemini-pro + +# GOOGLE_LOC=us-central1 + +# Google Safety Settings +# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio) +# +# For Vertex AI: +# To use the BLOCK_NONE setting, you need either: +# (a) Access through an allowlist via your Google account team, or +# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing +# +# For Gemini API (AI Studio): +# BLOCK_NONE is available by default, no special account requirements. 
+# +# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE +# +# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH +# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH #============# # OpenAI # #============# OPENAI_API_KEY=user_provided -# OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k +# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k DEBUG_OPENAI=false # TITLE_CONVO=false -# OPENAI_TITLE_MODEL=gpt-3.5-turbo +# OPENAI_TITLE_MODEL=gpt-4o-mini # OPENAI_SUMMARIZE=true -# OPENAI_SUMMARY_MODEL=gpt-3.5-turbo +# OPENAI_SUMMARY_MODEL=gpt-4o-mini # OPENAI_FORCE_PROMPT=true # OPENAI_REVERSE_PROXY= -# OPENAI_ORGANIZATION= +# OPENAI_ORGANIZATION= #====================# # Assistants API # @@ -133,19 +192,29 @@ DEBUG_OPENAI=false ASSISTANTS_API_KEY=user_provided # ASSISTANTS_BASE_URL= -# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview +# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview + 
+#==========================# +# Azure Assistants API # +#==========================# + +# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration +# The models for Azure Assistants are also determined by your Azure OpenAI configuration. + +# More info, including how to enable use of Assistants with Azure here: +# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure #============# # OpenRouter # #============# - +# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint # OPENROUTER_API_KEY= #============# # Plugins # #============# -# PLUGIN_MODELS=gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613 +# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613 DEBUG_PLUGINS=true @@ -180,11 +249,16 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT= # DALLE3_AZURE_API_VERSION= # DALLE2_AZURE_API_VERSION= + # Google #----------------- -GOOGLE_API_KEY= +GOOGLE_SEARCH_API_KEY= GOOGLE_CSE_ID= +# YOUTUBE +#----------------- +YOUTUBE_API_KEY= + # SerpAPI #----------------- SERPAPI_API_KEY= @@ -218,6 +292,24 @@ MEILI_NO_ANALYTICS=true MEILI_HOST=http://0.0.0.0:7700 MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt +#==================================================# +# Speech to Text & Text to Speech # +#==================================================# + +STT_API_KEY= +TTS_API_KEY= + +#==================================================# +# RAG # +#==================================================# +# More info: https://www.librechat.ai/docs/configuration/rag_api + +# RAG_OPENAI_BASEURL= +# RAG_OPENAI_API_KEY= +# RAG_USE_FULL_CONTEXT= +# EMBEDDINGS_PROVIDER=openai +# EMBEDDINGS_MODEL=text-embedding-3-small + 
#===================================================# # User System # #===================================================# @@ -263,6 +355,7 @@ ILLEGAL_MODEL_REQ_SCORE=5 #========================# CHECK_BALANCE=false +# START_BALANCE=20000 # note: the number of tokens that will be credited after registration. #========================# # Registration and Login # @@ -272,6 +365,9 @@ ALLOW_EMAIL_LOGIN=true ALLOW_REGISTRATION=true ALLOW_SOCIAL_LOGIN=false ALLOW_SOCIAL_REGISTRATION=false +ALLOW_PASSWORD_RESET=false +# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out +ALLOW_UNVERIFIED_EMAIL_LOGIN=true SESSION_EXPIRY=1000 * 60 * 15 REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7 @@ -293,12 +389,22 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback GITHUB_CLIENT_ID= GITHUB_CLIENT_SECRET= GITHUB_CALLBACK_URL=/oauth/github/callback +# GitHub Eenterprise +# GITHUB_ENTERPRISE_BASE_URL= +# GITHUB_ENTERPRISE_USER_AGENT= # Google GOOGLE_CLIENT_ID= GOOGLE_CLIENT_SECRET= GOOGLE_CALLBACK_URL=/oauth/google/callback +# Apple +APPLE_CLIENT_ID= +APPLE_TEAM_ID= +APPLE_KEY_ID= +APPLE_PRIVATE_KEY_PATH= +APPLE_CALLBACK_URL=/oauth/apple/callback + # OpenID OPENID_CLIENT_ID= OPENID_CLIENT_SECRET= @@ -306,23 +412,44 @@ OPENID_ISSUER= OPENID_SESSION_SECRET= OPENID_SCOPE="openid profile email" OPENID_CALLBACK_URL=/oauth/openid/callback +OPENID_REQUIRED_ROLE= +OPENID_REQUIRED_ROLE_TOKEN_KIND= +OPENID_REQUIRED_ROLE_PARAMETER_PATH= +# Set to determine which user info property returned from OpenID Provider to store as the User's username +OPENID_USERNAME_CLAIM= +# Set to determine which user info property returned from OpenID Provider to store as the User's name +OPENID_NAME_CLAIM= OPENID_BUTTON_LABEL= OPENID_IMAGE_URL= +# LDAP +LDAP_URL= +LDAP_BIND_DN= +LDAP_BIND_CREDENTIALS= +LDAP_USER_SEARCH_BASE= +LDAP_SEARCH_FILTER=mail={{username}} +LDAP_CA_CERT_PATH= +# LDAP_TLS_REJECT_UNAUTHORIZED= +# LDAP_LOGIN_USES_USERNAME=true +# LDAP_ID= +# LDAP_USERNAME= +# LDAP_EMAIL= +# 
LDAP_FULL_NAME= + #========================# # Email Password Reset # #========================# -EMAIL_SERVICE= -EMAIL_HOST= -EMAIL_PORT=25 -EMAIL_ENCRYPTION= -EMAIL_ENCRYPTION_HOSTNAME= -EMAIL_ALLOW_SELFSIGNED= -EMAIL_USERNAME= -EMAIL_PASSWORD= -EMAIL_FROM_NAME= +EMAIL_SERVICE= +EMAIL_HOST= +EMAIL_PORT=25 +EMAIL_ENCRYPTION= +EMAIL_ENCRYPTION_HOSTNAME= +EMAIL_ALLOW_SELFSIGNED= +EMAIL_USERNAME= +EMAIL_PASSWORD= +EMAIL_FROM_NAME= EMAIL_FROM=noreply@librechat.ai #========================# @@ -336,6 +463,25 @@ FIREBASE_STORAGE_BUCKET= FIREBASE_MESSAGING_SENDER_ID= FIREBASE_APP_ID= +#========================# +# Shared Links # +#========================# + +ALLOW_SHARED_LINKS=true +ALLOW_SHARED_LINKS_PUBLIC=true + +#==============================# +# Static File Cache Control # +#==============================# + +# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age +# NODE_ENV must be set to production for these to take effect +# STATIC_CACHE_MAX_AGE=172800 +# STATIC_CACHE_S_MAX_AGE=86400 + +# If you have another service in front of your LibreChat doing compression, disable express based compression here +# DISABLE_COMPRESSION=true + #===================================================# # UI # #===================================================# @@ -346,6 +492,9 @@ HELP_AND_FAQ_URL=https://librechat.ai # SHOW_BIRTHDAY_ICON=true +# Google tag manager id +#ANALYTICS_GTM_ID=user provided google tag manager id + #==================================================# # Others # #==================================================# @@ -358,3 +507,24 @@ HELP_AND_FAQ_URL=https://librechat.ai # E2E_USER_EMAIL= # E2E_USER_PASSWORD= + +#=====================================================# +# Cache Headers # +#=====================================================# +# Headers that control caching of the index.html # +# Default configuration prevents caching to ensure # +# users always get the latest version. 
Customize # +# only if you understand caching implications. # + +# INDEX_HTML_CACHE_CONTROL=no-cache, no-store, must-revalidate +# INDEX_HTML_PRAGMA=no-cache +# INDEX_HTML_EXPIRES=0 + +# no-cache: Forces validation with server before using cached version +# no-store: Prevents storing the response entirely +# must-revalidate: Prevents using stale content when offline + +#=====================================================# +# OpenWeather # +#=====================================================# +OPENWEATHER_API_KEY= \ No newline at end of file diff --git a/.eslintrc.js b/.eslintrc.js deleted file mode 100644 index 6d8e085182..0000000000 --- a/.eslintrc.js +++ /dev/null @@ -1,161 +0,0 @@ -module.exports = { - env: { - browser: true, - es2021: true, - node: true, - commonjs: true, - es6: true, - }, - extends: [ - 'eslint:recommended', - 'plugin:react/recommended', - 'plugin:react-hooks/recommended', - 'plugin:jest/recommended', - 'prettier', - ], - ignorePatterns: [ - 'client/dist/**/*', - 'client/public/**/*', - 'e2e/playwright-report/**/*', - 'packages/data-provider/types/**/*', - 'packages/data-provider/dist/**/*', - 'data-node/**/*', - 'meili_data/**/*', - 'node_modules/**/*', - ], - parser: '@typescript-eslint/parser', - parserOptions: { - ecmaVersion: 'latest', - sourceType: 'module', - ecmaFeatures: { - jsx: true, - }, - }, - plugins: ['react', 'react-hooks', '@typescript-eslint', 'import'], - rules: { - 'react/react-in-jsx-scope': 'off', - '@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }], - indent: ['error', 2, { SwitchCase: 1 }], - 'max-len': [ - 'error', - { - code: 120, - ignoreStrings: true, - ignoreTemplateLiterals: true, - ignoreComments: true, - }, - ], - 'linebreak-style': 0, - curly: ['error', 'all'], - semi: ['error', 'always'], - 'object-curly-spacing': ['error', 'always'], - 'no-multiple-empty-lines': ['error', { max: 1 }], - 'no-trailing-spaces': 'error', - 'comma-dangle': ['error', 'always-multiline'], - // 
"arrow-parens": [2, "as-needed", { requireForBlockBody: true }], - // 'no-plusplus': ['error', { allowForLoopAfterthoughts: true }], - 'no-console': 'off', - 'import/no-cycle': 'error', - 'import/no-self-import': 'error', - 'import/extensions': 'off', - 'no-promise-executor-return': 'off', - 'no-param-reassign': 'off', - 'no-continue': 'off', - 'no-restricted-syntax': 'off', - 'react/prop-types': ['off'], - 'react/display-name': ['off'], - 'no-unused-vars': ['error', { varsIgnorePattern: '^_' }], - quotes: ['error', 'single'], - }, - overrides: [ - { - files: ['**/*.ts', '**/*.tsx'], - rules: { - 'no-unused-vars': 'off', // off because it conflicts with '@typescript-eslint/no-unused-vars' - 'react/display-name': 'off', - '@typescript-eslint/no-unused-vars': 'warn', - }, - }, - { - files: ['rollup.config.js', '.eslintrc.js', 'jest.config.js'], - env: { - node: true, - }, - }, - { - files: [ - '**/*.test.js', - '**/*.test.jsx', - '**/*.test.ts', - '**/*.test.tsx', - '**/*.spec.js', - '**/*.spec.jsx', - '**/*.spec.ts', - '**/*.spec.tsx', - 'setupTests.js', - ], - env: { - jest: true, - node: true, - }, - rules: { - 'react/display-name': 'off', - 'react/prop-types': 'off', - 'react/no-unescaped-entities': 'off', - }, - }, - { - files: ['**/*.ts', '**/*.tsx'], - parser: '@typescript-eslint/parser', - parserOptions: { - project: './client/tsconfig.json', - }, - plugins: ['@typescript-eslint/eslint-plugin', 'jest'], - extends: [ - 'plugin:@typescript-eslint/eslint-recommended', - 'plugin:@typescript-eslint/recommended', - ], - rules: { - '@typescript-eslint/no-explicit-any': 'error', - }, - }, - { - files: './packages/data-provider/**/*.ts', - overrides: [ - { - files: '**/*.ts', - parser: '@typescript-eslint/parser', - parserOptions: { - project: './packages/data-provider/tsconfig.json', - }, - }, - ], - }, - { - files: ['./packages/data-provider/specs/**/*.ts'], - parserOptions: { - project: './packages/data-provider/tsconfig.spec.json', - }, - }, - ], - settings: { - 
react: { - createClass: 'createReactClass', // Regex for Component Factory to use, - // default to "createReactClass" - pragma: 'React', // Pragma to use, default to "React" - fragment: 'Fragment', // Fragment to use (may be a property of ), default to "Fragment" - version: 'detect', // React version. "detect" automatically picks the version you have installed. - }, - 'import/parsers': { - '@typescript-eslint/parser': ['.ts', '.tsx'], - }, - 'import/resolver': { - typescript: { - project: ['./client/tsconfig.json'], - }, - node: { - project: ['./client/tsconfig.json'], - }, - }, - }, -}; diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 142f67c953..5951ed694e 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -126,6 +126,18 @@ Apply the following naming conventions to branches, labels, and other Git-relate - **Current Stance**: At present, this backend transition is of lower priority and might not be pursued. +## 7. Module Import Conventions + +- `npm` packages first, + - from shortest line (top) to longest (bottom) + +- Followed by typescript types (pertains to data-provider and client workspaces) + - longest line (top) to shortest (bottom) + - types from package come first + +- Lastly, local imports + - longest line (top) to shortest (bottom) + - imports with alias `~` treated the same as relative import with respect to line length --- diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml index b6b64c3f2d..3a3b828ee1 100644 --- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -1,12 +1,19 @@ name: Bug Report description: File a bug report title: "[Bug]: " -labels: ["bug"] +labels: ["🐛 bug"] body: - type: markdown attributes: value: | Thanks for taking the time to fill out this bug report! 
+ + Before submitting, please: + - Search existing [Issues and Discussions](https://github.com/danny-avila/LibreChat/discussions) to see if your bug has already been reported + - Use [Discussions](https://github.com/danny-avila/LibreChat/discussions) instead of Issues for: + - General inquiries + - Help with setup + - Questions about whether you're experiencing a bug - type: textarea id: what-happened attributes: @@ -15,6 +22,23 @@ body: placeholder: Please give as many details as possible validations: required: true + - type: textarea + id: version-info + attributes: + label: Version Information + description: | + If using Docker, please run and provide the output of: + ```bash + docker images | grep librechat + ``` + + If running from source, please run and provide the output of: + ```bash + git rev-parse HEAD + ``` + placeholder: Paste the output here + validations: + required: true - type: textarea id: steps-to-reproduce attributes: @@ -39,7 +63,21 @@ body: id: logs attributes: label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. + description: | + Please paste relevant logs that were created when reproducing the error. + + Log locations: + - Docker: Project root directory ./logs + - npm: ./api/logs + + There are two types of logs that can help diagnose the issue: + - debug logs (debug-YYYY-MM-DD.log) + - error logs (error-YYYY-MM-DD.log) + + Error logs contain exact stack traces and are especially helpful, but both can provide valuable information. + Please only include the relevant portions of logs that correspond to when you reproduced the error. + + For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here. 
render: shell - type: textarea id: screenshots @@ -50,7 +88,7 @@ body: id: terms attributes: label: Code of Conduct - description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md) + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md) options: - label: I agree to follow this project's Code of Conduct - required: true + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml index 26155bdc68..613c9e0a01 100644 --- a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -1,7 +1,7 @@ name: Feature Request description: File a feature request -title: "Enhancement: " -labels: ["enhancement"] +title: "[Enhancement]: " +labels: ["✨ enhancement"] body: - type: markdown attributes: @@ -43,7 +43,7 @@ body: id: terms attributes: label: Code of Conduct - description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md) + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md) options: - label: I agree to follow this project's Code of Conduct required: true diff --git a/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml b/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml new file mode 100644 index 0000000000..5fddced9f8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml @@ -0,0 +1,33 @@ +name: New Language Request +description: Request to add a new language for LibreChat translations. 
+title: "New Language Request: " +labels: ["✨ enhancement", "🌍 i18n"] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to submit a new language request! Please fill out the following details so we can review your request. + - type: input + id: language_name + attributes: + label: Language Name + description: Please provide the full name of the language (e.g., Spanish, Mandarin). + placeholder: e.g., Spanish + validations: + required: true + - type: input + id: iso_code + attributes: + label: ISO 639-1 Code + description: Please provide the ISO 639-1 code for the language (e.g., es for Spanish). You can refer to [this list](https://www.w3schools.com/tags/ref_language_codes.asp) for valid codes. + placeholder: e.g., es + validations: + required: true + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md). 
+ options: + - label: I agree to follow this project's Code of Conduct + required: true diff --git a/.github/ISSUE_TEMPLATE/QUESTION.yml b/.github/ISSUE_TEMPLATE/QUESTION.yml index 8a0cbf5535..c66e6baa3b 100644 --- a/.github/ISSUE_TEMPLATE/QUESTION.yml +++ b/.github/ISSUE_TEMPLATE/QUESTION.yml @@ -1,7 +1,7 @@ name: Question description: Ask your question title: "[Question]: " -labels: ["question"] +labels: ["❓ question"] body: - type: markdown attributes: @@ -44,7 +44,7 @@ body: id: terms attributes: label: Code of Conduct - description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md) + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md) options: - label: I agree to follow this project's Code of Conduct required: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index ccdc68d81b..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,47 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. 
-# Please see the documentation for all configuration options: -# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates - -version: 2 -updates: - - package-ecosystem: "npm" # See documentation for possible values - directory: "/api" # Location of package manifests - target-branch: "dev" - versioning-strategy: increase-if-necessary - schedule: - interval: "weekly" - allow: - # Allow both direct and indirect updates for all packages - - dependency-type: "all" - commit-message: - prefix: "npm api prod" - prefix-development: "npm api dev" - include: "scope" - - package-ecosystem: "npm" # See documentation for possible values - directory: "/client" # Location of package manifests - target-branch: "dev" - versioning-strategy: increase-if-necessary - schedule: - interval: "weekly" - allow: - # Allow both direct and indirect updates for all packages - - dependency-type: "all" - commit-message: - prefix: "npm client prod" - prefix-development: "npm client dev" - include: "scope" - - package-ecosystem: "npm" # See documentation for possible values - directory: "/" # Location of package manifests - target-branch: "dev" - versioning-strategy: increase-if-necessary - schedule: - interval: "weekly" - allow: - # Allow both direct and indirect updates for all packages - - dependency-type: "all" - commit-message: - prefix: "npm all prod" - prefix-development: "npm all dev" - include: "scope" - diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 06d2656bd6..cb637787f1 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,7 +1,10 @@ # Pull Request Template +⚠️ Before Submitting a PR, Please Review: +- Please ensure that you have thoroughly read and understood the [Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) before submitting your Pull Request. 
-### ⚠️ Before Submitting a PR, read the [Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) in full! +⚠️ Documentation Updates Notice: +- Kindly note that documentation updates are managed in this repository: [librechat.ai](https://github.com/LibreChat-AI/librechat.ai) ## Summary @@ -15,7 +18,6 @@ Please delete any irrelevant options. - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update -- [ ] Documentation update - [ ] Translation update ## Testing @@ -26,6 +28,8 @@ Please describe your test process and include instructions so that we can reprod ## Checklist +Please delete any irrelevant options. + - [ ] My code adheres to this project's style guidelines - [ ] I have performed a self-review of my own code - [ ] I have commented in any complex areas of my code @@ -34,3 +38,4 @@ Please describe your test process and include instructions so that we can reprod - [ ] I have written tests demonstrating that my changes are effective or that my feature works - [ ] Local unit tests pass with my changes - [ ] Any changes dependent on mine have been merged and published in downstream modules. +- [ ] A pull request for updating the documentation has been submitted. 
diff --git a/.github/workflows/a11y.yml b/.github/workflows/a11y.yml new file mode 100644 index 0000000000..a7cfd08169 --- /dev/null +++ b/.github/workflows/a11y.yml @@ -0,0 +1,26 @@ +name: Lint for accessibility issues + +on: + pull_request: + paths: + - 'client/src/**' + workflow_dispatch: + inputs: + run_workflow: + description: 'Set to true to run this workflow' + required: true + default: 'false' + +jobs: + axe-linter: + runs-on: ubuntu-latest + if: > + (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat') || + (github.event_name == 'workflow_dispatch' && github.event.inputs.run_workflow == 'true') + + steps: + - uses: actions/checkout@v4 + - uses: dequelabs/axe-linter-action@v1 + with: + api_key: ${{ secrets.AXE_LINTER_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml index 2d5cf387be..5bc3d3b2db 100644 --- a/.github/workflows/backend-review.yml +++ b/.github/workflows/backend-review.yml @@ -33,16 +33,32 @@ jobs: - name: Install dependencies run: npm ci - - name: Install Data Provider + - name: Install Data Provider Package run: npm run build:data-provider + - name: Install MCP Package + run: npm run build:mcp + + - name: Create empty auth.json file + run: | + mkdir -p api/data + echo '{}' > api/data/auth.json + + - name: Check for Circular dependency in rollup + working-directory: ./packages/data-provider + run: | + output=$(npm run rollup:api) + echo "$output" + if echo "$output" | grep -q "Circular dependency"; then + echo "Error: Circular dependency detected!" 
+ exit 1 + fi + + - name: Prepare .env.test file + run: cp api/test/.env.test.example api/test/.env.test + - name: Run unit tests run: cd api && npm run test:ci - name: Run librechat-data-provider unit tests - run: cd packages/data-provider && npm run test:ci - - - name: Run linters - uses: wearerequired/lint-action@v2 - with: - eslint: true \ No newline at end of file + run: cd packages/data-provider && npm run test:ci \ No newline at end of file diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml deleted file mode 100644 index ffc2016ec3..0000000000 --- a/.github/workflows/container.yml +++ /dev/null @@ -1,83 +0,0 @@ -name: Docker Compose Build on Tag - -# The workflow is triggered when a tag is pushed -on: - push: - tags: - - "*" - -jobs: - build: - runs-on: ubuntu-latest - - steps: - # Check out the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set up Docker - - name: Set up Docker - uses: docker/setup-buildx-action@v3 - - # Set up QEMU for cross-platform builds - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - # Log in to GitHub Container Registry - - name: Log in to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - # Prepare Docker Build - - name: Build Docker images - run: | - cp .env.example .env - - # Tag and push librechat-api - - name: Docker metadata for librechat-api - id: meta-librechat-api - uses: docker/metadata-action@v5 - with: - images: | - ghcr.io/${{ github.repository_owner }}/librechat-api - tags: | - type=raw,value=latest - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - - - name: Build and librechat-api - uses: docker/build-push-action@v5 - with: - file: Dockerfile.multi - context: . 
- push: true - tags: ${{ steps.meta-librechat-api.outputs.tags }} - platforms: linux/amd64,linux/arm64 - target: api-build - - # Tag and push librechat - - name: Docker metadata for librechat - id: meta-librechat - uses: docker/metadata-action@v5 - with: - images: | - ghcr.io/${{ github.repository_owner }}/librechat - tags: | - type=raw,value=latest - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - - - name: Build and librechat - uses: docker/build-push-action@v5 - with: - file: Dockerfile - context: . - push: true - tags: ${{ steps.meta-librechat.outputs.tags }} - platforms: linux/amd64,linux/arm64 - target: node \ No newline at end of file diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml new file mode 100644 index 0000000000..fc1c02db69 --- /dev/null +++ b/.github/workflows/deploy-dev.yml @@ -0,0 +1,41 @@ +name: Update Test Server + +on: + workflow_run: + workflows: ["Docker Dev Images Build"] + types: + - completed + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + if: | + github.repository == 'danny-avila/LibreChat' && + (github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success') + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install SSH Key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.DO_SSH_PRIVATE_KEY }} + known_hosts: ${{ secrets.DO_KNOWN_HOSTS }} + + - name: Run update script on DigitalOcean Droplet + env: + DO_HOST: ${{ secrets.DO_HOST }} + DO_USER: ${{ secrets.DO_USER }} + run: | + ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF + sudo -i -u danny bash << EEOF + cd ~/LibreChat && \ + git fetch origin main && \ + npm run update:deployed && \ + git checkout do-deploy && \ + git rebase main && \ + npm run start:deployed && \ + echo "Update completed. Application should be running now." 
+ EEOF + EOF diff --git a/.github/workflows/eslint-ci.yml b/.github/workflows/eslint-ci.yml new file mode 100644 index 0000000000..ea1a5f2416 --- /dev/null +++ b/.github/workflows/eslint-ci.yml @@ -0,0 +1,73 @@ +name: ESLint Code Quality Checks + +on: + pull_request: + branches: + - main + - dev + - release/* + paths: + - 'api/**' + - 'client/**' + +jobs: + eslint_checks: + name: Run ESLint Linting + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + actions: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: npm + + - name: Install dependencies + run: npm ci + + # Run ESLint on changed files within the api/ and client/ directories. + - name: Run ESLint on changed files + env: + SARIF_ESLINT_IGNORE_SUPPRESSED: "true" + run: | + # Extract the base commit SHA from the pull_request event payload. + BASE_SHA=$(jq --raw-output .pull_request.base.sha "$GITHUB_EVENT_PATH") + echo "Base commit SHA: $BASE_SHA" + + # Get changed files (only JS/TS files in api/ or client/) + CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB "$BASE_SHA" HEAD | grep -E '^(api|client)/.*\.(js|jsx|ts|tsx)$' || true) + + # Debug output + echo "Changed files:" + echo "$CHANGED_FILES" + + # Ensure there are files to lint before running ESLint + if [[ -z "$CHANGED_FILES" ]]; then + echo "No matching files changed. Skipping ESLint." 
+ echo "UPLOAD_SARIF=false" >> $GITHUB_ENV + exit 0 + fi + + # Set variable to allow SARIF upload + echo "UPLOAD_SARIF=true" >> $GITHUB_ENV + + # Run ESLint + npx eslint --no-error-on-unmatched-pattern \ + --config eslint.config.mjs \ + --format @microsoft/eslint-formatter-sarif \ + --output-file eslint-results.sarif $CHANGED_FILES || true + + - name: Upload analysis results to GitHub + if: env.UPLOAD_SARIF == 'true' + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: eslint-results.sarif + wait-for-processing: true \ No newline at end of file diff --git a/.github/workflows/frontend-review.yml b/.github/workflows/frontend-review.yml index 9f479e1b7a..0756c6773c 100644 --- a/.github/workflows/frontend-review.yml +++ b/.github/workflows/frontend-review.yml @@ -1,11 +1,6 @@ -#github action to run unit tests for frontend with jest name: Frontend Unit Tests + on: - # push: - # branches: - # - main - # - dev - # - release/* pull_request: branches: - main @@ -14,11 +9,34 @@ on: paths: - 'client/**' - 'packages/**' + jobs: - tests_frontend: - name: Run frontend unit tests + tests_frontend_ubuntu: + name: Run frontend unit tests on Ubuntu timeout-minutes: 60 runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build Client + run: npm run frontend:ci + + - name: Run unit tests + run: npm run test:ci --verbose + working-directory: client + + tests_frontend_windows: + name: Run frontend unit tests on Windows + timeout-minutes: 60 + runs-on: windows-latest steps: - uses: actions/checkout@v4 - name: Use Node.js 20.x diff --git a/.github/workflows/generate_embeddings.yml b/.github/workflows/generate_embeddings.yml new file mode 100644 index 0000000000..c514f9c1d6 --- /dev/null +++ b/.github/workflows/generate_embeddings.yml @@ -0,0 +1,20 @@ +name: 'generate_embeddings' +on: + workflow_dispatch: + 
push: + branches: + - main + paths: + - 'docs/**' + +jobs: + generate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: supabase/embeddings-generator@v0.0.5 + with: + supabase-url: ${{ secrets.SUPABASE_URL }} + supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }} + openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }} + docs-root-path: 'docs' \ No newline at end of file diff --git a/.github/workflows/helmcharts.yml b/.github/workflows/helmcharts.yml new file mode 100644 index 0000000000..bc715557e4 --- /dev/null +++ b/.github/workflows/helmcharts.yml @@ -0,0 +1,33 @@ +name: Build Helm Charts on Tag + +# The workflow is triggered when a tag is pushed +on: + push: + tags: + - "*" + +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Install Helm + uses: azure/setup-helm@v4 + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/i18n-unused-keys.yml b/.github/workflows/i18n-unused-keys.yml new file mode 100644 index 0000000000..79f95d3b27 --- /dev/null +++ b/.github/workflows/i18n-unused-keys.yml @@ -0,0 +1,84 @@ +name: Detect Unused i18next Strings + +on: + pull_request: + paths: + - "client/src/**" + +jobs: + detect-unused-i18n-keys: + runs-on: ubuntu-latest + permissions: + pull-requests: write # Required for posting PR comments + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Find unused i18next keys + id: find-unused + run: | + echo "🔍 Scanning for unused i18next keys..." 
+ + # Define paths + I18N_FILE="client/src/locales/en/translation.json" + SOURCE_DIR="client/src" + + # Check if translation file exists + if [[ ! -f "$I18N_FILE" ]]; then + echo "::error title=Missing i18n File::Translation file not found: $I18N_FILE" + exit 1 + fi + + # Extract all keys from the JSON file + KEYS=$(jq -r 'keys[]' "$I18N_FILE") + + # Track unused keys + UNUSED_KEYS=() + + # Check if each key is used in the source code + for KEY in $KEYS; do + if ! grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$SOURCE_DIR"; then + UNUSED_KEYS+=("$KEY") + fi + done + + # Output results + if [[ ${#UNUSED_KEYS[@]} -gt 0 ]]; then + echo "🛑 Found ${#UNUSED_KEYS[@]} unused i18n keys:" + echo "unused_keys=$(echo "${UNUSED_KEYS[@]}" | jq -R -s -c 'split(" ")')" >> $GITHUB_ENV + for KEY in "${UNUSED_KEYS[@]}"; do + echo "::warning title=Unused i18n Key::'$KEY' is defined but not used in the codebase." + done + else + echo "✅ No unused i18n keys detected!" + echo "unused_keys=[]" >> $GITHUB_ENV + fi + + - name: Post verified comment on PR + if: env.unused_keys != '[]' + run: | + PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH") + + # Format the unused keys list correctly, filtering out empty entries + FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- `/;s/$/`/' ) + + COMMENT_BODY=$(cat <> $GITHUB_ENV - - # Set up Docker - - name: Set up Docker - uses: docker/setup-buildx-action@v3 - - # Set up QEMU - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - # Log in to GitHub Container Registry - - name: Log in to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - # Prepare Docker Build - - name: Build Docker images - run: cp .env.example .env - - # Docker metadata for librechat-api - - name: Docker metadata for librechat-api - id: meta-librechat-api - uses: docker/metadata-action@v5 - with: - images: ghcr.io/${{ 
github.repository_owner }}/librechat-api - tags: | - type=raw,value=${{ env.LATEST_TAG }},enable=true - type=raw,value=latest,enable=true - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - - # Build and push librechat-api - - name: Build and push librechat-api - uses: docker/build-push-action@v5 - with: - file: Dockerfile.multi - context: . - push: true - tags: ${{ steps.meta-librechat-api.outputs.tags }} - platforms: linux/amd64,linux/arm64 - target: api-build - - # Docker metadata for librechat - - name: Docker metadata for librechat - id: meta-librechat - uses: docker/metadata-action@v5 - with: - images: ghcr.io/${{ github.repository_owner }}/librechat - tags: | - type=raw,value=${{ env.LATEST_TAG }},enable=true - type=raw,value=latest,enable=true - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - - # Build and push librechat - - name: Build and push librechat - uses: docker/build-push-action@v5 - with: - file: Dockerfile - context: . 
- push: true - tags: ${{ steps.meta-librechat.outputs.tags }} - platforms: linux/amd64,linux/arm64 - target: node diff --git a/.github/workflows/locize-i18n-sync.yml b/.github/workflows/locize-i18n-sync.yml new file mode 100644 index 0000000000..082d3a46a6 --- /dev/null +++ b/.github/workflows/locize-i18n-sync.yml @@ -0,0 +1,72 @@ +name: Sync Locize Translations & Create Translation PR + +on: + push: + branches: [main] + repository_dispatch: + types: [locize/versionPublished] + +jobs: + sync-translations: + name: Sync Translation Keys with Locize + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set Up Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Install locize CLI + run: npm install -g locize-cli + + # Sync translations (Push missing keys & remove deleted ones) + - name: Sync Locize with Repository + if: ${{ github.event_name == 'push' }} + run: | + cd client/src/locales + locize sync --api-key ${{ secrets.LOCIZE_API_KEY }} --project-id ${{ secrets.LOCIZE_PROJECT_ID }} --language en + + # When triggered by repository_dispatch, skip sync step. + - name: Skip sync step on non-push events + if: ${{ github.event_name != 'push' }} + run: echo "Skipping sync as the event is not a push." + + create-pull-request: + name: Create Translation PR on Version Published + runs-on: ubuntu-latest + needs: sync-translations + permissions: + contents: write + pull-requests: write + steps: + # 1. Check out the repository. + - name: Checkout Repository + uses: actions/checkout@v4 + + # 2. Download translation files from locize. + - name: Download Translations from locize + uses: locize/download@v1 + with: + project-id: ${{ secrets.LOCIZE_PROJECT_ID }} + path: "client/src/locales" + + # 3. Create a Pull Request using built-in functionality. 
+ - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ secrets.GITHUB_TOKEN }} + sign-commits: true + commit-message: "🌍 i18n: Update translation.json with latest translations" + base: main + branch: i18n/locize-translation-update + reviewers: danny-avila + title: "🌍 i18n: Update translation.json with latest translations" + body: | + **Description**: + - 🎯 **Objective**: Update `translation.json` with the latest translations from locize. + - 🔍 **Details**: This PR is automatically generated upon receiving a versionPublished event with version "latest". It reflects the newest translations provided by locize. + - ✅ **Status**: Ready for review. + labels: "🌍 i18n" \ No newline at end of file diff --git a/.github/workflows/main-image-workflow.yml b/.github/workflows/main-image-workflow.yml index a990e04ae2..43c9d95753 100644 --- a/.github/workflows/main-image-workflow.yml +++ b/.github/workflows/main-image-workflow.yml @@ -1,12 +1,20 @@ name: Docker Compose Build Latest Main Image Tag (Manual Dispatch) -# The workflow is manually triggered on: workflow_dispatch: jobs: build: runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: librechat-api + - target: node + file: Dockerfile + image_name: librechat steps: - name: Checkout @@ -17,12 +25,15 @@ jobs: git fetch --tags echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV - - name: Set up Docker - uses: docker/setup-buildx-action@v3 - + # Set up QEMU - name: Set up QEMU uses: docker/setup-qemu-action@v3 + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: @@ -30,26 +41,29 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - # Docker metadata for librechat - - name: Docker metadata for librechat - id: 
meta-librechat - uses: docker/metadata-action@v5 + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 with: - images: ghcr.io/${{ github.repository_owner }}/librechat - tags: | - type=raw,value=${{ env.LATEST_TAG }},enable=true - type=raw,value=latest,enable=true - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - # Build and push librechat with only linux/amd64 platform - - name: Build and push librechat + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images uses: docker/build-push-action@v5 with: - file: Dockerfile context: . + file: ${{ matrix.file }} push: true - tags: ${{ steps.meta-librechat.outputs.tags }} - platforms: linux/amd64 - target: node + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} diff --git a/.github/workflows/mkdocs.yaml b/.github/workflows/mkdocs.yaml deleted file mode 100644 index 3b2878fa2a..0000000000 --- a/.github/workflows/mkdocs.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: mkdocs -on: - push: - branches: - - main -permissions: - contents: write -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 - with: - python-version: 3.x - - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV - - uses: actions/cache@v3 - with: - key: mkdocs-material-${{ env.cache_id }} - path: .cache - restore-keys: | - mkdocs-material- - - run: pip install 
mkdocs-material - - run: pip install mkdocs-nav-weight - - run: pip install mkdocs-publisher - - run: pip install mkdocs-exclude - - run: mkdocs gh-deploy --force diff --git a/.github/workflows/tag-images.yml b/.github/workflows/tag-images.yml new file mode 100644 index 0000000000..e90f43978a --- /dev/null +++ b/.github/workflows/tag-images.yml @@ -0,0 +1,67 @@ +name: Docker Images Build on Tag + +on: + push: + tags: + - '*' + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: api-build + file: Dockerfile.multi + image_name: librechat-api + - target: node + file: Dockerfile + image_name: librechat + + steps: + # Check out the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set up QEMU + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + # Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # Log in to GitHub Container Registry + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # Prepare the environment + - name: Prepare environment + run: | + cp .env.example .env + + # Build and push Docker images for each target + - name: Build and push Docker images + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ${{ matrix.file }} + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }} + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest + platforms: linux/amd64,linux/arm64 + target: ${{ matrix.target }} diff --git a/.github/workflows/unused-packages.yml b/.github/workflows/unused-packages.yml new file mode 100644 index 0000000000..7a95f9c5be --- /dev/null +++ b/.github/workflows/unused-packages.yml @@ -0,0 +1,147 @@ +name: Detect Unused NPM Packages + +on: [pull_request] + +jobs: + detect-unused-packages: + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: 'npm' + + - name: Install depcheck + run: npm install -g depcheck + + - name: Validate JSON files + run: | + for FILE in package.json client/package.json api/package.json; do + if [[ -f "$FILE" ]]; then + jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1) + fi + done + + - name: Extract Dependencies Used in Scripts + id: extract-used-scripts + run: | + extract_deps_from_scripts() { + local package_file=$1 + if [[ -f "$package_file" ]]; then + jq -r '.scripts | to_entries[].value' "$package_file" | \ + grep -oE '([a-zA-Z0-9_-]+)' | sort -u > used_scripts.txt + else + touch used_scripts.txt + fi + } + + extract_deps_from_scripts "package.json" + mv used_scripts.txt root_used_deps.txt + + extract_deps_from_scripts "client/package.json" + mv used_scripts.txt client_used_deps.txt + + extract_deps_from_scripts "api/package.json" + mv used_scripts.txt api_used_deps.txt + + - name: Extract Dependencies Used in Source Code + id: extract-used-code + run: | + extract_deps_from_code() { + local folder=$1 + local 
output_file=$2 + if [[ -d "$folder" ]]; then + grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \ + sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file" + + grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \ + sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" + + sort -u "$output_file" -o "$output_file" + else + touch "$output_file" + fi + } + + extract_deps_from_code "." root_used_code.txt + extract_deps_from_code "client" client_used_code.txt + extract_deps_from_code "api" api_used_code.txt + + - name: Run depcheck for root package.json + id: check-root + run: | + if [[ -f "package.json" ]]; then + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt | sort) || echo "") + echo "ROOT_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + fi + + - name: Run depcheck for client/package.json + id: check-client + run: | + if [[ -f "client/package.json" ]]; then + chmod -R 755 client + cd client + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt | sort) || echo "") + echo "CLIENT_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + cd .. + fi + + - name: Run depcheck for api/package.json + id: check-api + run: | + if [[ -f "api/package.json" ]]; then + chmod -R 755 api + cd api + UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt | sort) || echo "") + echo "API_UNUSED<> $GITHUB_ENV + echo "$UNUSED" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + cd .. 
+ fi + + - name: Post comment on PR if unused dependencies are found + if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != '' + run: | + PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH") + + ROOT_LIST=$(echo "$ROOT_UNUSED" | awk '{print "- `" $0 "`"}') + CLIENT_LIST=$(echo "$CLIENT_UNUSED" | awk '{print "- `" $0 "`"}') + API_LIST=$(echo "$API_UNUSED" | awk '{print "- `" $0 "`"}') + + COMMENT_BODY=$(cat </**"], + "program": "${workspaceFolder}/api/server/index.js", + "env": { + "NODE_ENV": "production" + }, + "console": "integratedTerminal", + "envFile": "${workspaceFolder}/.env" + } + ] +} diff --git a/Dockerfile b/Dockerfile index 81766fdeb3..46cabe6dff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,21 +1,32 @@ -# Base node image -FROM node:18-alpine AS node +# v0.7.7-rc1 -COPY . /app +# Base node image +FROM node:20-alpine AS node + +RUN apk --no-cache add curl + +RUN mkdir -p /app && chown node:node /app WORKDIR /app -# Allow mounting of these files, which have no default -# values. -RUN touch .env -RUN npm config set fetch-retry-maxtimeout 300000 -RUN apk add --no-cache g++ make python3 py3-pip -RUN npm install -g node-gyp -RUN apk --no-cache add curl && \ - npm install +USER node -# React client build -ENV NODE_OPTIONS="--max-old-space-size=2048" -RUN npm run frontend +COPY --chown=node:node . . 
+ +RUN \ + # Allow mounting of these files, which have no default + touch .env ; \ + # Create directories for the volumes to inherit the correct permissions + mkdir -p /app/client/public/images /app/api/logs ; \ + npm config set fetch-retry-maxtimeout 600000 ; \ + npm config set fetch-retries 5 ; \ + npm config set fetch-retry-mintimeout 15000 ; \ + npm install --no-audit; \ + # React client build + NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \ + npm prune --production; \ + npm cache clean --force + +RUN mkdir -p /app/client/public/images /app/api/logs # Node API setup EXPOSE 3080 diff --git a/Dockerfile.multi b/Dockerfile.multi index 0d5ebec5e2..570fbecf31 100644 --- a/Dockerfile.multi +++ b/Dockerfile.multi @@ -1,39 +1,56 @@ -# Build API, Client and Data Provider -FROM node:20-alpine AS base +# Dockerfile.multi +# v0.7.7-rc1 + +# Base for all builds +FROM node:20-alpine AS base-min +WORKDIR /app +RUN apk --no-cache add curl +RUN npm config set fetch-retry-maxtimeout 600000 && \ + npm config set fetch-retries 5 && \ + npm config set fetch-retry-mintimeout 15000 +COPY package*.json ./ +COPY packages/data-provider/package*.json ./packages/data-provider/ +COPY packages/mcp/package*.json ./packages/mcp/ +COPY client/package*.json ./client/ +COPY api/package*.json ./api/ + +# Install all dependencies for every build +FROM base-min AS base +WORKDIR /app +RUN npm ci # Build data-provider FROM base AS data-provider-build WORKDIR /app/packages/data-provider -COPY ./packages/data-provider ./ -RUN npm install +COPY packages/data-provider ./ RUN npm run build -# React client build -FROM data-provider-build AS client-build +# Build mcp package +FROM base AS mcp-build +WORKDIR /app/packages/mcp +COPY packages/mcp ./ +COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist +RUN npm run build + +# Client build +FROM base AS client-build WORKDIR /app/client -COPY ./client/ ./ -# Copy data-provider to client's node_modules 
-RUN mkdir -p /app/client/node_modules/librechat-data-provider/ -RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/ -RUN npm install +COPY client ./ +COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist ENV NODE_OPTIONS="--max-old-space-size=2048" RUN npm run build -# Node API setup -FROM data-provider-build AS api-build +# API setup (including client dist) +FROM base-min AS api-build +WORKDIR /app +# Install only production deps +RUN npm ci --omit=dev +COPY api ./api +COPY config ./config +COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist +COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist +COPY --from=client-build /app/client/dist ./client/dist WORKDIR /app/api -COPY api/package*.json ./ -COPY api/ ./ -# Copy data-provider to API's node_modules -RUN mkdir -p /app/api/node_modules/librechat-data-provider/ -RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/ -RUN npm install -COPY --from=client-build /app/client/dist /app/client/dist EXPOSE 3080 ENV HOST=0.0.0.0 CMD ["node", "server/index.js"] - -# Nginx setup -FROM nginx:1.21.1-alpine AS prod-stage -COPY ./client/nginx.conf /etc/nginx/conf.d/default.conf -CMD ["nginx", "-g", "daemon off;"] diff --git a/README.md b/README.md index 928e1cc9d3..2e662ac262 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

- +

LibreChat @@ -38,25 +38,87 @@

-# 📃 Features +

+ + Translation Progress + +

-- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates -- 💬 Multimodal Chat: - - Upload and analyze images with GPT-4 and Gemini Vision 📸 - - More filetypes and Assistants API integration in Active Development 🚧 -- 🌎 Multilingual UI: - - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro, + +# ✨ Features + +- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features + +- 🤖 **AI Model Selection**: + - Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Assistants API (incl. Azure) + - [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required + - Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints): + - Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai, + - OpenRouter, Perplexity, ShuttleAI, Deepseek, Qwen, and more + +- 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**: + - Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran + - Seamless File Handling: Upload, process, and download files directly + - No Privacy Concerns: Fully isolated and secure execution + +- 🔦 **Agents & Tools Integration**: + - **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**: + - No-Code Custom Assistants: Build specialized, AI-driven helpers without coding + - Flexible & Extensible: Attach tools like DALL-E-3, file search, code execution, and more + - Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, and more + - [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools + - Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions + +- 🪄 **Generative UI with Code Artifacts**: + - [Code 
Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat + +- 💾 **Presets & Context Management**: + - Create, Save, & Share Custom Presets + - Switch between AI Endpoints and Presets mid-chat + - Edit, Resubmit, and Continue Messages with Conversation branching + - [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control + +- 💬 **Multimodal & File Interactions**: + - Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini 📸 + - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️ + +- 🌎 **Multilingual UI**: + - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית -- 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins -- 💾 Create, Save, & Share Custom Presets -- 🔄 Edit, Resubmit, and Continue messages with conversation branching -- 📤 Export conversations as screenshots, markdown, text, json. 
-- 🔍 Search all messages/conversations -- 🔌 Plugins, including web access, image generation with DALL-E-3 and more -- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools -- ⚙️ Configure Proxy, Reverse Proxy, Docker, many Deployment options, and completely Open-Source -[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚 +- 🧠 **Reasoning UI**: + - Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1 + +- 🎨 **Customizable Interface**: + - Customizable Dropdown & Interface that adapts to both power users and newcomers + +- 🗣️ **Speech & Audio**: + - Chat hands-free with Speech-to-Text and Text-to-Speech + - Automatically send and play Audio + - Supports OpenAI, Azure OpenAI, and Elevenlabs + +- 📥 **Import & Export Conversations**: + - Import Conversations from LibreChat, ChatGPT, Chatbot UI + - Export conversations as screenshots, markdown, text, json + +- 🔍 **Search & Discovery**: + - Search all messages/conversations + +- 👥 **Multi-User & Secure Access**: + - Multi-User, Secure Authentication with OAuth2, LDAP, & Email Login Support + - Built-in Moderation, and Token spend tools + +- ⚙️ **Configuration & Deployment**: + - Configure Proxy, Reverse Proxy, Docker, & many Deployment options + - Use completely local or deploy on the cloud + +- 📖 **Open-Source & Community**: + - Completely Open-Source & Built in Public + - Community-driven development, support, and feedback + +[For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚 ## 🪶 All-In-One AI Conversations with LibreChat @@ -64,37 +126,50 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform. 
- +[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI) -[![Watch the video](https://img.youtube.com/vi/pNIOs1ovsXw/maxresdefault.jpg)](https://youtu.be/pNIOs1ovsXw) Click on the thumbnail to open the video☝️ --- -## 📚 Documentation +## 🌐 Resources -For more information on how to use our advanced features, install and configure our software, and access our guidelines and tutorials, please check out our documentation at [docs.librechat.ai](https://docs.librechat.ai) +**GitHub Repo:** + - **RAG API:** [github.com/danny-avila/rag_api](https://github.com/danny-avila/rag_api) + - **Website:** [github.com/LibreChat-AI/librechat.ai](https://github.com/LibreChat-AI/librechat.ai) + +**Other:** + - **Website:** [librechat.ai](https://librechat.ai) + - **Documentation:** [docs.librechat.ai](https://docs.librechat.ai) + - **Blog:** [blog.librechat.ai](https://blog.librechat.ai) --- ## 📝 Changelog -Keep up with the latest updates by visiting the releases page - [Releases](https://github.com/danny-avila/LibreChat/releases) +Keep up with the latest updates by visiting the releases page and notes: +- [Releases](https://github.com/danny-avila/LibreChat/releases) +- [Changelog](https://www.librechat.ai/changelog) -**⚠️ [Breaking Changes](docs/general_info/breaking_changes.md)** -Please consult the breaking changes before updating. +**⚠️ Please consult the [changelog](https://www.librechat.ai/changelog) for breaking changes before updating.** --- ## ⭐ Star History

-danny-avila%2FLibreChat | Trendshift + + Star History Chart + +

+

+ + danny-avila%2FLibreChat | Trendshift + + + ROSS Index - Fastest Growing Open-Source Startups in Q1 2024 | Runa Capital +

- - - Star History Chart - --- @@ -104,6 +179,8 @@ Contributions, suggestions, bug reports and fixes are welcome! For new features, components, or extensions, please open an issue and discuss before sending a PR. +If you'd like to help translate LibreChat into your language, we'd love your contribution! Improving our translations not only makes LibreChat more accessible to users around the world but also enhances the overall user experience. Please check out our [Translation Guide](https://www.librechat.ai/docs/translation). + --- ## 💖 This project exists in its current state thanks to all the people who contribute @@ -111,3 +188,15 @@ For new features, components, or extensions, please open an issue and discuss be + +--- + +## 🎉 Special Thanks + +We thank [Locize](https://locize.com) for their translation management tools that support multiple languages in LibreChat. + +

+ + Locize Logo + +

diff --git a/api/app/bingai.js b/api/app/bingai.js deleted file mode 100644 index f7ecf4462d..0000000000 --- a/api/app/bingai.js +++ /dev/null @@ -1,114 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); -const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); -const { logger } = require('~/config'); - -const askBing = async ({ - text, - parentMessageId, - conversationId, - jailbreak, - jailbreakConversationId, - context, - systemMessage, - conversationSignature, - clientId, - invocationId, - toneStyle, - key: expiresAt, - onProgress, - userId, -}) => { - const isUserProvided = process.env.BINGAI_TOKEN === 'user_provided'; - - let key = null; - if (expiresAt && isUserProvided) { - checkUserKeyExpiry( - expiresAt, - 'Your BingAI Cookies have expired. Please provide your cookies again.', - ); - key = await getUserKey({ userId, name: 'bingAI' }); - } - - const { BingAIClient } = await import('nodejs-gpt'); - const store = { - store: new KeyvFile({ filename: './data/cache.json' }), - }; - - const bingAIClient = new BingAIClient({ - // "_U" cookie from bing.com - // userToken: - // isUserProvided ? key : process.env.BINGAI_TOKEN ?? null, - // If the above doesn't work, provide all your cookies as a string instead - cookies: isUserProvided ? key : process.env.BINGAI_TOKEN ?? 
null, - debug: false, - cache: store, - host: process.env.BINGAI_HOST || null, - proxy: process.env.PROXY || null, - }); - - let options = {}; - - if (jailbreakConversationId == 'false') { - jailbreakConversationId = false; - } - - if (jailbreak) { - options = { - jailbreakConversationId: jailbreakConversationId || jailbreak, - context, - systemMessage, - parentMessageId, - toneStyle, - onProgress, - clientOptions: { - features: { - genImage: { - server: { - enable: true, - type: 'markdown_list', - }, - }, - }, - }, - }; - } else { - options = { - conversationId, - context, - systemMessage, - parentMessageId, - toneStyle, - onProgress, - clientOptions: { - features: { - genImage: { - server: { - enable: true, - type: 'markdown_list', - }, - }, - }, - }, - }; - - // don't give those parameters for new conversation - // for new conversation, conversationSignature always is null - if (conversationSignature) { - options.encryptedConversationSignature = conversationSignature; - options.clientId = clientId; - options.invocationId = invocationId; - } - } - - logger.debug('bing options', options); - - const res = await bingAIClient.sendMessage(text, options); - - return res; - - // for reference: - // https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/demos/use-bing-client.js -}; - -module.exports = { askBing }; diff --git a/api/app/chatgpt-browser.js b/api/app/chatgpt-browser.js deleted file mode 100644 index 818661555d..0000000000 --- a/api/app/chatgpt-browser.js +++ /dev/null @@ -1,60 +0,0 @@ -require('dotenv').config(); -const { KeyvFile } = require('keyv-file'); -const { Constants } = require('librechat-data-provider'); -const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService'); - -const browserClient = async ({ - text, - parentMessageId, - conversationId, - model, - key: expiresAt, - onProgress, - onEventMessage, - abortController, - userId, -}) => { - const isUserProvided = process.env.CHATGPT_TOKEN === 'user_provided'; - - let 
key = null; - if (expiresAt && isUserProvided) { - checkUserKeyExpiry( - expiresAt, - 'Your ChatGPT Access Token has expired. Please provide your token again.', - ); - key = await getUserKey({ userId, name: 'chatGPTBrowser' }); - } - - const { ChatGPTBrowserClient } = await import('nodejs-gpt'); - const store = { - store: new KeyvFile({ filename: './data/cache.json' }), - }; - - const clientOptions = { - // Warning: This will expose your access token to a third party. Consider the risks before using this. - reverseProxyUrl: - process.env.CHATGPT_REVERSE_PROXY ?? 'https://ai.fakeopen.com/api/conversation', - // Access token from https://chat.openai.com/api/auth/session - accessToken: isUserProvided ? key : process.env.CHATGPT_TOKEN ?? null, - model: model, - debug: false, - proxy: process.env.PROXY ?? null, - user: userId, - }; - - const client = new ChatGPTBrowserClient(clientOptions, store); - let options = { onProgress, onEventMessage, abortController }; - - if (!!parentMessageId && !!conversationId) { - options = { ...options, parentMessageId, conversationId }; - } - - if (parentMessageId === Constants.NO_PARENT) { - delete options.conversationId; - } - - const res = await client.sendMessage(text, options); - return res; -}; - -module.exports = { browserClient }; diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js index 084c28eaac..522b6beb4f 100644 --- a/api/app/clients/AnthropicClient.js +++ b/api/app/clients/AnthropicClient.js @@ -1,28 +1,39 @@ const Anthropic = require('@anthropic-ai/sdk'); -const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); +const { HttpsProxyAgent } = require('https-proxy-agent'); const { - getResponseSender, + Constants, EModelEndpoint, + anthropicSettings, + getResponseSender, validateVisionModel, } = require('librechat-data-provider'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); -const spendTokens = 
require('~/models/spendTokens'); -const { getModelMaxTokens } = require('~/utils'); -const { formatMessage } = require('./prompts'); -const { getFiles } = require('~/models/File'); +const { + truncateText, + formatMessage, + addCacheControl, + titleFunctionPrompt, + parseParamFromPrompt, + createContextHandlers, +} = require('./prompts'); +const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils'); +const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens'); +const Tokenizer = require('~/server/services/Tokenizer'); +const { sleep } = require('~/server/utils'); const BaseClient = require('./BaseClient'); const { logger } = require('~/config'); const HUMAN_PROMPT = '\n\nHuman:'; const AI_PROMPT = '\n\nAssistant:'; -const tokenizersCache = {}; - /** Helper function to introduce a delay before retrying */ function delayBeforeRetry(attempts, baseDelay = 1000) { return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts)); } +const tokenEventTypes = new Set(['message_start', 'message_delta']); +const { legacy } = anthropicSettings; + class AnthropicClient extends BaseClient { constructor(apiKey, options = {}) { super(apiKey, options); @@ -33,6 +44,30 @@ class AnthropicClient extends BaseClient { ? 
options.contextStrategy.toLowerCase() : 'discard'; this.setOptions(options); + /** @type {string | undefined} */ + this.systemMessage; + /** @type {AnthropicMessageStartEvent| undefined} */ + this.message_start; + /** @type {AnthropicMessageDeltaEvent| undefined} */ + this.message_delta; + /** Whether the model is part of the Claude 3 Family + * @type {boolean} */ + this.isClaude3; + /** Whether to use Messages API or Completions API + * @type {boolean} */ + this.useMessages; + /** Whether or not the model is limited to the legacy amount of output tokens + * @type {boolean} */ + this.isLegacyOutput; + /** Whether or not the model supports Prompt Caching + * @type {boolean} */ + this.supportsCacheControl; + /** The key for the usage object's input tokens + * @type {string} */ + this.inputTokensKey = 'input_tokens'; + /** The key for the usage object's output tokens + * @type {string} */ + this.outputTokensKey = 'output_tokens'; } setOptions(options) { @@ -52,26 +87,45 @@ class AnthropicClient extends BaseClient { this.options = options; } - const modelOptions = this.options.modelOptions || {}; - this.modelOptions = { - ...modelOptions, - // set some good defaults (check for undefined in some cases because they may be 0) - model: modelOptions.model || 'claude-1', - temperature: typeof modelOptions.temperature === 'undefined' ? 1 : modelOptions.temperature, // 0 - 1, 1 is default - topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7 - topK: typeof modelOptions.topK === 'undefined' ? 
40 : modelOptions.topK, // 1-40, default: 40 - stop: modelOptions.stop, // no stop method for now - }; + this.modelOptions = Object.assign( + { + model: anthropicSettings.model.default, + }, + this.modelOptions, + this.options.modelOptions, + ); + + const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic); + this.isClaude3 = modelMatch.includes('claude-3'); + this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet'); + this.supportsCacheControl = + this.options.promptCache && this.checkPromptCacheSupport(modelMatch); + + if ( + this.isLegacyOutput && + this.modelOptions.maxOutputTokens && + this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default + ) { + this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default; + } - this.isClaude3 = this.modelOptions.model.includes('claude-3'); this.useMessages = this.isClaude3 || !!this.options.attachments; this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229'; - this.checkVisionRequest(this.options.attachments); + this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments)); this.maxContextTokens = - getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000; - this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500; + this.options.maxContextTokens ?? + getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? + 100000; + this.maxResponseTokens = + this.modelOptions.maxOutputTokens ?? + getModelMaxOutputTokens( + this.modelOptions.model, + this.options.endpointType ?? this.options.endpoint, + this.options.endpointTokenConfig, + ) ?? 
+ 1500; this.maxPromptTokens = this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens; @@ -93,38 +147,98 @@ class AnthropicClient extends BaseClient { this.startToken = '||>'; this.endToken = ''; - this.gptEncoder = this.constructor.getTokenizer('cl100k_base'); - - if (!this.modelOptions.stop) { - const stopTokens = [this.startToken]; - if (this.endToken && this.endToken !== this.startToken) { - stopTokens.push(this.endToken); - } - stopTokens.push(`${this.userLabel}`); - stopTokens.push('<|diff_marker|>'); - - this.modelOptions.stop = stopTokens; - } return this; } - getClient() { + /** + * Get the initialized Anthropic client. + * @param {Partial} requestOptions - The options for the client. + * @returns {Anthropic} The Anthropic client instance. + */ + getClient(requestOptions) { + /** @type {Anthropic.ClientOptions} */ const options = { + fetch: this.fetch, apiKey: this.apiKey, }; + if (this.options.proxy) { + options.httpAgent = new HttpsProxyAgent(this.options.proxy); + } + if (this.options.reverseProxyUrl) { options.baseURL = this.options.reverseProxyUrl; } + if ( + this.supportsCacheControl && + requestOptions?.model && + requestOptions.model.includes('claude-3-5-sonnet') + ) { + options.defaultHeaders = { + 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31', + }; + } else if (this.supportsCacheControl) { + options.defaultHeaders = { + 'anthropic-beta': 'prompt-caching-2024-07-31', + }; + } + return new Anthropic(options); } - getTokenCountForResponse(response) { + /** + * Get stream usage as returned by this client's API response. + * @returns {AnthropicStreamUsage} The stream usage object. + */ + getStreamUsage() { + const inputUsage = this.message_start?.message?.usage ?? {}; + const outputUsage = this.message_delta?.usage ?? {}; + return Object.assign({}, inputUsage, outputUsage); + } + + /** + * Calculates the correct token count for the current user message based on the token count map and API usage. 
+ * Edge case: If the calculation results in a negative value, it returns the original estimate. + * If revisiting a conversation with a chat history entirely composed of token estimates, + * the cumulative token count going forward should become more accurate as the conversation progresses. + * @param {Object} params - The parameters for the calculation. + * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. + * @param {string} params.currentMessageId - The ID of the current message to calculate. + * @param {AnthropicStreamUsage} params.usage - The usage object returned by the API. + * @returns {number} The correct token count for the current user message. + */ + calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { + const originalEstimate = tokenCountMap[currentMessageId] || 0; + + if (!usage || typeof usage.input_tokens !== 'number') { + return originalEstimate; + } + + tokenCountMap[currentMessageId] = 0; + const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { + const numCount = Number(count); + return sum + (isNaN(numCount) ? 0 : numCount); + }, 0); + const totalInputTokens = + (usage.input_tokens ?? 0) + + (usage.cache_creation_input_tokens ?? 0) + + (usage.cache_read_input_tokens ?? 0); + + const currentMessageTokens = totalInputTokens - totalTokensFromMap; + return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate; + } + + /** + * Get Token Count for LibreChat Message + * @param {TMessage} responseMessage + * @returns {number} + */ + getTokenCountForResponse(responseMessage) { return this.getTokenCountForMessage({ role: 'assistant', - content: response.text, + content: responseMessage.text, }); } @@ -134,14 +248,19 @@ class AnthropicClient extends BaseClient { * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request. * - Sets `this.isVisionModel` to `true` if vision request. 
* - Deletes `this.modelOptions.stop` if vision request. - * @param {Array | MongoFile[]> | Record} attachments + * @param {MongoFile[]} attachments */ checkVisionRequest(attachments) { const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic]; this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); const visionModelAvailable = availableModels?.includes(this.defaultVisionModel); - if (attachments && visionModelAvailable && !this.isVisionModel) { + if ( + attachments && + attachments.some((file) => file?.type && file?.type?.includes('image')) && + visionModelAvailable && + !this.isVisionModel + ) { this.modelOptions.model = this.defaultVisionModel; this.isVisionModel = true; } @@ -168,72 +287,54 @@ class AnthropicClient extends BaseClient { attachments, EModelEndpoint.anthropic, ); - message.image_urls = image_urls; + message.image_urls = image_urls.length ? image_urls : undefined; return files; } - async recordTokenUsage({ promptTokens, completionTokens }) { - logger.debug('[AnthropicClient] recordTokenUsage:', { promptTokens, completionTokens }); + /** + * @param {object} params + * @param {number} params.promptTokens + * @param {number} params.completionTokens + * @param {AnthropicStreamUsage} [params.usage] + * @param {string} [params.model] + * @param {string} [params.context='message'] + * @returns {Promise} + */ + async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) { + if (usage != null && usage?.input_tokens != null) { + const input = usage.input_tokens ?? 0; + const write = usage.cache_creation_input_tokens ?? 0; + const read = usage.cache_read_input_tokens ?? 0; + + await spendStructuredTokens( + { + context, + user: this.user, + conversationId: this.conversationId, + model: model ?? 
this.modelOptions.model, + endpointTokenConfig: this.options.endpointTokenConfig, + }, + { + promptTokens: { input, write, read }, + completionTokens, + }, + ); + + return; + } + await spendTokens( { + context, user: this.user, - model: this.modelOptions.model, - context: 'message', conversationId: this.conversationId, + model: model ?? this.modelOptions.model, endpointTokenConfig: this.options.endpointTokenConfig, }, { promptTokens, completionTokens }, ); } - /** - * - * @param {TMessage[]} _messages - * @returns {TMessage[]} - */ - async addPreviousAttachments(_messages) { - if (!this.options.resendImages) { - return _messages; - } - - /** - * - * @param {TMessage} message - */ - const processMessage = async (message) => { - if (!this.message_file_map) { - /** @type {Record */ - this.message_file_map = {}; - } - - const fileIds = message.files.map((file) => file.file_id); - const files = await getFiles({ - file_id: { $in: fileIds }, - }); - - await this.addImageURLs(message, files); - - this.message_file_map[message.messageId] = files; - return message; - }; - - const promises = []; - - for (const message of _messages) { - if (!message.files) { - promises.push(message); - continue; - } - - promises.push(processMessage(message)); - } - - const messages = await Promise.all(promises); - - this.checkVisionRequest(this.message_file_map); - return messages; - } - async buildMessages(messages, parentMessageId) { const orderedMessages = this.constructor.getMessagesForConversation({ messages, @@ -242,12 +343,13 @@ class AnthropicClient extends BaseClient { logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId }); - if (!this.isVisionModel && this.options.attachments) { - throw new Error('Attachments are only supported with the Claude 3 family of models'); - } else if (this.options.attachments) { - const attachments = (await this.options.attachments).filter((file) => - file.type.includes('image'), - ); + if (this.options.attachments) { + 
const attachments = await this.options.attachments; + const images = attachments.filter((file) => file.type.includes('image')); + + if (images.length && !this.isVisionModel) { + throw new Error('Images are only supported with the Claude 3 family of models'); + } const latestMessage = orderedMessages[orderedMessages.length - 1]; @@ -264,6 +366,13 @@ class AnthropicClient extends BaseClient { this.options.attachments = files; } + if (this.message_file_map) { + this.contextHandlers = createContextHandlers( + this.options.req, + orderedMessages[orderedMessages.length - 1].text, + ); + } + const formattedMessages = orderedMessages.map((message, i) => { const formattedMessage = this.useMessages ? formatMessage({ @@ -285,6 +394,11 @@ class AnthropicClient extends BaseClient { if (this.message_file_map && this.message_file_map[message.messageId]) { const attachments = this.message_file_map[message.messageId]; for (const file of attachments) { + if (file.embedded) { + this.contextHandlers?.processFile(file); + continue; + } + orderedMessages[i].tokenCount += this.calculateImageTokenCost({ width: file.width, height: file.height, @@ -296,8 +410,13 @@ class AnthropicClient extends BaseClient { return formattedMessage; }); + if (this.contextHandlers) { + this.augmentedPrompt = await this.contextHandlers.createContext(); + this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? ''); + } + let { context: messagesInWindow, remainingContextTokens } = - await this.getMessagesWithinTokenLimit(formattedMessages); + await this.getMessagesWithinTokenLimit({ messages: formattedMessages }); const tokenCountMap = orderedMessages .slice(orderedMessages.length - messagesInWindow.length) @@ -372,7 +491,10 @@ class AnthropicClient extends BaseClient { identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`; } - let promptPrefix = (this.options.promptPrefix || '').trim(); + let promptPrefix = (this.options.promptPrefix ?? 
'').trim(); + if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { + promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim(); + } if (promptPrefix) { // If the prompt prefix doesn't end with the end token, add it. if (!promptPrefix.endsWith(`${this.endToken}`)) { @@ -389,7 +511,7 @@ class AnthropicClient extends BaseClient { let isEdited = lastAuthor === this.assistantLabel; const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`; let currentTokenCount = - isEdited || this.useMEssages + isEdited || this.useMessages ? this.getTokenCount(promptPrefix) : this.getTokenCount(promptSuffix); @@ -509,7 +631,7 @@ class AnthropicClient extends BaseClient { ); }; - if (this.modelOptions.model.startsWith('claude-3')) { + if (this.modelOptions.model.includes('claude-3')) { await buildMessagesPayload(); processTokens(); return { @@ -538,12 +660,39 @@ class AnthropicClient extends BaseClient { logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)'); } - async createResponse(client, options) { - return this.useMessages + /** + * Creates a message or completion response using the Anthropic client. + * @param {Anthropic} client - The Anthropic client instance. + * @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion. + * @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`. + * @returns {Promise} The response from the Anthropic client. + */ + async createResponse(client, options, useMessages) { + return useMessages ?? this.useMessages ? 
await client.messages.create(options) : await client.completions.create(options); } + /** + * @param {string} modelName + * @returns {boolean} + */ + checkPromptCacheSupport(modelName) { + const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic); + if (modelMatch.includes('claude-3-5-sonnet-latest')) { + return false; + } + if ( + modelMatch === 'claude-3-5-sonnet' || + modelMatch === 'claude-3-5-haiku' || + modelMatch === 'claude-3-haiku' || + modelMatch === 'claude-3-opus' + ) { + return true; + } + return false; + } + async sendCompletion(payload, { onProgress, abortController }) { if (!abortController) { abortController = new AbortController(); @@ -557,8 +706,6 @@ class AnthropicClient extends BaseClient { } logger.debug('modelOptions', { modelOptions }); - - const client = this.getClient(); const metadata = { user_id: this.user, }; @@ -586,16 +733,28 @@ class AnthropicClient extends BaseClient { if (this.useMessages) { requestOptions.messages = payload; - requestOptions.max_tokens = maxOutputTokens || 1500; + requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default; } else { requestOptions.prompt = payload; requestOptions.max_tokens_to_sample = maxOutputTokens || 1500; } - if (this.systemMessage) { + if (this.systemMessage && this.supportsCacheControl === true) { + requestOptions.system = [ + { + type: 'text', + text: this.systemMessage, + cache_control: { type: 'ephemeral' }, + }, + ]; + } else if (this.systemMessage) { requestOptions.system = this.systemMessage; } + if (this.supportsCacheControl === true && this.useMessages) { + requestOptions.messages = addCacheControl(requestOptions.messages); + } + logger.debug('[AnthropicClient]', { ...requestOptions }); const handleChunk = (currentChunk) => { @@ -606,12 +765,14 @@ class AnthropicClient extends BaseClient { }; const maxRetries = 3; + const streamRate = this.options.streamRate ?? 
Constants.DEFAULT_STREAM_RATE; async function processResponse() { let attempts = 0; while (attempts < maxRetries) { let response; try { + const client = this.getClient(requestOptions); response = await this.createResponse(client, requestOptions); signal.addEventListener('abort', () => { @@ -623,11 +784,18 @@ class AnthropicClient extends BaseClient { for await (const completion of response) { // Handle each completion as before + const type = completion?.type ?? ''; + if (tokenEventTypes.has(type)) { + logger.debug(`[AnthropicClient] ${type}`, completion); + this[type] = completion; + } if (completion?.delta?.text) { handleChunk(completion.delta.text); } else if (completion.completion) { handleChunk(completion.completion); } + + await sleep(streamRate); } // Successful processing, exit loop @@ -661,8 +829,15 @@ class AnthropicClient extends BaseClient { getSaveOptions() { return { + maxContextTokens: this.options.maxContextTokens, + artifacts: this.options.artifacts, promptPrefix: this.options.promptPrefix, modelLabel: this.options.modelLabel, + promptCache: this.options.promptCache, + resendFiles: this.options.resendFiles, + iconURL: this.options.iconURL, + greeting: this.options.greeting, + spec: this.options.spec, ...this.modelOptions, }; } @@ -671,22 +846,96 @@ class AnthropicClient extends BaseClient { logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions'); } - static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) { - if (tokenizersCache[encoding]) { - return tokenizersCache[encoding]; - } - let tokenizer; - if (isModelName) { - tokenizer = encodingForModel(encoding, extendSpecialTokens); - } else { - tokenizer = getEncoding(encoding, extendSpecialTokens); - } - tokenizersCache[encoding] = tokenizer; - return tokenizer; + getEncoding() { + return 'cl100k_base'; } + /** + * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. 
+ * @param {string} text - The text to get the token count for. + * @returns {number} The token count of the given text. + */ getTokenCount(text) { - return this.gptEncoder.encode(text, 'all').length; + const encoding = this.getEncoding(); + return Tokenizer.getTokenCount(text, encoding); + } + + /** + * Generates a concise title for a conversation based on the user's input text and response. + * Involves sending a chat completion request with specific instructions for title generation. + * + * This function capitlizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools). + * + * @param {Object} params - The parameters for the conversation title generation. + * @param {string} params.text - The user's input. + * @param {string} [params.responseText=''] - The AI's immediate response to the user. + * + * @returns {Promise} A promise that resolves to the generated conversation title. + * In case of failure, it will return the default title, "New Chat". + */ + async titleConvo({ text, responseText = '' }) { + let title = 'New Chat'; + this.message_delta = undefined; + this.message_start = undefined; + const convo = ` + ${truncateText(text)} + + + ${JSON.stringify(truncateText(responseText))} + `; + + const { ANTHROPIC_TITLE_MODEL } = process.env ?? {}; + const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 
'claude-3-haiku-20240307'; + const system = titleFunctionPrompt; + + const titleChatCompletion = async () => { + const content = ` + ${convo} + + + Please generate a title for this conversation.`; + + const titleMessage = { role: 'user', content }; + const requestOptions = { + model, + temperature: 0.3, + max_tokens: 1024, + system, + stop_sequences: ['\n\nHuman:', '\n\nAssistant', ''], + messages: [titleMessage], + }; + + try { + const response = await this.createResponse( + this.getClient(requestOptions), + requestOptions, + true, + ); + let promptTokens = response?.usage?.input_tokens; + let completionTokens = response?.usage?.output_tokens; + if (!promptTokens) { + promptTokens = this.getTokenCountForMessage(titleMessage); + promptTokens += this.getTokenCountForMessage({ role: 'system', content: system }); + } + if (!completionTokens) { + completionTokens = this.getTokenCountForMessage(response.content[0]); + } + await this.recordTokenUsage({ + model, + promptTokens, + completionTokens, + context: 'title', + }); + const text = response.content[0].text; + title = parseParamFromPrompt(text, 'title'); + } catch (e) { + logger.error('[AnthropicClient] There was an issue generating the title', e); + } + }; + + await titleChatCompletion(); + logger.debug('[AnthropicClient] Convo Title: ' + title); + return title; } } diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js index a359ed7193..ebf3ca12d9 100644 --- a/api/app/clients/BaseClient.js +++ b/api/app/clients/BaseClient.js @@ -1,8 +1,18 @@ const crypto = require('crypto'); -const { supportsBalanceCheck, Constants } = require('librechat-data-provider'); -const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models'); +const fetch = require('node-fetch'); +const { + supportsBalanceCheck, + isAgentsEndpoint, + isParamEndpoint, + EModelEndpoint, + ErrorTypes, + Constants, +} = require('librechat-data-provider'); +const { getMessages, saveMessage, updateMessage, 
saveConvo } = require('~/models'); const { addSpaceIfNeeded, isEnabled } = require('~/server/utils'); +const { truncateToolCallOutputs } = require('./prompts'); const checkBalance = require('~/models/checkBalance'); +const { getFiles } = require('~/models/File'); const TextStream = require('./TextStream'); const { logger } = require('~/config'); @@ -16,13 +26,46 @@ class BaseClient { month: 'long', day: 'numeric', }); + this.fetch = this.fetch.bind(this); + /** @type {boolean} */ + this.skipSaveConvo = false; + /** @type {boolean} */ + this.skipSaveUserMessage = false; + /** @type {ClientDatabaseSavePromise} */ + this.userMessagePromise; + /** @type {ClientDatabaseSavePromise} */ + this.responsePromise; + /** @type {string} */ + this.user; + /** @type {string} */ + this.conversationId; + /** @type {string} */ + this.responseMessageId; + /** @type {TAttachment[]} */ + this.attachments; + /** The key for the usage object's input tokens + * @type {string} */ + this.inputTokensKey = 'prompt_tokens'; + /** The key for the usage object's output tokens + * @type {string} */ + this.outputTokensKey = 'completion_tokens'; + /** @type {Set} */ + this.savedMessageIds = new Set(); + /** + * Flag to determine if the client re-submitted the latest assistant message. 
+ * @type {boolean | undefined} */ + this.continued; + /** @type {TMessage[]} */ + this.currentMessages = []; + /** @type {import('librechat-data-provider').VisionModes | undefined} */ + this.visionMode; } setOptions() { throw new Error('Method \'setOptions\' must be implemented.'); } - getCompletion() { + async getCompletion() { throw new Error('Method \'getCompletion\' must be implemented.'); } @@ -42,21 +85,59 @@ class BaseClient { throw new Error('Subclasses attempted to call summarizeMessages without implementing it'); } - async getTokenCountForResponse(response) { - logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response); + /** + * @returns {string} + */ + getResponseModel() { + if (isAgentsEndpoint(this.options.endpoint) && this.options.agent && this.options.agent.id) { + return this.options.agent.id; + } + + return this.modelOptions?.model ?? this.model; } - async addPreviousAttachments(messages) { - return messages; + /** + * Abstract method to get the token count for a message. Subclasses must implement this method. + * @param {TMessage} responseMessage + * @returns {number} + */ + getTokenCountForResponse(responseMessage) { + logger.debug('[BaseClient] `recordTokenUsage` not implemented.', responseMessage); } + /** + * Abstract method to record token usage. Subclasses must implement this method. + * If a correction to the token usage is needed, the method should return an object with the corrected token counts. + * @param {number} promptTokens + * @param {number} completionTokens + * @returns {Promise} + */ async recordTokenUsage({ promptTokens, completionTokens }) { - logger.debug('`[BaseClient] recordTokenUsage` not implemented.', { + logger.debug('[BaseClient] `recordTokenUsage` not implemented.', { promptTokens, completionTokens, }); } + /** + * Makes an HTTP request and logs the process. + * + * @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object. 
+ * @param {RequestInit} [init] - Optional init options for the request. + * @returns {Promise} - A promise that resolves to the response of the fetch request. + */ + async fetch(_url, init) { + let url = _url; + if (this.options.directEndpoint) { + url = this.options.reverseProxyUrl; + } + logger.debug(`Making request to ${url}`); + if (typeof Bun !== 'undefined') { + return await fetch(url, init); + } + return await fetch(url, init); + } + getBuildMessagesOptions() { throw new Error('Subclasses must implement getBuildMessagesOptions'); } @@ -66,19 +147,45 @@ class BaseClient { await stream.processTextStream(onProgress); } + /** + * @returns {[string|undefined, string|undefined]} + */ + processOverideIds() { + /** @type {Record} */ + let { overrideConvoId, overrideUserMessageId } = this.options?.req?.body ?? {}; + if (overrideConvoId) { + const [conversationId, index] = overrideConvoId.split(Constants.COMMON_DIVIDER); + overrideConvoId = conversationId; + if (index !== '0') { + this.skipSaveConvo = true; + } + } + if (overrideUserMessageId) { + const [userMessageId, index] = overrideUserMessageId.split(Constants.COMMON_DIVIDER); + overrideUserMessageId = userMessageId; + if (index !== '0') { + this.skipSaveUserMessage = true; + } + } + + return [overrideConvoId, overrideUserMessageId]; + } + async setMessageOptions(opts = {}) { if (opts && opts.replaceOptions) { this.setOptions(opts); } + const [overrideConvoId, overrideUserMessageId] = this.processOverideIds(); const { isEdited, isContinued } = opts; const user = opts.user ?? null; this.user = user; const saveOptions = this.getSaveOptions(); this.abortController = opts.abortController ?? new AbortController(); - const conversationId = opts.conversationId ?? crypto.randomUUID(); + const conversationId = overrideConvoId ?? opts.conversationId ?? crypto.randomUUID(); const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT; - const userMessageId = opts.overrideParentMessageId ?? 
crypto.randomUUID(); + const userMessageId = + overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID(); let responseMessageId = opts.responseMessageId ?? crypto.randomUUID(); let head = isEdited ? responseMessageId : parentMessageId; this.currentMessages = (await this.loadHistory(conversationId, head)) ?? []; @@ -90,6 +197,8 @@ class BaseClient { this.currentMessages[this.currentMessages.length - 1].messageId = head; } + this.responseMessageId = responseMessageId; + return { ...opts, user, @@ -138,11 +247,12 @@ class BaseClient { userMessage, conversationId, responseMessageId, + sender: this.sender, }); } if (typeof opts?.onStart === 'function') { - opts.onStart(userMessage); + opts.onStart(userMessage, responseMessageId); } return { @@ -159,17 +269,24 @@ class BaseClient { /** * Adds instructions to the messages array. If the instructions object is empty or undefined, * the original messages array is returned. Otherwise, the instructions are added to the messages - * array, preserving the last message at the end. + * array either at the beginning (default) or preserving the last message at the end. * * @param {Array} messages - An array of messages. * @param {Object} instructions - An object containing instructions to be added to the messages. + * @param {boolean} [beforeLast=false] - If true, adds instructions before the last message; if false, adds at the beginning. * @returns {Array} An array containing messages and instructions, or the original messages if instructions are empty. 
*/ - addInstructions(messages, instructions) { - const payload = []; + addInstructions(messages, instructions, beforeLast = false) { if (!instructions || Object.keys(instructions).length === 0) { return messages; } + + if (!beforeLast) { + return [instructions, ...messages]; + } + + // Legacy behavior: add instructions before the last message + const payload = []; if (messages.length > 1) { payload.push(...messages.slice(0, -1)); } @@ -184,6 +301,9 @@ class BaseClient { } async handleTokenCountMap(tokenCountMap) { + if (this.clientName === EModelEndpoint.agents) { + return; + } if (this.currentMessages.length === 0) { return; } @@ -232,25 +352,38 @@ class BaseClient { * If the token limit would be exceeded by adding a message, that message is not added to the context and remains in the original array. * The method uses `push` and `pop` operations for efficient array manipulation, and reverses the context array at the end to maintain the original order of the messages. * - * @param {Array} _messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest. - * @param {number} [maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`. - * @returns {Object} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`. + * @param {Object} params + * @param {TMessage[]} params.messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest. + * @param {number} [params.maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`. + * @param {{ role: 'system', content: text, tokenCount: number }} [params.instructions] - Instructions already added to the context at index 0. 
+ * @returns {Promise<{ + * context: TMessage[], + * remainingContextTokens: number, + * messagesToRefine: TMessage[], + * summaryIndex: number, + * }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`. * `context` is an array of messages that fit within the token limit. * `summaryIndex` is the index of the first message in the `messagesToRefine` array. * `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context. * `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit. */ - async getMessagesWithinTokenLimit(_messages, maxContextTokens) { + async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) { // Every reply is primed with <|start|>assistant<|message|>, so we // start with 3 tokens for the label after all messages have been counted. - let currentTokenCount = 3; let summaryIndex = -1; - let remainingContextTokens = maxContextTokens ?? this.maxContextTokens; + let currentTokenCount = 3; + const instructionsTokenCount = instructions?.tokenCount ?? 0; + let remainingContextTokens = + (maxContextTokens ?? 
this.maxContextTokens) - instructionsTokenCount; const messages = [..._messages]; const context = []; + if (currentTokenCount < remainingContextTokens) { while (messages.length > 0 && currentTokenCount < remainingContextTokens) { + if (messages.length === 1 && instructions) { + break; + } const poppedMessage = messages.pop(); const { tokenCount } = poppedMessage; @@ -264,6 +397,11 @@ class BaseClient { } } + if (instructions) { + context.push(_messages[0]); + messages.shift(); + } + const prunedMemory = messages; summaryIndex = prunedMemory.length - 1; remainingContextTokens -= currentTokenCount; @@ -276,19 +414,50 @@ class BaseClient { }; } - async handleContextStrategy({ instructions, orderedMessages, formattedMessages }) { + async handleContextStrategy({ + instructions, + orderedMessages, + formattedMessages, + buildTokenMap = true, + }) { let _instructions; let tokenCount; if (instructions) { ({ tokenCount, ..._instructions } = instructions); } + _instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount); - let payload = this.addInstructions(formattedMessages, _instructions); + if (tokenCount && tokenCount > this.maxContextTokens) { + const info = `${tokenCount} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn(`Instructions token count exceeds max token count (${info}).`); + throw new Error(errorMessage); + } + + if (this.clientName === EModelEndpoint.agents) { + const { dbMessages, editedIndices } = truncateToolCallOutputs( + orderedMessages, + this.maxContextTokens, + this.getTokenCountForMessage.bind(this), + ); + + if (editedIndices.length > 0) { + logger.debug('[BaseClient] Truncated tool call outputs:', editedIndices); + for (const index of editedIndices) { + formattedMessages[index].content = dbMessages[index].content; + } + orderedMessages = dbMessages; + } + } + let orderedWithInstructions = this.addInstructions(orderedMessages, instructions); let 
{ context, remainingContextTokens, messagesToRefine, summaryIndex } = - await this.getMessagesWithinTokenLimit(orderedWithInstructions); + await this.getMessagesWithinTokenLimit({ + messages: orderedWithInstructions, + instructions, + }); logger.debug('[BaseClient] Context Count (1/2)', { remainingContextTokens, @@ -300,7 +469,9 @@ class BaseClient { let { shouldSummarize } = this; // Calculate the difference in length to determine how many messages were discarded if any - const { length } = payload; + let payload; + let { length } = formattedMessages; + length += instructions != null ? 1 : 0; const diff = length - context.length; const firstMessage = orderedWithInstructions[0]; const usePrevSummary = @@ -310,17 +481,31 @@ class BaseClient { this.previous_summary.messageId === firstMessage.messageId; if (diff > 0) { - payload = payload.slice(diff); + payload = formattedMessages.slice(diff); logger.debug( `[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`, ); } + payload = this.addInstructions(payload ?? 
formattedMessages, _instructions); + const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1]; if (payload.length === 0 && !shouldSummarize && latestMessage) { - throw new Error( - `Prompt token count of ${latestMessage.tokenCount} exceeds max token count of ${this.maxContextTokens}.`, + const info = `${latestMessage.tokenCount} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn(`Prompt token count exceeds max token count (${info}).`); + throw new Error(errorMessage); + } else if ( + _instructions && + payload.length === 1 && + payload[0].content === _instructions.content + ) { + const info = `${tokenCount + 3} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; + logger.warn( + `Including instructions, the prompt token count exceeds remaining max token count (${info}).`, ); + throw new Error(errorMessage); } if (usePrevSummary) { @@ -345,19 +530,23 @@ class BaseClient { maxContextTokens: this.maxContextTokens, }); - let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => { - const { messageId } = message; - if (!messageId) { + /** @type {Record | undefined} */ + let tokenCountMap; + if (buildTokenMap) { + tokenCountMap = orderedWithInstructions.reduce((map, message, index) => { + const { messageId } = message; + if (!messageId) { + return map; + } + + if (shouldSummarize && index === summaryIndex && !usePrevSummary) { + map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount }; + } + + map[messageId] = orderedWithInstructions[index].tokenCount; return map; - } - - if (shouldSummarize && index === summaryIndex && !usePrevSummary) { - map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount }; - } - - map[messageId] = orderedWithInstructions[index].tokenCount; - return map; - }, {}); + }, {}); + } const promptTokens = 
this.maxContextTokens - remainingContextTokens; @@ -376,6 +565,14 @@ class BaseClient { const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } = await this.handleStartMethods(message, opts); + if (opts.progressCallback) { + opts.onProgress = opts.progressCallback.call(null, { + ...(opts.progressOptions ?? {}), + parentMessageId: userMessage.messageId, + messageId: responseMessageId, + }); + } + const { generation = '' } = opts; // It's not necessary to push to currentMessages @@ -389,7 +586,7 @@ class BaseClient { conversationId, parentMessageId: userMessage.messageId, isCreatedByUser: false, - model: this.modelOptions.model, + model: this.modelOptions?.model ?? this.model, sender: this.sender, text: generation, }; @@ -397,6 +594,7 @@ class BaseClient { } else { latestMessage.text = generation; } + this.continued = true; } else { this.currentMessages.push(userMessage); } @@ -424,8 +622,14 @@ class BaseClient { this.handleTokenCountMap(tokenCountMap); } - if (!isEdited) { - await this.saveMessageToDatabase(userMessage, saveOptions, user); + if (!isEdited && !this.skipSaveUserMessage) { + this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); + this.savedMessageIds.add(userMessage.messageId); + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessagePromise: this.userMessagePromise, + }); + } } if ( @@ -439,45 +643,151 @@ class BaseClient { user: this.user, tokenType: 'prompt', amount: promptTokens, - model: this.modelOptions.model, endpoint: this.options.endpoint, + model: this.modelOptions?.model ?? 
this.model, endpointTokenConfig: this.options.endpointTokenConfig, }, }); } + /** @type {string|string[]|undefined} */ const completion = await this.sendCompletion(payload, opts); this.abortController.requestCompleted = true; + /** @type {TMessage} */ const responseMessage = { messageId: responseMessageId, conversationId, parentMessageId: userMessage.messageId, isCreatedByUser: false, isEdited, - model: this.modelOptions.model, + model: this.getResponseModel(), sender: this.sender, - text: addSpaceIfNeeded(generation) + completion, promptTokens, + iconURL: this.options.iconURL, + endpoint: this.options.endpoint, + ...(this.metadata ?? {}), }; + if (typeof completion === 'string') { + responseMessage.text = addSpaceIfNeeded(generation) + completion; + } else if ( + Array.isArray(completion) && + isParamEndpoint(this.options.endpoint, this.options.endpointType) + ) { + responseMessage.text = ''; + responseMessage.content = completion; + } else if (Array.isArray(completion)) { + responseMessage.text = addSpaceIfNeeded(generation) + completion.join(''); + } + if ( tokenCountMap && this.recordTokenUsage && this.getTokenCountForResponse && this.getTokenCount ) { - responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage); - const completionTokens = this.getTokenCount(completion); - await this.recordTokenUsage({ promptTokens, completionTokens }); + let completionTokens; + + /** + * Metadata about input/output costs for the current message. The client + * should provide a function to get the current stream usage metadata; if not, + * use the legacy token estimations. + * @type {StreamUsage | null} */ + const usage = this.getStreamUsage != null ? 
this.getStreamUsage() : null; + + if (usage != null && Number(usage[this.outputTokensKey]) > 0) { + responseMessage.tokenCount = usage[this.outputTokensKey]; + completionTokens = responseMessage.tokenCount; + await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }); + } else { + responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage); + completionTokens = responseMessage.tokenCount; + } + + await this.recordTokenUsage({ promptTokens, completionTokens, usage }); } - await this.saveMessageToDatabase(responseMessage, saveOptions, user); + + if (this.userMessagePromise) { + await this.userMessagePromise; + } + + if (this.artifactPromises) { + responseMessage.attachments = (await Promise.all(this.artifactPromises)).filter((a) => a); + } + + if (this.options.attachments) { + try { + saveOptions.files = this.options.attachments.map((attachments) => attachments.file_id); + } catch (error) { + logger.error('[BaseClient] Error mapping attachments for conversation', error); + } + } + + this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user); + this.savedMessageIds.add(responseMessage.messageId); delete responseMessage.tokenCount; return responseMessage; } - async getConversation(conversationId, user = null) { - return await getConvo(user, conversationId); + /** + * Stream usage should only be used for user message token count re-calculation if: + * - The stream usage is available, with input tokens greater than 0, + * - the client provides a function to calculate the current token count, + * - files are being resent with every message (default behavior; or if `false`, with no attachments), + * - the `promptPrefix` (custom instructions) is not set. + * + * In these cases, the legacy token estimations would be more accurate. + * + * TODO: included system messages in the `orderedMessages` accounting, potentially as a + * separate message in the UI. ChatGPT does this through "hidden" system messages. 
+ * @param {object} params + * @param {StreamUsage} params.usage + * @param {Record} params.tokenCountMap + * @param {TMessage} params.userMessage + * @param {object} params.opts + */ + async updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }) { + /** @type {boolean} */ + const shouldUpdateCount = + this.calculateCurrentTokenCount != null && + Number(usage[this.inputTokensKey]) > 0 && + (this.options.resendFiles || + (!this.options.resendFiles && !this.options.attachments?.length)) && + !this.options.promptPrefix; + + if (!shouldUpdateCount) { + return; + } + + const userMessageTokenCount = this.calculateCurrentTokenCount({ + currentMessageId: userMessage.messageId, + tokenCountMap, + usage, + }); + + if (userMessageTokenCount === userMessage.tokenCount) { + return; + } + + userMessage.tokenCount = userMessageTokenCount; + /* + Note: `AskController` saves the user message, so we update the count of its `userMessage` reference + */ + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessage, + }); + } + /* + Note: we update the user message to be sure it gets the calculated token count; + though `AskController` saves the user message, EditController does not + */ + await this.userMessagePromise; + await this.updateMessageInDatabase({ + messageId: userMessage.messageId, + tokenCount: userMessageTokenCount, + }); } async loadHistory(conversationId, parentMessageId = null) { @@ -527,18 +837,52 @@ class BaseClient { return _messages; } + /** + * Save a message to the database. 
+ * @param {TMessage} message + * @param {Partial} endpointOptions + * @param {string | null} user + */ async saveMessageToDatabase(message, endpointOptions, user = null) { - await saveMessage({ ...message, endpoint: this.options.endpoint, user, unfinished: false }); - await saveConvo(user, { - conversationId: message.conversationId, - endpoint: this.options.endpoint, - endpointType: this.options.endpointType, - ...endpointOptions, - }); + if (this.user && user !== this.user) { + throw new Error('User mismatch.'); + } + + const savedMessage = await saveMessage( + this.options.req, + { + ...message, + endpoint: this.options.endpoint, + unfinished: false, + user, + }, + { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' }, + ); + + if (this.skipSaveConvo) { + return { message: savedMessage }; + } + + const conversation = await saveConvo( + this.options.req, + { + conversationId: message.conversationId, + endpoint: this.options.endpoint, + endpointType: this.options.endpointType, + ...endpointOptions, + }, + { context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' }, + ); + + return { message: savedMessage, conversation }; } + /** + * Update a message in the database. + * @param {Partial} message + */ async updateMessageInDatabase(message) { - await updateMessage(message); + await updateMessage(this.options.req, message); } /** @@ -558,11 +902,11 @@ class BaseClient { * the message is considered a root message. * * @param {Object} options - The options for the function. - * @param {Array} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property. + * @param {TMessage[]} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property. * @param {string} options.parentMessageId - The ID of the parent message to start the traversal from. 
* @param {Function} [options.mapMethod] - An optional function to map over the ordered messages. If provided, it will be applied to each message in the resulting array. * @param {boolean} [options.summary=false] - If set to true, the traversal modifies messages with 'summary' and 'summaryTokenCount' properties and stops at the message with a 'summary' property. - * @returns {Array} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'. + * @returns {TMessage[]} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'. */ static getMessagesForConversation({ messages, @@ -639,8 +983,9 @@ class BaseClient { // Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models let tokensPerMessage = 3; let tokensPerName = 1; + const model = this.modelOptions?.model ?? 
this.model; - if (this.modelOptions.model === 'gpt-3.5-turbo-0301') { + if (model === 'gpt-3.5-turbo-0301') { tokensPerMessage = 4; tokensPerName = -1; } @@ -652,6 +997,24 @@ class BaseClient { continue; } + if (item.type === 'tool_call' && item.tool_call != null) { + const toolName = item.tool_call?.name || ''; + if (toolName != null && toolName && typeof toolName === 'string') { + numTokens += this.getTokenCount(toolName); + } + + const args = item.tool_call?.args || ''; + if (args != null && args && typeof args === 'string') { + numTokens += this.getTokenCount(args); + } + + const output = item.tool_call?.output || ''; + if (output != null && output && typeof output === 'string') { + numTokens += this.getTokenCount(output); + } + continue; + } + const nestedValue = item[item.type]; if (!nestedValue) { @@ -660,8 +1023,12 @@ class BaseClient { processValue(nestedValue); } - } else { + } else if (typeof value === 'string') { numTokens += this.getTokenCount(value); + } else if (typeof value === 'number') { + numTokens += this.getTokenCount(value.toString()); + } else if (typeof value === 'boolean') { + numTokens += this.getTokenCount(value.toString()); } }; @@ -683,6 +1050,75 @@ class BaseClient { return await this.sendCompletion(payload, opts); } + + /** + * + * @param {TMessage[]} _messages + * @returns {Promise} + */ + async addPreviousAttachments(_messages) { + if (!this.options.resendFiles) { + return _messages; + } + + const seen = new Set(); + const attachmentsProcessed = + this.options.attachments && !(this.options.attachments instanceof Promise); + if (attachmentsProcessed) { + for (const attachment of this.options.attachments) { + seen.add(attachment.file_id); + } + } + + /** + * + * @param {TMessage} message + */ + const processMessage = async (message) => { + if (!this.message_file_map) { + /** @type {Record */ + this.message_file_map = {}; + } + + const fileIds = []; + for (const file of message.files) { + if (seen.has(file.file_id)) { + continue; + } + 
fileIds.push(file.file_id); + seen.add(file.file_id); + } + + if (fileIds.length === 0) { + return message; + } + + const files = await getFiles({ + file_id: { $in: fileIds }, + }); + + await this.addImageURLs(message, files, this.visionMode); + + this.message_file_map[message.messageId] = files; + return message; + }; + + const promises = []; + + for (const message of _messages) { + if (!message.files) { + promises.push(message); + continue; + } + + promises.push(processMessage(message)); + } + + const messages = await Promise.all(promises); + + this.checkVisionRequest(Object.values(this.message_file_map ?? {}).flat()); + return messages; + } } module.exports = BaseClient; diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js index a5ed43985e..5450300a17 100644 --- a/api/app/clients/ChatGPTClient.js +++ b/api/app/clients/ChatGPTClient.js @@ -1,16 +1,20 @@ const Keyv = require('keyv'); const crypto = require('crypto'); +const { CohereClient } = require('cohere-ai'); +const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source'); +const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); const { + ImageDetail, EModelEndpoint, resolveHeaders, + CohereConstants, mapModelToAzureConfig, } = require('librechat-data-provider'); -const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); -const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source'); -const { Agent, ProxyAgent } = require('undici'); +const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils'); +const { createContextHandlers } = require('./prompts'); +const { createCoherePayload } = require('./llm'); const BaseClient = require('./BaseClient'); const { logger } = require('~/config'); -const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils'); const CHATGPT_MODEL = 'gpt-3.5-turbo'; const tokenizersCache = {}; @@ -147,7 
+151,8 @@ class ChatGPTClient extends BaseClient { return tokenizer; } - async getCompletion(input, onProgress, abortController = null) { + /** @type {getCompletion} */ + async getCompletion(input, onProgress, onTokenProgress, abortController = null) { if (!abortController) { abortController = new AbortController(); } @@ -180,10 +185,6 @@ class ChatGPTClient extends BaseClient { headers: { 'Content-Type': 'application/json', }, - dispatcher: new Agent({ - bodyTimeout: 0, - headersTimeout: 0, - }), }; if (this.isVisionModel) { @@ -221,6 +222,16 @@ class ChatGPTClient extends BaseClient { this.azure = !serverless && azureOptions; this.azureEndpoint = !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this); + if (serverless === true) { + this.options.defaultQuery = azureOptions.azureOpenAIApiVersion + ? { 'api-version': azureOptions.azureOpenAIApiVersion } + : undefined; + this.options.headers['api-key'] = this.apiKey; + } + } + + if (this.options.defaultQuery) { + opts.defaultQuery = this.options.defaultQuery; } if (this.options.headers) { @@ -234,9 +245,9 @@ class ChatGPTClient extends BaseClient { baseURL = this.langchainProxy ? constructAzureURL({ baseURL: this.langchainProxy, - azure: this.azure, + azureOptions: this.azure, }) - : this.azureEndpoint.split(/\/(chat|completion)/)[0]; + : this.azureEndpoint.split(/(? { + if (this.message_file_map && this.message_file_map[message.messageId]) { + const attachments = this.message_file_map[message.messageId]; + for (const file of attachments) { + if (file.embedded) { + this.contextHandlers?.processFile(file); + continue; + } + + messages[i].tokenCount = + (messages[i].tokenCount || 0) + + this.calculateImageTokenCost({ + width: file.width, + height: file.height, + detail: this.options.imageDetail ?? 
ImageDetail.auto, + }); + } + } + }); + + if (this.contextHandlers) { + this.augmentedPrompt = await this.contextHandlers.createContext(); + promptPrefix = this.augmentedPrompt + promptPrefix; + } + if (promptPrefix) { // If the prompt prefix doesn't end with the end token, add it. if (!promptPrefix.endsWith(`${this.endToken}`)) { promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`; } promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`; - } else { - const currentDateString = new Date().toLocaleDateString('en-us', { - year: 'numeric', - month: 'long', - day: 'numeric', - }); - promptPrefix = `${this.startToken}Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date: ${currentDateString}${this.endToken}\n\n`; } - const promptSuffix = `${this.startToken}${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond. const instructionsPayload = { role: 'system', - name: 'instructions', content: promptPrefix, }; @@ -668,10 +761,6 @@ ${botMessage.message} this.maxResponseTokens, ); - if (this.options.debug) { - console.debug(`Prompt : ${prompt}`); - } - if (isChatGptModel) { return { prompt: [instructionsPayload, messagePayload], context }; } diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js index 22e80159c8..03461a6796 100644 --- a/api/app/clients/GoogleClient.js +++ b/api/app/clients/GoogleClient.js @@ -1,30 +1,42 @@ const { google } = require('googleapis'); -const { Agent, ProxyAgent } = require('undici'); -const { GoogleVertexAI } = require('langchain/llms/googlevertexai'); +const { concat } = require('@langchain/core/utils/stream'); +const { ChatVertexAI } = require('@langchain/google-vertexai'); const { ChatGoogleGenerativeAI } = require('@langchain/google-genai'); -const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai'); -const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema'); -const { encoding_for_model: 
encodingForModel, get_encoding: getEncoding } = require('tiktoken'); +const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai'); +const { HumanMessage, SystemMessage } = require('@langchain/core/messages'); const { + googleGenConfigSchema, validateVisionModel, getResponseSender, endpointSettings, EModelEndpoint, + ContentTypes, + VisionModes, + ErrorTypes, + Constants, AuthKeys, } = require('librechat-data-provider'); +const { getSafetySettings } = require('~/server/services/Endpoints/google/llm'); const { encodeAndFormat } = require('~/server/services/Files/images'); +const Tokenizer = require('~/server/services/Tokenizer'); +const { spendTokens } = require('~/models/spendTokens'); const { getModelMaxTokens } = require('~/utils'); -const { formatMessage } = require('./prompts'); -const BaseClient = require('./BaseClient'); +const { sleep } = require('~/server/utils'); const { logger } = require('~/config'); +const { + formatMessage, + createContextHandlers, + titleInstruction, + truncateText, +} = require('./prompts'); +const BaseClient = require('./BaseClient'); -const loc = 'us-central1'; +const loc = process.env.GOOGLE_LOC || 'us-central1'; const publisher = 'google'; -const endpointPrefix = `https://${loc}-aiplatform.googleapis.com`; -// const apiEndpoint = loc + '-aiplatform.googleapis.com'; -const tokenizersCache = {}; +const endpointPrefix = `${loc}-aiplatform.googleapis.com`; const settings = endpointSettings[EModelEndpoint.google]; +const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/; class GoogleClient extends BaseClient { constructor(credentials, options = {}) { @@ -40,13 +52,27 @@ class GoogleClient extends BaseClient { const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {}; this.serviceKey = serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : serviceKey ?? 
{}; + /** @type {string | null | undefined} */ + this.project_id = this.serviceKey.project_id; this.client_email = this.serviceKey.client_email; this.private_key = this.serviceKey.private_key; - this.project_id = this.serviceKey.project_id; this.access_token = null; this.apiKey = creds[AuthKeys.GOOGLE_API_KEY]; + this.reverseProxyUrl = options.reverseProxyUrl; + + this.authHeader = options.authHeader; + + /** @type {UsageMetadata | undefined} */ + this.usage; + /** The key for the usage object's input tokens + * @type {string} */ + this.inputTokensKey = 'input_tokens'; + /** The key for the usage object's output tokens + * @type {string} */ + this.outputTokensKey = 'output_tokens'; + this.visionMode = VisionModes.generative; if (options.skipSetOptions) { return; } @@ -55,7 +81,7 @@ class GoogleClient extends BaseClient { /* Google specific methods */ constructUrl() { - return `${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`; + return `https://${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`; } async getClient() { @@ -106,53 +132,18 @@ class GoogleClient extends BaseClient { this.options = options; } - this.options.examples = (this.options.examples ?? []) - .filter((ex) => ex) - .filter((obj) => obj.input.content !== '' && obj.output.content !== ''); + this.modelOptions = this.options.modelOptions || {}; - const modelOptions = this.options.modelOptions || {}; - this.modelOptions = { - ...modelOptions, - // set some good defaults (check for undefined in some cases because they may be 0) - model: modelOptions.model || settings.model.default, - temperature: - typeof modelOptions.temperature === 'undefined' - ? settings.temperature.default - : modelOptions.temperature, - topP: typeof modelOptions.topP === 'undefined' ? 
settings.topP.default : modelOptions.topP, - topK: typeof modelOptions.topK === 'undefined' ? settings.topK.default : modelOptions.topK, - // stop: modelOptions.stop // no stop method for now - }; + this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments)); - /* Validation vision request */ - this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision'; - const availableModels = this.options.modelsConfig?.[EModelEndpoint.google]; - this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); + /** @type {boolean} Whether using a "GenerativeAI" Model */ + this.isGenerativeModel = + this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm'); - if ( - this.options.attachments && - availableModels?.includes(this.defaultVisionModel) && - !this.isVisionModel - ) { - this.modelOptions.model = this.defaultVisionModel; - this.isVisionModel = true; - } + this.maxContextTokens = + this.options.maxContextTokens ?? + getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google); - if (this.isVisionModel && !this.options.attachments) { - this.modelOptions.model = 'gemini-pro'; - this.isVisionModel = false; - } - - // TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google - this.isGenerativeModel = this.modelOptions.model.includes('gemini'); - const { isGenerativeModel } = this; - this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat'); - const { isChatModel } = this; - this.isTextModel = - !isGenerativeModel && !isChatModel && /code|text/.test(this.modelOptions.model); - const { isTextModel } = this; - - this.maxContextTokens = getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google); // The max prompt tokens is determined by the max context tokens minus the max response tokens. // Earlier messages will be dropped until the prompt is within the limit. 
this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default; @@ -183,72 +174,159 @@ class GoogleClient extends BaseClient { this.userLabel = this.options.userLabel || 'User'; this.modelLabel = this.options.modelLabel || 'Assistant'; - if (isChatModel || isGenerativeModel) { - // Use these faux tokens to help the AI understand the context since we are building the chat log ourselves. - // Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason, - // without tripping the stop sequences, so I'm using "||>" instead. - this.startToken = '||>'; - this.endToken = ''; - this.gptEncoder = this.constructor.getTokenizer('cl100k_base'); - } else if (isTextModel) { - this.startToken = '||>'; - this.endToken = ''; - this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, { - '<|im_start|>': 100264, - '<|im_end|>': 100265, - }); - } else { - // Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting - // system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated - // as a single token. So we're using this instead. 
- this.startToken = '||>'; - this.endToken = ''; - try { - this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true); - } catch { - this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true); - } - } - - if (!this.modelOptions.stop) { - const stopTokens = [this.startToken]; - if (this.endToken && this.endToken !== this.startToken) { - stopTokens.push(this.endToken); - } - stopTokens.push(`\n${this.userLabel}:`); - stopTokens.push('<|diff_marker|>'); - // I chose not to do one for `modelLabel` because I've never seen it happen - this.modelOptions.stop = stopTokens; - } - if (this.options.reverseProxyUrl) { this.completionsUrl = this.options.reverseProxyUrl; } else { this.completionsUrl = this.constructUrl(); } + let promptPrefix = (this.options.promptPrefix ?? '').trim(); + if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { + promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim(); + } + this.options.promptPrefix = promptPrefix; + this.initializeClient(); return this; } + /** + * + * Checks if the model is a vision model based on request attachments and sets the appropriate options: + * @param {MongoFile[]} attachments + */ + checkVisionRequest(attachments) { + /* Validation vision request */ + this.defaultVisionModel = this.options.visionModel ?? 
'gemini-pro-vision'; + const availableModels = this.options.modelsConfig?.[EModelEndpoint.google]; + this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); + + if ( + attachments && + attachments.some((file) => file?.type && file?.type?.includes('image')) && + availableModels?.includes(this.defaultVisionModel) && + !this.isVisionModel + ) { + this.modelOptions.model = this.defaultVisionModel; + this.isVisionModel = true; + } + + if (this.isVisionModel && !attachments && this.modelOptions.model.includes('gemini-pro')) { + this.modelOptions.model = 'gemini-pro'; + this.isVisionModel = false; + } + } + formatMessages() { - return ((message) => ({ - author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel), - content: message?.content ?? message.text, - })).bind(this); + return ((message) => { + const msg = { + author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel), + content: message?.content ?? message.text, + }; + + if (!message.image_urls?.length) { + return msg; + } + + msg.content = ( + !Array.isArray(msg.content) + ? [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: msg.content, + }, + ] + : msg.content + ).concat(message.image_urls); + + return msg; + }).bind(this); + } + + /** + * Formats messages for generative AI + * @param {TMessage[]} messages + * @returns + */ + async formatGenerativeMessages(messages) { + const formattedMessages = []; + const attachments = await this.options.attachments; + const latestMessage = { ...messages[messages.length - 1] }; + const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative); + this.options.attachments = files; + messages[messages.length - 1] = latestMessage; + + for (const _message of messages) { + const role = _message.isCreatedByUser ? 
this.userLabel : this.modelLabel; + const parts = []; + parts.push({ text: _message.text }); + if (!_message.image_urls?.length) { + formattedMessages.push({ role, parts }); + continue; + } + + for (const images of _message.image_urls) { + if (images.inlineData) { + parts.push({ inlineData: images.inlineData }); + } + } + + formattedMessages.push({ role, parts }); + } + + return formattedMessages; + } + + /** + * + * Adds image URLs to the message object and returns the files + * + * @param {TMessage[]} messages + * @param {MongoFile[]} files + * @returns {Promise} + */ + async addImageURLs(message, attachments, mode = '') { + const { files, image_urls } = await encodeAndFormat( + this.options.req, + attachments, + EModelEndpoint.google, + mode, + ); + message.image_urls = image_urls.length ? image_urls : undefined; + return files; + } + + /** + * Builds the augmented prompt for attachments + * TODO: Add File API Support + * @param {TMessage[]} messages + */ + async buildAugmentedPrompt(messages = []) { + const attachments = await this.options.attachments; + const latestMessage = { ...messages[messages.length - 1] }; + this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text); + + if (this.contextHandlers) { + for (const file of attachments) { + if (file.embedded) { + this.contextHandlers?.processFile(file); + continue; + } + } + + this.augmentedPrompt = await this.contextHandlers.createContext(); + this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix; + } } async buildVisionMessages(messages = [], parentMessageId) { - const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId); const attachments = await this.options.attachments; - const { files, image_urls } = await encodeAndFormat( - this.options.req, - attachments.filter((file) => file.type.includes('image')), - EModelEndpoint.google, - ); - const latestMessage = { ...messages[messages.length - 1] }; + await this.buildAugmentedPrompt(messages); + 
+ const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId); + + const files = await this.addImageURLs(latestMessage, attachments); - latestMessage.image_urls = image_urls; this.options.attachments = files; latestMessage.text = prompt; @@ -259,28 +337,73 @@ class GoogleClient extends BaseClient { messages: [new HumanMessage(formatMessage({ message: latestMessage }))], }, ], - parameters: this.modelOptions, }; return { prompt: payload }; } - async buildMessages(messages = [], parentMessageId) { + /** @param {TMessage[]} [messages=[]] */ + async buildGenerativeMessages(messages = []) { + this.userLabel = 'user'; + this.modelLabel = 'model'; + const promises = []; + promises.push(await this.formatGenerativeMessages(messages)); + promises.push(this.buildAugmentedPrompt(messages)); + const [formattedMessages] = await Promise.all(promises); + return { prompt: formattedMessages }; + } + + /** + * @param {TMessage[]} [messages=[]] + * @param {string} [parentMessageId] + */ + async buildMessages(_messages = [], parentMessageId) { if (!this.isGenerativeModel && !this.project_id) { - throw new Error( - '[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)', - ); - } else if (this.isGenerativeModel && (!this.apiKey || this.apiKey === 'user_provided')) { - throw new Error( - '[GoogleClient] an API Key is required for Gemini models (Generative Language API)', - ); + throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.'); } - if (this.options.attachments) { - return this.buildVisionMessages(messages, parentMessageId); + if (this.options.promptPrefix) { + const instructionsTokenCount = this.getTokenCount(this.options.promptPrefix); + + this.maxContextTokens = this.maxContextTokens - instructionsTokenCount; + if (this.maxContextTokens < 0) { + const info = `${instructionsTokenCount} / ${this.maxContextTokens}`; + const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`; 
+ logger.warn(`Instructions token count exceeds max context (${info}).`); + throw new Error(errorMessage); + } } - if (this.isTextModel) { - return this.buildMessagesPrompt(messages, parentMessageId); + for (let i = 0; i < _messages.length; i++) { + const message = _messages[i]; + if (!message.tokenCount) { + _messages[i].tokenCount = this.getTokenCountForMessage({ + role: message.isCreatedByUser ? 'user' : 'assistant', + content: message.content ?? message.text, + }); + } + } + + const { + payload: messages, + tokenCountMap, + promptTokens, + } = await this.handleContextStrategy({ + orderedMessages: _messages, + formattedMessages: _messages, + }); + + if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) { + const result = await this.buildGenerativeMessages(messages); + result.tokenCountMap = tokenCountMap; + result.promptTokens = promptTokens; + return result; + } + + if (this.options.attachments && this.isGenerativeModel) { + const result = this.buildVisionMessages(messages, parentMessageId); + result.tokenCountMap = tokenCountMap; + result.promptTokens = promptTokens; + return result; } let payload = { @@ -292,20 +415,14 @@ class GoogleClient extends BaseClient { .map((message) => formatMessage({ message, langChain: true })), }, ], - parameters: this.modelOptions, }; if (this.options.promptPrefix) { payload.instances[0].context = this.options.promptPrefix; } - if (this.options.examples.length > 0) { - payload.instances[0].examples = this.options.examples; - } - logger.debug('[GoogleClient] buildMessages', payload); - - return { prompt: payload }; + return { prompt: payload, tokenCountMap, promptTokens }; } async buildMessagesPrompt(messages, parentMessageId) { @@ -319,10 +436,7 @@ class GoogleClient extends BaseClient { parentMessageId, }); - const formattedMessages = orderedMessages.map((message) => ({ - author: message.isCreatedByUser ? this.userLabel : this.modelLabel, - content: message?.content ?? 
message.text, - })); + const formattedMessages = orderedMessages.map(this.formatMessages()); let lastAuthor = ''; let groupedMessages = []; @@ -350,14 +464,7 @@ class GoogleClient extends BaseClient { identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`; } - let promptPrefix = (this.options.promptPrefix || '').trim(); - if (promptPrefix) { - // If the prompt prefix doesn't end with the end token, add it. - if (!promptPrefix.endsWith(`${this.endToken}`)) { - promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`; - } - promptPrefix = `\nContext:\n${promptPrefix}`; - } + let promptPrefix = (this.options.promptPrefix ?? '').trim(); if (identityPrefix) { promptPrefix = `${identityPrefix}${promptPrefix}`; @@ -394,7 +501,7 @@ class GoogleClient extends BaseClient { isCreatedByUser || !isEdited ? `\n\n${message.author}:` : `${promptPrefix}\n\n${message.author}:`; - const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`; + const messageString = `${messagePrefix}\n${message.content}\n`; let newPromptBody = `${messageString}${promptBody}`; context.unshift(message); @@ -460,54 +567,50 @@ class GoogleClient extends BaseClient { return { prompt, context }; } - async _getCompletion(payload, abortController = null) { - if (!abortController) { - abortController = new AbortController(); - } - const { debug } = this.options; - const url = this.completionsUrl; - if (debug) { - logger.debug('GoogleClient _getCompletion', { url, payload }); - } - const opts = { - method: 'POST', - agent: new Agent({ - bodyTimeout: 0, - headersTimeout: 0, - }), - signal: abortController.signal, - }; - - if (this.options.proxy) { - opts.agent = new ProxyAgent(this.options.proxy); - } - - const client = await this.getClient(); - const res = await client.request({ url, method: 'POST', data: payload }); - logger.debug('GoogleClient _getCompletion', { res }); - return res.data; - } - createLLM(clientOptions) { - if (this.isGenerativeModel) { - return new 
ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey }); + const model = clientOptions.modelName ?? clientOptions.model; + clientOptions.location = loc; + clientOptions.endpoint = endpointPrefix; + + let requestOptions = null; + if (this.reverseProxyUrl) { + requestOptions = { + baseUrl: this.reverseProxyUrl, + }; + + if (this.authHeader) { + requestOptions.customHeaders = { + Authorization: `Bearer ${this.apiKey}`, + }; + } } - return this.isTextModel - ? new GoogleVertexAI(clientOptions) - : new ChatGoogleVertexAI(clientOptions); + if (this.project_id != null) { + logger.debug('Creating VertexAI client'); + this.visionMode = undefined; + clientOptions.streaming = true; + const client = new ChatVertexAI(clientOptions); + client.temperature = clientOptions.temperature; + client.topP = clientOptions.topP; + client.topK = clientOptions.topK; + client.topLogprobs = clientOptions.topLogprobs; + client.frequencyPenalty = clientOptions.frequencyPenalty; + client.presencePenalty = clientOptions.presencePenalty; + client.maxOutputTokens = clientOptions.maxOutputTokens; + return client; + } else if (!EXCLUDED_GENAI_MODELS.test(model)) { + logger.debug('Creating GenAI client'); + return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions); + } + + logger.debug('Creating Chat Google Generative AI client'); + return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey }); } - async getCompletion(_payload, options = {}) { - const { onProgress, abortController } = options; - const { parameters, instances } = _payload; - const { messages: _messages, context, examples: _examples } = instances?.[0] ?? 
{}; + initializeClient() { + let clientOptions = { ...this.modelOptions }; - let examples; - - let clientOptions = { ...parameters, maxRetries: 2 }; - - if (!this.isGenerativeModel) { + if (this.project_id) { clientOptions['authOptions'] = { credentials: { ...this.serviceKey, @@ -516,60 +619,284 @@ class GoogleClient extends BaseClient { }; } - if (!parameters) { - clientOptions = { ...clientOptions, ...this.modelOptions }; - } - - if (this.isGenerativeModel) { + if (this.isGenerativeModel && !this.project_id) { clientOptions.modelName = clientOptions.model; delete clientOptions.model; } - if (_examples && _examples.length) { - examples = _examples - .map((ex) => { - const { input, output } = ex; - if (!input || !output) { - return undefined; - } - return { - input: new HumanMessage(input.content), - output: new AIMessage(output.content), - }; - }) - .filter((ex) => ex); + this.client = this.createLLM(clientOptions); + return this.client; + } - clientOptions.examples = examples; - } - - const model = this.createLLM(clientOptions); + async getCompletion(_payload, options = {}) { + const { onProgress, abortController } = options; + const safetySettings = getSafetySettings(this.modelOptions.model); + const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE; + const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? ''; let reply = ''; - const messages = this.isTextModel ? _payload.trim() : _messages; + /** @type {Error} */ + let error; + try { + if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) { + /** @type {GenAI} */ + const client = this.client; + /** @type {GenerateContentRequest} */ + const requestOptions = { + safetySettings, + contents: _payload, + generationConfig: googleGenConfigSchema.parse(this.modelOptions), + }; - if (!this.isVisionModel && context && messages?.length > 0) { - messages.unshift(new SystemMessage(context)); - } + const promptPrefix = (this.options.promptPrefix ?? 
'').trim(); + if (promptPrefix.length) { + requestOptions.systemInstruction = { + parts: [ + { + text: promptPrefix, + }, + ], + }; + } - const stream = await model.stream(messages, { - signal: abortController.signal, - timeout: 7000, - }); + const delay = modelName.includes('flash') ? 8 : 15; + /** @type {GenAIUsageMetadata} */ + let usageMetadata; - for await (const chunk of stream) { - await this.generateTextStream(chunk?.content ?? chunk, onProgress, { - delay: this.isGenerativeModel ? 12 : 8, + const result = await client.generateContentStream(requestOptions); + for await (const chunk of result.stream) { + usageMetadata = !usageMetadata + ? chunk?.usageMetadata + : Object.assign(usageMetadata, chunk?.usageMetadata); + const chunkText = chunk.text(); + await this.generateTextStream(chunkText, onProgress, { + delay, + }); + reply += chunkText; + await sleep(streamRate); + } + + if (usageMetadata) { + this.usage = { + input_tokens: usageMetadata.promptTokenCount, + output_tokens: usageMetadata.candidatesTokenCount, + }; + } + + return reply; + } + + const { instances } = _payload; + const { messages: messages, context } = instances?.[0] ?? {}; + + if (!this.isVisionModel && context && messages?.length > 0) { + messages.unshift(new SystemMessage(context)); + } + + /** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */ + let usageMetadata; + /** @type {ChatVertexAI} */ + const client = this.client; + const stream = await client.stream(messages, { + signal: abortController.signal, + streamUsage: true, + safetySettings, }); - reply += chunk?.content ?? 
chunk; + + let delay = this.options.streamRate || 8; + + if (!this.options.streamRate) { + if (this.isGenerativeModel) { + delay = 15; + } + if (modelName.includes('flash')) { + delay = 5; + } + } + + for await (const chunk of stream) { + if (chunk?.usage_metadata) { + const metadata = chunk.usage_metadata; + for (const key in metadata) { + if (Number.isNaN(metadata[key])) { + delete metadata[key]; + } + } + + usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata); + } + + const chunkText = chunk?.content ?? ''; + await this.generateTextStream(chunkText, onProgress, { + delay, + }); + reply += chunkText; + } + + if (usageMetadata) { + this.usage = usageMetadata; + } + } catch (e) { + error = e; + logger.error('[GoogleClient] There was an issue generating the completion', e); } + if (error != null && reply === '') { + const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${ + error.message ?? 'The Google provider failed to generate content, please contact the Admin.' + }" }`; + throw new Error(errorMessage); + } return reply; } + /** + * Get stream usage as returned by this client's API response. + * @returns {UsageMetadata} The stream usage object. + */ + getStreamUsage() { + return this.usage; + } + + /** + * Calculates the correct token count for the current user message based on the token count map and API usage. + * Edge case: If the calculation results in a negative value, it returns the original estimate. + * If revisiting a conversation with a chat history entirely composed of token estimates, + * the cumulative token count going forward should become more accurate as the conversation progresses. + * @param {Object} params - The parameters for the calculation. + * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. + * @param {string} params.currentMessageId - The ID of the current message to calculate. + * @param {UsageMetadata} params.usage - The usage object returned by the API. 
+ * @returns {number} The correct token count for the current user message. + */ + calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { + const originalEstimate = tokenCountMap[currentMessageId] || 0; + + if (!usage || typeof usage.input_tokens !== 'number') { + return originalEstimate; + } + + tokenCountMap[currentMessageId] = 0; + const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { + const numCount = Number(count); + return sum + (isNaN(numCount) ? 0 : numCount); + }, 0); + const totalInputTokens = usage.input_tokens ?? 0; + const currentMessageTokens = totalInputTokens - totalTokensFromMap; + return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate; + } + + /** + * @param {object} params + * @param {number} params.promptTokens + * @param {number} params.completionTokens + * @param {UsageMetadata} [params.usage] + * @param {string} [params.model] + * @param {string} [params.context='message'] + * @returns {Promise} + */ + async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) { + await spendTokens( + { + context, + user: this.user ?? this.options.req?.user?.id, + conversationId: this.conversationId, + model: model ?? this.modelOptions.model, + endpointTokenConfig: this.options.endpointTokenConfig, + }, + { promptTokens, completionTokens }, + ); + } + + /** + * Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming + */ + async titleChatCompletion(_payload, options = {}) { + let reply = ''; + const { abortController } = options; + + const model = this.modelOptions.modelName ?? this.modelOptions.model ?? 
''; + const safetySettings = getSafetySettings(model); + if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) { + logger.debug('Identified titling model as GenAI version'); + /** @type {GenerativeModel} */ + const client = this.client; + const requestOptions = { + contents: _payload, + safetySettings, + generationConfig: { + temperature: 0.5, + }, + }; + + const result = await client.generateContent(requestOptions); + reply = result.response?.text(); + return reply; + } else { + const { instances } = _payload; + const { messages } = instances?.[0] ?? {}; + const titleResponse = await this.client.invoke(messages, { + signal: abortController.signal, + timeout: 7000, + safetySettings, + }); + + if (titleResponse.usage_metadata) { + await this.recordTokenUsage({ + model, + promptTokens: titleResponse.usage_metadata.input_tokens, + completionTokens: titleResponse.usage_metadata.output_tokens, + context: 'title', + }); + } + + reply = titleResponse.content; + return reply; + } + } + + async titleConvo({ text, responseText = '' }) { + let title = 'New Chat'; + const convo = `||>User: +"${truncateText(text)}" +||>Response: +"${JSON.stringify(truncateText(responseText))}"`; + + let { prompt: payload } = await this.buildMessages([ + { + text: `Please generate ${titleInstruction} + + ${convo} + + ||>Title:`, + isCreatedByUser: true, + author: this.userLabel, + }, + ]); + + try { + this.initializeClient(); + title = await this.titleChatCompletion(payload, { + abortController: new AbortController(), + onProgress: () => {}, + }); + } catch (e) { + logger.error('[GoogleClient] There was an issue generating the title', e); + } + logger.debug(`Title response: ${title}`); + return title; + } + getSaveOptions() { return { + endpointType: null, + artifacts: this.options.artifacts, promptPrefix: this.options.promptPrefix, + maxContextTokens: this.options.maxContextTokens, modelLabel: this.options.modelLabel, + iconURL: this.options.iconURL, + greeting: this.options.greeting, + 
spec: this.options.spec, ...this.modelOptions, }; } @@ -584,23 +911,34 @@ class GoogleClient extends BaseClient { return reply.trim(); } - /* TO-DO: Handle tokens with Google tokenization NOTE: these are required */ - static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) { - if (tokenizersCache[encoding]) { - return tokenizersCache[encoding]; - } - let tokenizer; - if (isModelName) { - tokenizer = encodingForModel(encoding, extendSpecialTokens); - } else { - tokenizer = getEncoding(encoding, extendSpecialTokens); - } - tokenizersCache[encoding] = tokenizer; - return tokenizer; + getEncoding() { + return 'cl100k_base'; } + async getVertexTokenCount(text) { + /** @type {ChatVertexAI} */ + const client = this.client ?? this.initializeClient(); + const connection = client.connection; + const gAuthClient = connection.client; + const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`; + const result = await gAuthClient.request({ + url: tokenEndpoint, + method: 'POST', + data: { + contents: [{ role: 'user', parts: [{ text }] }], + }, + }); + return result; + } + + /** + * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. + * @param {string} text - The text to get the token count for. + * @returns {number} The token count of the given text. 
+ */ getTokenCount(text) { - return this.gptEncoder.encode(text, 'all').length; + const encoding = this.getEncoding(); + return Tokenizer.getTokenCount(text, encoding); } } diff --git a/api/app/clients/OllamaClient.js b/api/app/clients/OllamaClient.js new file mode 100644 index 0000000000..d86e120f43 --- /dev/null +++ b/api/app/clients/OllamaClient.js @@ -0,0 +1,161 @@ +const { z } = require('zod'); +const axios = require('axios'); +const { Ollama } = require('ollama'); +const { Constants } = require('librechat-data-provider'); +const { deriveBaseURL } = require('~/utils'); +const { sleep } = require('~/server/utils'); +const { logger } = require('~/config'); + +const ollamaPayloadSchema = z.object({ + mirostat: z.number().optional(), + mirostat_eta: z.number().optional(), + mirostat_tau: z.number().optional(), + num_ctx: z.number().optional(), + repeat_last_n: z.number().optional(), + repeat_penalty: z.number().optional(), + temperature: z.number().optional(), + seed: z.number().nullable().optional(), + stop: z.array(z.string()).optional(), + tfs_z: z.number().optional(), + num_predict: z.number().optional(), + top_k: z.number().optional(), + top_p: z.number().optional(), + stream: z.optional(z.boolean()), + model: z.string(), +}); + +/** + * @param {string} imageUrl + * @returns {string} + * @throws {Error} + */ +const getValidBase64 = (imageUrl) => { + const parts = imageUrl.split(';base64,'); + + if (parts.length === 2) { + return parts[1]; + } else { + logger.error('Invalid or no Base64 string found in URL.'); + } +}; + +class OllamaClient { + constructor(options = {}) { + const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434'); + this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE; + /** @type {Ollama} */ + this.client = new Ollama({ host }); + } + + /** + * Fetches Ollama models from the specified base API path. + * @param {string} baseURL + * @returns {Promise} The Ollama models. 
+ */ + static async fetchModels(baseURL) { + let models = []; + if (!baseURL) { + return models; + } + try { + const ollamaEndpoint = deriveBaseURL(baseURL); + /** @type {Promise>} */ + const response = await axios.get(`${ollamaEndpoint}/api/tags`, { + timeout: 5000, + }); + models = response.data.models.map((tag) => tag.name); + return models; + } catch (error) { + const logMessage = + 'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).'; + logger.error(logMessage, error); + return []; + } + } + + /** + * @param {ChatCompletionMessage[]} messages + * @returns {OllamaMessage[]} + */ + static formatOpenAIMessages(messages) { + const ollamaMessages = []; + + for (const message of messages) { + if (typeof message.content === 'string') { + ollamaMessages.push({ + role: message.role, + content: message.content, + }); + continue; + } + + let aggregatedText = ''; + let imageUrls = []; + + for (const content of message.content) { + if (content.type === 'text') { + aggregatedText += content.text + ' '; + } else if (content.type === 'image_url') { + imageUrls.push(getValidBase64(content.image_url.url)); + } + } + + const ollamaMessage = { + role: message.role, + content: aggregatedText.trim(), + }; + + if (imageUrls.length > 0) { + ollamaMessage.images = imageUrls; + } + + ollamaMessages.push(ollamaMessage); + } + + return ollamaMessages; + } + + /*** + * @param {Object} params + * @param {ChatCompletionPayload} params.payload + * @param {onTokenProgress} params.onProgress + * @param {AbortController} params.abortController + */ + async chatCompletion({ payload, onProgress, abortController = null }) { + let intermediateReply = ''; + + const parameters = ollamaPayloadSchema.parse(payload); + const messages = OllamaClient.formatOpenAIMessages(payload.messages); + + if 
(parameters.stream) { + const stream = await this.client.chat({ + messages, + ...parameters, + }); + + for await (const chunk of stream) { + const token = chunk.message.content; + intermediateReply += token; + onProgress(token); + if (abortController.signal.aborted) { + stream.controller.abort(); + break; + } + + await sleep(this.streamRate); + } + } + // TODO: regular completion + else { + // const generation = await this.client.generate(payload); + } + + return intermediateReply; + } + catch(err) { + logger.error('[OllamaClient.chatCompletion]', err); + throw err; + } +} + +module.exports = { OllamaClient, ollamaPayloadSchema }; diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index 20afdeb1bc..368e7d6e84 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -1,46 +1,55 @@ const OpenAI = require('openai'); +const { OllamaClient } = require('./OllamaClient'); const { HttpsProxyAgent } = require('https-proxy-agent'); +const { SplitStreamHandler, GraphEvents } = require('@librechat/agents'); const { + Constants, ImageDetail, EModelEndpoint, resolveHeaders, + openAISettings, ImageDetailCost, + CohereConstants, getResponseSender, validateVisionModel, mapModelToAzureConfig, } = require('librechat-data-provider'); -const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); const { extractBaseURL, constructAzureURL, getModelMaxTokens, genAzureChatCompletion, + getModelMaxOutputTokens, } = require('~/utils'); +const { + truncateText, + formatMessage, + CUT_OFF_PROMPT, + titleInstruction, + createContextHandlers, +} = require('./prompts'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); -const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts'); +const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils'); +const Tokenizer = require('~/server/services/Tokenizer'); +const { spendTokens } = 
require('~/models/spendTokens'); const { handleOpenAIErrors } = require('./tools/util'); -const spendTokens = require('~/models/spendTokens'); const { createLLM, RunManager } = require('./llm'); +const { logger, sendEvent } = require('~/config'); const ChatGPTClient = require('./ChatGPTClient'); -const { isEnabled } = require('~/server/utils'); -const { getFiles } = require('~/models/File'); const { summaryBuffer } = require('./memory'); const { runTitleChain } = require('./chains'); const { tokenSplit } = require('./document'); const BaseClient = require('./BaseClient'); -const { logger } = require('~/config'); - -// Cache to store Tiktoken instances -const tokenizersCache = {}; -// Counter for keeping track of the number of tokenizer calls -let tokenizerCallsCount = 0; class OpenAIClient extends BaseClient { constructor(apiKey, options = {}) { super(apiKey, options); this.ChatGPTClient = new ChatGPTClient(); this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this); + /** @type {getCompletion} */ this.getCompletion = this.ChatGPTClient.getCompletion.bind(this); + /** @type {cohereChatCompletion} */ + this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this); this.contextStrategy = options.contextStrategy ? 
options.contextStrategy.toLowerCase() : 'discard'; @@ -48,6 +57,17 @@ class OpenAIClient extends BaseClient { /** @type {AzureOptions} */ this.azure = options.azure || false; this.setOptions(options); + this.metadata = {}; + + /** @type {string | undefined} - The API Completions URL */ + this.completionsUrl; + + /** @type {OpenAIUsageMetadata | undefined} */ + this.usage; + /** @type {boolean|undefined} */ + this.isOmni; + /** @type {SplitStreamHandler | undefined} */ + this.streamHandler; } // TODO: PluginsClient calls this 3x, unneeded @@ -70,29 +90,23 @@ class OpenAIClient extends BaseClient { this.apiKey = this.options.openaiApiKey; } - const modelOptions = this.options.modelOptions || {}; - - if (!this.modelOptions) { - this.modelOptions = { - ...modelOptions, - model: modelOptions.model || 'gpt-3.5-turbo', - temperature: - typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature, - top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p, - presence_penalty: - typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty, - stop: modelOptions.stop, - }; - } else { - // Update the modelOptions if it already exists - this.modelOptions = { - ...this.modelOptions, - ...modelOptions, - }; - } + this.modelOptions = Object.assign( + { + model: openAISettings.model.default, + }, + this.modelOptions, + this.options.modelOptions, + ); this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview'; - this.checkVisionRequest(this.options.attachments); + if (typeof this.options.attachments?.then === 'function') { + this.options.attachments.then((attachments) => this.checkVisionRequest(attachments)); + } else { + this.checkVisionRequest(this.options.attachments); + } + + const omniPattern = /\b(o1|o3)\b/i; + this.isOmni = omniPattern.test(this.modelOptions.model); const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? 
{}; if (OPENROUTER_API_KEY && !this.azure) { @@ -110,6 +124,10 @@ class OpenAIClient extends BaseClient { this.useOpenRouter = true; } + if (this.options.endpoint?.toLowerCase() === 'ollama') { + this.isOllama = true; + } + this.FORCE_PROMPT = isEnabled(OPENAI_FORCE_PROMPT) || (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat')); @@ -127,7 +145,8 @@ class OpenAIClient extends BaseClient { const { model } = this.modelOptions; - this.isChatCompletion = this.useOpenRouter || !!reverseProxy || model.includes('gpt'); + this.isChatCompletion = + omniPattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy; this.isChatGptModel = this.isChatCompletion; if ( model.includes('text-davinci') || @@ -142,11 +161,13 @@ class OpenAIClient extends BaseClient { model.startsWith('text-chat') || model.startsWith('text-davinci-002-render'); this.maxContextTokens = + this.options.maxContextTokens ?? getModelMaxTokens( model, this.options.endpointType ?? this.options.endpoint, this.options.endpointTokenConfig, - ) ?? 4095; // 1 less than maximum + ) ?? + 4095; // 1 less than maximum if (this.shouldSummarize) { this.maxContextTokens = Math.floor(this.maxContextTokens / 2); @@ -156,7 +177,14 @@ class OpenAIClient extends BaseClient { logger.debug('[OpenAIClient] maxContextTokens', this.maxContextTokens); } - this.maxResponseTokens = this.modelOptions.max_tokens || 1024; + this.maxResponseTokens = + this.modelOptions.max_tokens ?? + getModelMaxOutputTokens( + model, + this.options.endpointType ?? this.options.endpoint, + this.options.endpointTokenConfig, + ) ?? 
+ 1024; this.maxPromptTokens = this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens; @@ -174,8 +202,8 @@ class OpenAIClient extends BaseClient { model: this.modelOptions.model, endpoint: this.options.endpoint, endpointType: this.options.endpointType, - chatGptLabel: this.options.chatGptLabel, modelDisplayLabel: this.options.modelDisplayLabel, + chatGptLabel: this.options.chatGptLabel || this.options.modelLabel, }); this.userLabel = this.options.userLabel || 'User'; @@ -183,16 +211,6 @@ class OpenAIClient extends BaseClient { this.setupTokens(); - if (!this.modelOptions.stop && !this.isVisionModel) { - const stopTokens = [this.startToken]; - if (this.endToken && this.endToken !== this.startToken) { - stopTokens.push(this.endToken); - } - stopTokens.push(`\n${this.userLabel}:`); - stopTokens.push('<|diff_marker|>'); - this.modelOptions.stop = stopTokens; - } - if (reverseProxy) { this.completionsUrl = reverseProxy; this.langchainProxy = extractBaseURL(reverseProxy); @@ -223,21 +241,55 @@ class OpenAIClient extends BaseClient { * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request. * - Sets `this.isVisionModel` to `true` if vision request. * - Deletes `this.modelOptions.stop` if vision request. 
- * @param {Array | MongoFile[]> | Record} attachments + * @param {MongoFile[]} attachments */ checkVisionRequest(attachments) { - const availableModels = this.options.modelsConfig?.[this.options.endpoint]; - this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); - - const visionModelAvailable = availableModels?.includes(this.defaultVisionModel); - if (attachments && visionModelAvailable && !this.isVisionModel) { - this.modelOptions.model = this.defaultVisionModel; - this.isVisionModel = true; + if (!attachments) { + return; } + const availableModels = this.options.modelsConfig?.[this.options.endpoint]; + if (!availableModels) { + return; + } + + let visionRequestDetected = false; + for (const file of attachments) { + if (file?.type?.includes('image')) { + visionRequestDetected = true; + break; + } + } + if (!visionRequestDetected) { + return; + } + + this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); if (this.isVisionModel) { delete this.modelOptions.stop; + return; } + + for (const model of availableModels) { + if (!validateVisionModel({ model, availableModels })) { + continue; + } + this.modelOptions.model = model; + this.isVisionModel = true; + delete this.modelOptions.stop; + return; + } + + if (!availableModels.includes(this.defaultVisionModel)) { + return; + } + if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) { + return; + } + + this.modelOptions.model = this.defaultVisionModel; + this.isVisionModel = true; + delete this.modelOptions.stop; } setupTokens() { @@ -253,75 +305,8 @@ class OpenAIClient extends BaseClient { } } - // Selects an appropriate tokenizer based on the current configuration of the client instance. - // It takes into account factors such as whether it's a chat completion, an unofficial chat GPT model, etc. 
- selectTokenizer() { - let tokenizer; - this.encoding = 'text-davinci-003'; - if (this.isChatCompletion) { - this.encoding = 'cl100k_base'; - tokenizer = this.constructor.getTokenizer(this.encoding); - } else if (this.isUnofficialChatGptModel) { - const extendSpecialTokens = { - '<|im_start|>': 100264, - '<|im_end|>': 100265, - }; - tokenizer = this.constructor.getTokenizer(this.encoding, true, extendSpecialTokens); - } else { - try { - const { model } = this.modelOptions; - this.encoding = model.includes('instruct') ? 'text-davinci-003' : model; - tokenizer = this.constructor.getTokenizer(this.encoding, true); - } catch { - tokenizer = this.constructor.getTokenizer('text-davinci-003', true); - } - } - - return tokenizer; - } - - // Retrieves a tokenizer either from the cache or creates a new one if one doesn't exist in the cache. - // If a tokenizer is being created, it's also added to the cache. - static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) { - let tokenizer; - if (tokenizersCache[encoding]) { - tokenizer = tokenizersCache[encoding]; - } else { - if (isModelName) { - tokenizer = encodingForModel(encoding, extendSpecialTokens); - } else { - tokenizer = getEncoding(encoding, extendSpecialTokens); - } - tokenizersCache[encoding] = tokenizer; - } - return tokenizer; - } - - // Frees all encoders in the cache and resets the count. - static freeAndResetAllEncoders() { - try { - Object.keys(tokenizersCache).forEach((key) => { - if (tokenizersCache[key]) { - tokenizersCache[key].free(); - delete tokenizersCache[key]; - } - }); - // Reset count - tokenizerCallsCount = 1; - } catch (error) { - logger.error('[OpenAIClient] Free and reset encoders error', error); - } - } - - // Checks if the cache of tokenizers has reached a certain size. If it has, it frees and resets all tokenizers. 
- resetTokenizersIfNecessary() { - if (tokenizerCallsCount >= 25) { - if (this.options.debug) { - logger.debug('[OpenAIClient] freeAndResetAllEncoders: reached 25 encodings, resetting...'); - } - this.constructor.freeAndResetAllEncoders(); - } - tokenizerCallsCount++; + getEncoding() { + return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base'; } /** @@ -330,15 +315,8 @@ class OpenAIClient extends BaseClient { * @returns {number} The token count of the given text. */ getTokenCount(text) { - this.resetTokenizersIfNecessary(); - try { - const tokenizer = this.selectTokenizer(); - return tokenizer.encode(text, 'all').length; - } catch (error) { - this.constructor.freeAndResetAllEncoders(); - const tokenizer = this.selectTokenizer(); - return tokenizer.encode(text, 'all').length; - } + const encoding = this.getEncoding(); + return Tokenizer.getTokenCount(text, encoding); } /** @@ -364,10 +342,16 @@ class OpenAIClient extends BaseClient { getSaveOptions() { return { + artifacts: this.options.artifacts, + maxContextTokens: this.options.maxContextTokens, chatGptLabel: this.options.chatGptLabel, promptPrefix: this.options.promptPrefix, - resendImages: this.options.resendImages, + resendFiles: this.options.resendFiles, imageDetail: this.options.imageDetail, + modelLabel: this.options.modelLabel, + iconURL: this.options.iconURL, + greeting: this.options.greeting, + spec: this.options.spec, ...this.modelOptions, }; } @@ -380,54 +364,6 @@ class OpenAIClient extends BaseClient { }; } - /** - * - * @param {TMessage[]} _messages - * @returns {TMessage[]} - */ - async addPreviousAttachments(_messages) { - if (!this.options.resendImages) { - return _messages; - } - - /** - * - * @param {TMessage} message - */ - const processMessage = async (message) => { - if (!this.message_file_map) { - /** @type {Record */ - this.message_file_map = {}; - } - - const fileIds = message.files.map((file) => file.file_id); - const files = await getFiles({ - file_id: { $in: fileIds }, - }); 
- - await this.addImageURLs(message, files); - - this.message_file_map[message.messageId] = files; - return message; - }; - - const promises = []; - - for (const message of _messages) { - if (!message.files) { - promises.push(message); - continue; - } - - promises.push(processMessage(message)); - } - - const messages = await Promise.all(promises); - - this.checkVisionRequest(this.message_file_map); - return messages; - } - /** * * Adds image URLs to the message object and returns the files @@ -437,9 +373,12 @@ class OpenAIClient extends BaseClient { * @returns {Promise} */ async addImageURLs(message, attachments) { - const { files, image_urls } = await encodeAndFormat(this.options.req, attachments); - - message.image_urls = image_urls; + const { files, image_urls } = await encodeAndFormat( + this.options.req, + attachments, + this.options.endpoint, + ); + message.image_urls = image_urls.length ? image_urls : undefined; return files; } @@ -467,23 +406,12 @@ class OpenAIClient extends BaseClient { let promptTokens; promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim(); - if (promptPrefix) { - promptPrefix = `Instructions:\n${promptPrefix}`; - instructions = { - role: 'system', - name: 'instructions', - content: promptPrefix, - }; - - if (this.contextStrategy) { - instructions.tokenCount = this.getTokenCountForMessage(instructions); - } + if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { + promptPrefix = `${promptPrefix ?? 
''}\n${this.options.artifactsPrompt}`.trim(); } if (this.options.attachments) { - const attachments = (await this.options.attachments).filter((file) => - file.type.includes('image'), - ); + const attachments = await this.options.attachments; if (this.message_file_map) { this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments; @@ -501,6 +429,13 @@ class OpenAIClient extends BaseClient { this.options.attachments = files; } + if (this.message_file_map) { + this.contextHandlers = createContextHandlers( + this.options.req, + orderedMessages[orderedMessages.length - 1].text, + ); + } + const formattedMessages = orderedMessages.map((message, i) => { const formattedMessage = formatMessage({ message, @@ -519,6 +454,11 @@ class OpenAIClient extends BaseClient { if (this.message_file_map && this.message_file_map[message.messageId]) { const attachments = this.message_file_map[message.messageId]; for (const file of attachments) { + if (file.embedded) { + this.contextHandlers?.processFile(file); + continue; + } + orderedMessages[i].tokenCount += this.calculateImageTokenCost({ width: file.width, height: file.height, @@ -530,6 +470,23 @@ class OpenAIClient extends BaseClient { return formattedMessage; }); + if (this.contextHandlers) { + this.augmentedPrompt = await this.contextHandlers.createContext(); + promptPrefix = this.augmentedPrompt + promptPrefix; + } + + if (promptPrefix && this.isOmni !== true) { + promptPrefix = `Instructions:\n${promptPrefix.trim()}`; + instructions = { + role: 'system', + content: promptPrefix, + }; + + if (this.contextStrategy) { + instructions.tokenCount = this.getTokenCountForMessage(instructions); + } + } + // TODO: need to handle interleaving instructions better if (this.contextStrategy) { ({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({ @@ -545,6 +502,15 @@ class OpenAIClient extends BaseClient { messages, }; + /** EXPERIMENTAL */ + if (promptPrefix && this.isOmni === 
true) { + const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user'); + if (lastUserMessageIndex !== -1) { + payload[lastUserMessageIndex].content = + `${promptPrefix}\n${payload[lastUserMessageIndex].content}`; + } + } + if (tokenCountMap) { tokenCountMap.instructions = instructions?.tokenCount; result.tokenCountMap = tokenCountMap; @@ -557,15 +523,16 @@ class OpenAIClient extends BaseClient { return result; } + /** @type {sendCompletion} */ async sendCompletion(payload, opts = {}) { let reply = ''; let result = null; let streamResult = null; this.modelOptions.user = this.user; const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null; - const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined'); + const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion); if (typeof opts.onProgress === 'function' && useOldMethod) { - await this.getCompletion( + const completionResult = await this.getCompletion( payload, (progressMessage) => { if (progressMessage === '[DONE]') { @@ -598,12 +565,22 @@ class OpenAIClient extends BaseClient { opts.onProgress(token); reply += token; }, + opts.onProgress, opts.abortController || new AbortController(), ); + + if (completionResult && typeof completionResult === 'string') { + reply = completionResult; + } else if ( + completionResult && + typeof completionResult === 'object' && + Array.isArray(completionResult.choices) + ) { + reply = completionResult.choices[0]?.text?.replace(this.endToken, ''); + } } else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) { reply = await this.chatCompletion({ payload, - clientOptions: opts, onProgress: opts.onProgress, abortController: opts.abortController, }); @@ -611,9 +588,14 @@ class OpenAIClient extends BaseClient { result = await this.getCompletion( payload, null, + opts.onProgress, opts.abortController || new AbortController(), ); + if (result && typeof result === 
'string') { + return result.trim(); + } + logger.debug('[OpenAIClient] sendCompletion: result', result); if (this.isChatCompletion) { @@ -623,19 +605,17 @@ class OpenAIClient extends BaseClient { } } - if (streamResult && typeof opts.addMetadata === 'function') { + if (streamResult) { const { finish_reason } = streamResult.choices[0]; - opts.addMetadata({ finish_reason }); + this.metadata = { finish_reason }; } return (reply ?? '').trim(); } initializeLLM({ - model = 'gpt-3.5-turbo', + model = 'gpt-4o-mini', modelName, temperature = 0.2, - presence_penalty = 0, - frequency_penalty = 0, max_tokens, streaming, context, @@ -646,8 +626,6 @@ class OpenAIClient extends BaseClient { const modelOptions = { modelName: modelName ?? model, temperature, - presence_penalty, - frequency_penalty, user: this.user, }; @@ -722,6 +700,12 @@ class OpenAIClient extends BaseClient { * In case of failure, it will return the default title, "New Chat". */ async titleConvo({ text, conversationId, responseText = '' }) { + this.conversationId = conversationId; + + if (this.options.attachments) { + delete this.options.attachments; + } + let title = 'New Chat'; const convo = `||>User: "${truncateText(text)}" @@ -730,7 +714,10 @@ class OpenAIClient extends BaseClient { const { OPENAI_TITLE_MODEL } = process.env ?? {}; - const model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo'; + let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 
'gpt-4o-mini'; + if (model === Constants.CURRENT_MODEL) { + model = this.modelOptions.model; + } const modelOptions = { // TODO: remove the gpt fallback and make it specific to endpoint @@ -744,9 +731,10 @@ class OpenAIClient extends BaseClient { /** @type {TAzureConfig | undefined} */ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI]; - const resetTitleOptions = + const resetTitleOptions = !!( (this.azure && azureConfig) || - (azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI); + (azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI) + ); if (resetTitleOptions) { const { modelGroupMap, groupMap } = azureConfig; @@ -771,32 +759,53 @@ class OpenAIClient extends BaseClient { this.options.dropParams = azureConfig.groupMap[groupName].dropParams; this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt; this.azure = !serverless && azureOptions; + if (serverless === true) { + this.options.defaultQuery = azureOptions.azureOpenAIApiVersion + ? { 'api-version': azureOptions.azureOpenAIApiVersion } + : undefined; + this.options.headers['api-key'] = this.apiKey; + } } const titleChatCompletion = async () => { - modelOptions.model = model; + try { + modelOptions.model = model; - if (this.azure) { - modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model; - this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model, this); - } + if (this.azure) { + modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model; + this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model, this); + } - const instructionsPayload = [ - { - role: 'system', - content: `Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect. -Write in the detected language. Title in 5 Words or Less. No Punctuation or Quotation. Do not mention the language. 
All first letters of every word should be capitalized and write the title in User Language only. + const instructionsPayload = [ + { + role: this.options.titleMessageRole ?? (this.isOllama ? 'user' : 'system'), + content: `Please generate ${titleInstruction} ${convo} ||>Title:`, - }, - ]; + }, + ]; + + const promptTokens = this.getTokenCountForMessage(instructionsPayload[0]); + + let useChatCompletion = true; + + if (this.options.reverseProxyUrl === CohereConstants.API_URL) { + useChatCompletion = false; + } - try { title = ( - await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true }) + await this.sendPayload(instructionsPayload, { + modelOptions, + useChatCompletion, + context: 'title', + }) ).replaceAll('"', ''); + + const completionTokens = this.getTokenCount(title); + + this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' }); } catch (e) { logger.error( '[OpenAIClient] There was an issue generating the title with the completion method', @@ -819,6 +828,7 @@ ${convo} context: 'title', tokenBuffer: 150, }); + title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal }); } catch (e) { if (e?.message?.toLowerCase()?.includes('abort')) { @@ -837,14 +847,72 @@ ${convo} return title; } + /** + * Get stream usage as returned by this client's API response. + * @returns {OpenAIUsageMetadata} The stream usage object. 
+ */ + getStreamUsage() { + if ( + this.usage && + typeof this.usage === 'object' && + 'completion_tokens_details' in this.usage && + this.usage.completion_tokens_details && + typeof this.usage.completion_tokens_details === 'object' && + 'reasoning_tokens' in this.usage.completion_tokens_details + ) { + const outputTokens = Math.abs( + this.usage.completion_tokens_details.reasoning_tokens - this.usage[this.outputTokensKey], + ); + return { + ...this.usage.completion_tokens_details, + [this.inputTokensKey]: this.usage[this.inputTokensKey], + [this.outputTokensKey]: outputTokens, + }; + } + return this.usage; + } + + /** + * Calculates the correct token count for the current user message based on the token count map and API usage. + * Edge case: If the calculation results in a negative value, it returns the original estimate. + * If revisiting a conversation with a chat history entirely composed of token estimates, + * the cumulative token count going forward should become more accurate as the conversation progresses. + * @param {Object} params - The parameters for the calculation. + * @param {Record} params.tokenCountMap - A map of message IDs to their token counts. + * @param {string} params.currentMessageId - The ID of the current message to calculate. + * @param {OpenAIUsageMetadata} params.usage - The usage object returned by the API. + * @returns {number} The correct token count for the current user message. + */ + calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) { + const originalEstimate = tokenCountMap[currentMessageId] || 0; + + if (!usage || typeof usage[this.inputTokensKey] !== 'number') { + return originalEstimate; + } + + tokenCountMap[currentMessageId] = 0; + const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => { + const numCount = Number(count); + return sum + (isNaN(numCount) ? 0 : numCount); + }, 0); + const totalInputTokens = usage[this.inputTokensKey] ?? 
0; + + const currentMessageTokens = totalInputTokens - totalTokensFromMap; + return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate; + } + async summarizeMessages({ messagesToRefine, remainingContextTokens }) { logger.debug('[OpenAIClient] Summarizing messages...'); let context = messagesToRefine; let prompt; // TODO: remove the gpt fallback and make it specific to endpoint - const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {}; - const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL; + const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {}; + let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL; + if (model === Constants.CURRENT_MODEL) { + model = this.modelOptions.model; + } + const maxContextTokens = getModelMaxTokens( model, @@ -865,7 +933,10 @@ ${convo} ); if (excessTokenCount > maxContextTokens) { - ({ context } = await this.getMessagesWithinTokenLimit(context, maxContextTokens)); + ({ context } = await this.getMessagesWithinTokenLimit({ + messages: context, + maxContextTokens, + })); } if (context.length === 0) { @@ -948,18 +1019,44 @@ ${convo} } } - async recordTokenUsage({ promptTokens, completionTokens }) { - logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens }); + /** + * @param {object} params + * @param {number} params.promptTokens + * @param {number} params.completionTokens + * @param {OpenAIUsageMetadata} [params.usage] + * @param {string} [params.model] + * @param {string} [params.context='message'] + * @returns {Promise} + */ + async recordTokenUsage({ promptTokens, completionTokens, usage, context = 'message' }) { await spendTokens( { - user: this.user, + context, model: this.modelOptions.model, - context: 'message', conversationId: this.conversationId, + user: this.user ?? 
this.options.req.user?.id, endpointTokenConfig: this.options.endpointTokenConfig, }, { promptTokens, completionTokens }, ); + + if ( + usage && + typeof usage === 'object' && + 'reasoning_tokens' in usage && + typeof usage.reasoning_tokens === 'number' + ) { + await spendTokens( + { + context: 'reasoning', + model: this.modelOptions.model, + conversationId: this.conversationId, + user: this.user ?? this.options.req.user?.id, + endpointTokenConfig: this.options.endpointTokenConfig, + }, + { completionTokens: usage.reasoning_tokens }, + ); + } } getTokenCountForResponse(response) { @@ -969,10 +1066,58 @@ ${convo} }); } - async chatCompletion({ payload, onProgress, clientOptions, abortController = null }) { + /** + * + * @param {string[]} [intermediateReply] + * @returns {string} + */ + getStreamText(intermediateReply) { + if (!this.streamHandler) { + return intermediateReply?.join('') ?? ''; + } + + let thinkMatch; + let remainingText; + let reasoningText = ''; + + if (this.streamHandler.reasoningTokens.length > 0) { + reasoningText = this.streamHandler.reasoningTokens.join(''); + thinkMatch = reasoningText.match(/([\s\S]*?)<\/think>/)?.[1]?.trim(); + if (thinkMatch != null && thinkMatch) { + const reasoningTokens = `:::thinking\n${thinkMatch}\n:::\n`; + remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || ''; + return `${reasoningTokens}${remainingText}${this.streamHandler.tokens.join('')}`; + } else if (thinkMatch === '') { + remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || ''; + return `${remainingText}${this.streamHandler.tokens.join('')}`; + } + } + + const reasoningTokens = + reasoningText.length > 0 + ? 
`:::thinking\n${reasoningText.replace('', '').replace('', '').trim()}\n:::\n` + : ''; + + return `${reasoningTokens}${this.streamHandler.tokens.join('')}`; + } + + getMessageMapMethod() { + /** + * @param {TMessage} msg + */ + return (msg) => { + if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) { + msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim(); + } + + return msg; + }; + } + + async chatCompletion({ payload, onProgress, abortController = null }) { let error = null; + let intermediateReply = []; const errorCallback = (err) => (error = err); - let intermediateReply = ''; try { if (!abortController) { abortController = new AbortController(); @@ -990,15 +1135,6 @@ ${convo} } const baseURL = extractBaseURL(this.completionsUrl); - // let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions; - // if (modelOptionsToLog.messages) { - // _msgsToLog = modelOptionsToLog.messages.map((msg) => { - // let { content, ...rest } = msg; - - // if (content) - // return { ...rest, content: truncateText(content) }; - // }); - // } logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions }); const opts = { baseURL, @@ -1015,6 +1151,10 @@ ${convo} opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers }; } + if (this.options.defaultQuery) { + opts.defaultQuery = this.options.defaultQuery; + } + if (this.options.proxy) { opts.httpAgent = new HttpsProxyAgent(this.options.proxy); } @@ -1053,22 +1193,39 @@ ${convo} this.azure = !serverless && azureOptions; this.azureEndpoint = !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this); + if (serverless === true) { + this.options.defaultQuery = azureOptions.azureOpenAIApiVersion + ? { 'api-version': azureOptions.azureOpenAIApiVersion } + : undefined; + this.options.headers['api-key'] = this.apiKey; + } } if (this.azure || this.options.azure) { - // Azure does not accept `model` in the body, so we need to remove it. 
+ /* Azure Bug, extremely short default `max_tokens` response */ + if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') { + modelOptions.max_tokens = 4000; + } + + /* Azure does not accept `model` in the body, so we need to remove it. */ delete modelOptions.model; opts.baseURL = this.langchainProxy ? constructAzureURL({ baseURL: this.langchainProxy, - azure: this.azure, + azureOptions: this.azure, }) - : this.azureEndpoint.split(/\/(chat|completion)/)[0]; + : this.azureEndpoint.split(/(? msg.role === 'system'); @@ -1095,10 +1250,16 @@ ${convo} } modelOptions.messages = messages; + } - if (messages.length === 1 && messages[0].role === 'system') { - modelOptions.messages[0].role = 'user'; - } + /* If there is only one message and it's a system message, change the role to user */ + if ( + (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) && + modelOptions.messages && + modelOptions.messages.length === 1 && + modelOptions.messages[0]?.role === 'system' + ) { + modelOptions.messages[0].role = 'user'; } if (this.options.addParams && typeof this.options.addParams === 'object') { @@ -1122,46 +1283,136 @@ ${convo} }); } + const streamRate = this.options.streamRate ?? 
Constants.DEFAULT_STREAM_RATE; + + if (this.message_file_map && this.isOllama) { + const ollamaClient = new OllamaClient({ baseURL, streamRate }); + return await ollamaClient.chatCompletion({ + payload: modelOptions, + onProgress, + abortController, + }); + } + let UnexpectedRoleError = false; + /** @type {Promise} */ + let streamPromise; + /** @type {(value: void | PromiseLike) => void} */ + let streamResolve; + + if ( + this.isOmni === true && + (this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) && + !/o3-.*$/.test(this.modelOptions.model) && + modelOptions.stream + ) { + delete modelOptions.stream; + delete modelOptions.stop; + } else if (!this.isOmni && modelOptions.reasoning_effort != null) { + delete modelOptions.reasoning_effort; + } + + let reasoningKey = 'reasoning_content'; + if (this.useOpenRouter) { + modelOptions.include_reasoning = true; + reasoningKey = 'reasoning'; + } + + this.streamHandler = new SplitStreamHandler({ + reasoningKey, + accumulate: true, + runId: this.responseMessageId, + handlers: { + [GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event), + [GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event), + [GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event), + }, + }); + + intermediateReply = this.streamHandler.tokens; + if (modelOptions.stream) { + streamPromise = new Promise((resolve) => { + streamResolve = resolve; + }); + /** @type {OpenAI.OpenAI.CompletionCreateParamsStreaming} */ + const params = { + ...modelOptions, + stream: true, + }; + if ( + this.options.endpoint === EModelEndpoint.openAI || + this.options.endpoint === EModelEndpoint.azureOpenAI + ) { + params.stream_options = { include_usage: true }; + } const stream = await openai.beta.chat.completions - .stream({ - ...modelOptions, - stream: true, - }) + .stream(params) .on('abort', () => { /* Do nothing here */ }) .on('error', (err) => { handleOpenAIErrors(err, errorCallback, 'stream'); }) 
- .on('finalChatCompletion', (finalChatCompletion) => { + .on('finalChatCompletion', async (finalChatCompletion) => { const finalMessage = finalChatCompletion?.choices?.[0]?.message; - if (finalMessage && finalMessage?.role !== 'assistant') { + if (!finalMessage) { + return; + } + await streamPromise; + if (finalMessage?.role !== 'assistant') { finalChatCompletion.choices[0].message.role = 'assistant'; } - if (finalMessage && !finalMessage?.content?.trim()) { - finalChatCompletion.choices[0].message.content = intermediateReply; + if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') { + finalChatCompletion.choices[0].message.content = this.streamHandler.tokens.join(''); } }) .on('finalMessage', (message) => { if (message?.role !== 'assistant') { - stream.messages.push({ role: 'assistant', content: intermediateReply }); + stream.messages.push({ + role: 'assistant', + content: this.streamHandler.tokens.join(''), + }); UnexpectedRoleError = true; } }); + if (this.continued === true) { + const latestText = addSpaceIfNeeded( + this.currentMessages[this.currentMessages.length - 1]?.text ?? 
'', + ); + this.streamHandler.handle({ + choices: [ + { + delta: { + content: latestText, + }, + }, + ], + }); + } + for await (const chunk of stream) { - const token = chunk.choices[0]?.delta?.content || ''; - intermediateReply += token; - onProgress(token); + // Add finish_reason: null if missing in any choice + if (chunk.choices) { + chunk.choices.forEach((choice) => { + if (!('finish_reason' in choice)) { + choice.finish_reason = null; + } + }); + } + this.streamHandler.handle(chunk); if (abortController.signal.aborted) { stream.controller.abort(); break; } + + await sleep(streamRate); } + streamResolve(); + if (!UnexpectedRoleError) { chatCompletion = await stream.finalChatCompletion().catch((err) => { handleOpenAIErrors(err, errorCallback, 'finalChatCompletion'); @@ -1189,19 +1440,45 @@ ${convo} throw new Error('Chat completion failed'); } - const { message, finish_reason } = chatCompletion.choices[0]; - if (chatCompletion && typeof clientOptions.addMetadata === 'function') { - clientOptions.addMetadata({ finish_reason }); + const { choices } = chatCompletion; + this.usage = chatCompletion.usage; + + if (!Array.isArray(choices) || choices.length === 0) { + logger.warn('[OpenAIClient] Chat completion response has no choices'); + return this.streamHandler.tokens.join(''); } + const { message, finish_reason } = choices[0] ?? 
{}; + this.metadata = { finish_reason }; + logger.debug('[OpenAIClient] chatCompletion response', chatCompletion); - if (!message?.content?.trim() && intermediateReply.length) { + if (!message) { + logger.warn('[OpenAIClient] Message is undefined in chatCompletion response'); + return this.streamHandler.tokens.join(''); + } + + if (typeof message.content !== 'string' || message.content.trim() === '') { + const reply = this.streamHandler.tokens.join(''); logger.debug( '[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content', - { intermediateReply }, + { intermediateReply: reply }, ); - return intermediateReply; + return reply; + } + + if ( + this.streamHandler.reasoningTokens.length > 0 && + this.options.context !== 'title' && + !message.content.startsWith('<think>') + ) { + return this.getStreamText(); + } else if ( + this.streamHandler.reasoningTokens.length > 0 && + this.options.context !== 'title' && + message.content.startsWith('<think>') + ) { + return this.getStreamText(); + } return message.content; @@ -1210,7 +1487,7 @@ ${convo} err?.message?.includes('abort') || (err instanceof OpenAI.APIError && err?.message?.includes('abort')) ) { - return intermediateReply; + return this.getStreamText(intermediateReply); } if ( err?.message?.includes( @@ -1225,10 +1502,18 @@ ${convo} (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) ) { logger.error('[OpenAIClient] Known OpenAI error:', err); - return intermediateReply; + if (this.streamHandler && this.streamHandler.reasoningTokens.length) { + return this.getStreamText(); + } else if (intermediateReply.length > 0) { + return this.getStreamText(intermediateReply); + } else { + throw err; + } } else if (err instanceof OpenAI.APIError) { - if (intermediateReply) { - return intermediateReply; + if (this.streamHandler && this.streamHandler.reasoningTokens.length) { + return this.getStreamText(); + } else if (intermediateReply.length > 0) { + return 
this.getStreamText(intermediateReply); } else { throw err; } diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js index 033c122664..bfe222e248 100644 --- a/api/app/clients/PluginsClient.js +++ b/api/app/clients/PluginsClient.js @@ -1,13 +1,12 @@ const OpenAIClient = require('./OpenAIClient'); -const { CallbackManager } = require('langchain/callbacks'); +const { CallbackManager } = require('@langchain/core/callbacks/manager'); const { BufferMemory, ChatMessageHistory } = require('langchain/memory'); -const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents'); const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers'); +const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents'); const { processFileURL } = require('~/server/services/Files/process'); const { EModelEndpoint } = require('librechat-data-provider'); const { formatLangChainMessages } = require('./prompts'); const checkBalance = require('~/models/checkBalance'); -const { SelfReflectionTool } = require('./tools'); const { isEnabled } = require('~/server/utils'); const { extractBaseURL } = require('~/utils'); const { loadTools } = require('./tools/util'); @@ -40,10 +39,16 @@ class PluginsClient extends OpenAIClient { getSaveOptions() { return { + artifacts: this.options.artifacts, chatGptLabel: this.options.chatGptLabel, + modelLabel: this.options.modelLabel, promptPrefix: this.options.promptPrefix, + tools: this.options.tools, ...this.modelOptions, agentOptions: this.agentOptions, + iconURL: this.options.iconURL, + greeting: this.options.greeting, + spec: this.options.spec, }; } @@ -99,7 +104,7 @@ class PluginsClient extends OpenAIClient { chatHistory: new ChatMessageHistory(pastMessages), }); - this.tools = await loadTools({ + const { loadedTools } = await loadTools({ user, model, tools: this.options.tools, @@ -113,14 +118,15 @@ class PluginsClient extends OpenAIClient { processFileURL, message, }, + useSpecs: 
true, }); - if (this.tools.length > 0 && !this.functionsAgent) { - this.tools.push(new SelfReflectionTool({ message, isGpt3: false })); - } else if (this.tools.length === 0) { + if (loadedTools.length === 0) { return; } + this.tools = loadedTools; + logger.debug('[PluginsClient] Requested Tools', this.options.tools); logger.debug( '[PluginsClient] Loaded Tools', @@ -139,14 +145,22 @@ class PluginsClient extends OpenAIClient { // initialize agent const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent; + + let customInstructions = (this.options.promptPrefix ?? '').trim(); + if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) { + customInstructions = `${customInstructions ?? ''}\n${this.options.artifactsPrompt}`.trim(); + } + this.executor = await initializer({ model, signal, pastMessages, tools: this.tools, - currentDateString: this.currentDateString, + customInstructions, verbose: this.options.debug, returnIntermediateSteps: true, + customName: this.options.chatGptLabel, + currentDateString: this.currentDateString, callbackManager: CallbackManager.fromHandlers({ async handleAgentAction(action, runId) { handleAction(action, runId, onAgentAction); @@ -214,6 +228,13 @@ class PluginsClient extends OpenAIClient { } } + /** + * + * @param {TMessage} responseMessage + * @param {Partial} saveOptions + * @param {string} user + * @returns + */ async handleResponseMessage(responseMessage, saveOptions, user) { const { output, errorMessage, ...result } = this.result; logger.debug('[PluginsClient][handleResponseMessage] Output:', { @@ -232,22 +253,33 @@ class PluginsClient extends OpenAIClient { await this.recordTokenUsage(responseMessage); } - await this.saveMessageToDatabase(responseMessage, saveOptions, user); + this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user); delete responseMessage.tokenCount; return { ...responseMessage, ...result }; } async sendMessage(message, opts = 
{}) { + /** @type {{ filteredTools: string[], includedTools: string[] }} */ + const { filteredTools = [], includedTools = [] } = this.options.req.app.locals; + + if (includedTools.length > 0) { + const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin)); + this.options.tools = tools; + } else { + const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin)); + this.options.tools = tools; + } + // If a message is edited, no tools can be used. const completionMode = this.options.tools.length === 0 || opts.isEdited; if (completionMode) { this.setOptions(opts); return super.sendMessage(message, opts); } - logger.debug('[PluginsClient] sendMessage', { message, opts }); + + logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts }); const { user, - isEdited, conversationId, responseMessageId, saveOptions, @@ -258,6 +290,14 @@ class PluginsClient extends OpenAIClient { onToolEnd, } = await this.handleStartMethods(message, opts); + if (opts.progressCallback) { + opts.onProgress = opts.progressCallback.call(null, { + ...(opts.progressOptions ?? 
{}), + parentMessageId: userMessage.messageId, + messageId: responseMessageId, + }); + } + this.currentMessages.push(userMessage); let { @@ -286,7 +326,15 @@ class PluginsClient extends OpenAIClient { if (payload) { this.currentMessages = payload; } - await this.saveMessageToDatabase(userMessage, saveOptions, user); + + if (!this.skipSaveUserMessage) { + this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user); + if (typeof opts?.getReqData === 'function') { + opts.getReqData({ + userMessagePromise: this.userMessagePromise, + }); + } + } if (isEnabled(process.env.CHECK_BALANCE)) { await checkBalance({ @@ -304,11 +352,12 @@ class PluginsClient extends OpenAIClient { } const responseMessage = { + endpoint: EModelEndpoint.gptPlugins, + iconURL: this.options.iconURL, messageId: responseMessageId, conversationId, parentMessageId: userMessage.messageId, isCreatedByUser: false, - isEdited, model: this.modelOptions.model, sender: this.sender, promptTokens, @@ -397,7 +446,6 @@ class PluginsClient extends OpenAIClient { const instructionsPayload = { role: 'system', - name: 'instructions', content: promptPrefix, }; diff --git a/api/app/clients/agents/CustomAgent/CustomAgent.js b/api/app/clients/agents/CustomAgent/CustomAgent.js index cc9b63d357..bd270361e8 100644 --- a/api/app/clients/agents/CustomAgent/CustomAgent.js +++ b/api/app/clients/agents/CustomAgent/CustomAgent.js @@ -1,5 +1,5 @@ const { ZeroShotAgent } = require('langchain/agents'); -const { PromptTemplate, renderTemplate } = require('langchain/prompts'); +const { PromptTemplate, renderTemplate } = require('@langchain/core/prompts'); const { gpt3, gpt4 } = require('./instructions'); class CustomAgent extends ZeroShotAgent { diff --git a/api/app/clients/agents/CustomAgent/initializeCustomAgent.js b/api/app/clients/agents/CustomAgent/initializeCustomAgent.js index 2a7813eea6..496dba337f 100644 --- a/api/app/clients/agents/CustomAgent/initializeCustomAgent.js +++ 
b/api/app/clients/agents/CustomAgent/initializeCustomAgent.js @@ -7,16 +7,24 @@ const { ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, -} = require('langchain/prompts'); +} = require('@langchain/core/prompts'); const initializeCustomAgent = async ({ tools, model, pastMessages, + customName, + customInstructions, currentDateString, ...rest }) => { let prompt = CustomAgent.createPrompt(tools, { currentDateString, model: model.modelName }); + if (customName) { + prompt = `You are "${customName}".\n${prompt}`; + } + if (customInstructions) { + prompt = `${prompt}\n${customInstructions}`; + } const chatPrompt = ChatPromptTemplate.fromMessages([ new SystemMessagePromptTemplate(prompt), diff --git a/api/app/clients/agents/CustomAgent/instructions.js b/api/app/clients/agents/CustomAgent/instructions.js index 1689475c5f..7e8aad5da3 100644 --- a/api/app/clients/agents/CustomAgent/instructions.js +++ b/api/app/clients/agents/CustomAgent/instructions.js @@ -1,44 +1,3 @@ -/* -module.exports = `You are ChatGPT, a Large Language model with useful tools. - -Talk to the human and provide meaningful answers when questions are asked. - -Use the tools when you need them, but use your own knowledge if you are confident of the answer. Keep answers short and concise. - -A tool is not usually needed for creative requests, so do your best to answer them without tools. - -Avoid repeating identical answers if it appears before. Only fulfill the human's requests, do not create extra steps beyond what the human has asked for. - -Your input for 'Action' should be the name of tool used only. - -Be honest. If you can't answer something, or a tool is not appropriate, say you don't know or answer to the best of your ability. - -Attempt to fulfill the human's requests in as few actions as possible`; -*/ - -// module.exports = `You are ChatGPT, a highly knowledgeable and versatile large language model. 
- -// Engage with the Human conversationally, providing concise and meaningful answers to questions. Utilize built-in tools when necessary, except for creative requests, where relying on your own knowledge is preferred. Aim for variety and avoid repetitive answers. - -// For your 'Action' input, state the name of the tool used only, and honor user requests without adding extra steps. Always be honest; if you cannot provide an appropriate answer or tool, admit that or do your best. - -// Strive to meet the user's needs efficiently with minimal actions.`; - -// import { -// BasePromptTemplate, -// BaseStringPromptTemplate, -// SerializedBasePromptTemplate, -// renderTemplate, -// } from "langchain/prompts"; - -// prefix: `You are ChatGPT, a highly knowledgeable and versatile large language model. -// Your objective is to help users by understanding their intent and choosing the best action. Prioritize direct, specific responses. Use concise, varied answers and rely on your knowledge for creative tasks. Utilize tools when needed, and structure results for machine compatibility. -// prefix: `Objective: to comprehend human intentions based on user input and available tools. Goal: identify the best action to directly address the human's query. In your subsequent steps, you will utilize the chosen action. You may select multiple actions and list them in a meaningful order. Prioritize actions that directly relate to the user's query over general ones. Ensure that the generated thought is highly specific and explicit to best match the user's expectations. Construct the result in a manner that an online open-API would most likely expect. Provide concise and meaningful answers to human queries. Utilize tools when necessary. Relying on your own knowledge is preferred for creative requests. Aim for variety and avoid repetitive answers. 
- -// # Available Actions & Tools: -// N/A: no suitable action, use your own knowledge.`, -// suffix: `Remember, all your responses MUST adhere to the described format and only respond if the format is followed. Output exactly with the requested format, avoiding any other text as this will be parsed by a machine. Following 'Action:', provide only one of the actions listed above. If a tool is not necessary, deduce this quickly and finish your response. Honor the human's requests without adding extra steps. Carry out tasks in the sequence written by the human. Always be honest; if you cannot provide an appropriate answer or tool, do your best with your own knowledge. Strive to meet the user's needs efficiently with minimal actions.`; - module.exports = { 'gpt3-v1': { prefix: `Objective: Understand human intentions using user input and available tools. Goal: Identify the most suitable actions to directly address user queries. diff --git a/api/app/clients/agents/Functions/FunctionsAgent.js b/api/app/clients/agents/Functions/FunctionsAgent.js deleted file mode 100644 index 476a6bda5c..0000000000 --- a/api/app/clients/agents/Functions/FunctionsAgent.js +++ /dev/null @@ -1,122 +0,0 @@ -const { Agent } = require('langchain/agents'); -const { LLMChain } = require('langchain/chains'); -const { FunctionChatMessage, AIChatMessage } = require('langchain/schema'); -const { - ChatPromptTemplate, - MessagesPlaceholder, - SystemMessagePromptTemplate, - HumanMessagePromptTemplate, -} = require('langchain/prompts'); -const { logger } = require('~/config'); - -const PREFIX = 'You are a helpful AI assistant.'; - -function parseOutput(message) { - if (message.additional_kwargs.function_call) { - const function_call = message.additional_kwargs.function_call; - return { - tool: function_call.name, - toolInput: function_call.arguments ? 
JSON.parse(function_call.arguments) : {}, - log: message.text, - }; - } else { - return { returnValues: { output: message.text }, log: message.text }; - } -} - -class FunctionsAgent extends Agent { - constructor(input) { - super({ ...input, outputParser: undefined }); - this.tools = input.tools; - } - - lc_namespace = ['langchain', 'agents', 'openai']; - - _agentType() { - return 'openai-functions'; - } - - observationPrefix() { - return 'Observation: '; - } - - llmPrefix() { - return 'Thought:'; - } - - _stop() { - return ['Observation:']; - } - - static createPrompt(_tools, fields) { - const { prefix = PREFIX, currentDateString } = fields || {}; - - return ChatPromptTemplate.fromMessages([ - SystemMessagePromptTemplate.fromTemplate(`Date: ${currentDateString}\n${prefix}`), - new MessagesPlaceholder('chat_history'), - HumanMessagePromptTemplate.fromTemplate('Query: {input}'), - new MessagesPlaceholder('agent_scratchpad'), - ]); - } - - static fromLLMAndTools(llm, tools, args) { - FunctionsAgent.validateTools(tools); - const prompt = FunctionsAgent.createPrompt(tools, args); - const chain = new LLMChain({ - prompt, - llm, - callbacks: args?.callbacks, - }); - return new FunctionsAgent({ - llmChain: chain, - allowedTools: tools.map((t) => t.name), - tools, - }); - } - - async constructScratchPad(steps) { - return steps.flatMap(({ action, observation }) => [ - new AIChatMessage('', { - function_call: { - name: action.tool, - arguments: JSON.stringify(action.toolInput), - }, - }), - new FunctionChatMessage(observation, action.tool), - ]); - } - - async plan(steps, inputs, callbackManager) { - // Add scratchpad and stop to inputs - const thoughts = await this.constructScratchPad(steps); - const newInputs = Object.assign({}, inputs, { agent_scratchpad: thoughts }); - if (this._stop().length !== 0) { - newInputs.stop = this._stop(); - } - - // Split inputs between prompt and llm - const llm = this.llmChain.llm; - const valuesForPrompt = Object.assign({}, newInputs); - 
const valuesForLLM = { - tools: this.tools, - }; - for (let i = 0; i < this.llmChain.llm.callKeys.length; i++) { - const key = this.llmChain.llm.callKeys[i]; - if (key in inputs) { - valuesForLLM[key] = inputs[key]; - delete valuesForPrompt[key]; - } - } - - const promptValue = await this.llmChain.prompt.formatPromptValue(valuesForPrompt); - const message = await llm.predictMessages( - promptValue.toChatMessages(), - valuesForLLM, - callbackManager, - ); - logger.debug('[FunctionsAgent] plan message', message); - return parseOutput(message); - } -} - -module.exports = FunctionsAgent; diff --git a/api/app/clients/agents/Functions/initializeFunctionsAgent.js b/api/app/clients/agents/Functions/initializeFunctionsAgent.js index 3d1a1704ea..3e813bdbcc 100644 --- a/api/app/clients/agents/Functions/initializeFunctionsAgent.js +++ b/api/app/clients/agents/Functions/initializeFunctionsAgent.js @@ -10,6 +10,8 @@ const initializeFunctionsAgent = async ({ tools, model, pastMessages, + customName, + customInstructions, currentDateString, ...rest }) => { @@ -24,7 +26,13 @@ const initializeFunctionsAgent = async ({ returnMessages: true, }); - const prefix = addToolDescriptions(`Current Date: ${currentDateString}\n${PREFIX}`, tools); + let prefix = addToolDescriptions(`Current Date: ${currentDateString}\n${PREFIX}`, tools); + if (customName) { + prefix = `You are "${customName}".\n${prefix}`; + } + if (customInstructions) { + prefix = `${prefix}\n${customInstructions}`; + } return await initializeAgentExecutorWithOptions(tools, model, { agentType: 'openai-functions', diff --git a/api/app/clients/document/tokenSplit.js b/api/app/clients/document/tokenSplit.js index 12c0ee6640..497249c519 100644 --- a/api/app/clients/document/tokenSplit.js +++ b/api/app/clients/document/tokenSplit.js @@ -1,4 +1,4 @@ -const { TokenTextSplitter } = require('langchain/text_splitter'); +const { TokenTextSplitter } = require('@langchain/textsplitters'); /** * Splits a given text by token chunks, based on 
the provided parameters for the TokenTextSplitter. diff --git a/api/app/clients/document/tokenSplit.spec.js b/api/app/clients/document/tokenSplit.spec.js index 39e9068d69..d39c7d73cd 100644 --- a/api/app/clients/document/tokenSplit.spec.js +++ b/api/app/clients/document/tokenSplit.spec.js @@ -12,7 +12,7 @@ describe('tokenSplit', () => { returnSize: 5, }); - expect(result).toEqual(['. Null', ' Nullam', 'am id', ' id.', '.']); + expect(result).toEqual(['it.', '. Null', ' Nullam', 'am id', ' id.']); }); it('returns correct text chunks with default parameters', async () => { diff --git a/api/app/clients/llm/RunManager.js b/api/app/clients/llm/RunManager.js index 7ab0b06b52..51abe480a9 100644 --- a/api/app/clients/llm/RunManager.js +++ b/api/app/clients/llm/RunManager.js @@ -1,5 +1,5 @@ const { createStartHandler } = require('~/app/clients/callbacks'); -const spendTokens = require('~/models/spendTokens'); +const { spendTokens } = require('~/models/spendTokens'); const { logger } = require('~/config'); class RunManager { diff --git a/api/app/clients/llm/createCoherePayload.js b/api/app/clients/llm/createCoherePayload.js new file mode 100644 index 0000000000..58803d76f3 --- /dev/null +++ b/api/app/clients/llm/createCoherePayload.js @@ -0,0 +1,85 @@ +const { CohereConstants } = require('librechat-data-provider'); +const { titleInstruction } = require('../prompts/titlePrompts'); + +// Mapping OpenAI roles to Cohere roles +const roleMap = { + user: CohereConstants.ROLE_USER, + assistant: CohereConstants.ROLE_CHATBOT, + system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly +}; + +/** + * Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format. + * Now includes handling for "system" roles explicitly mentioned. + * + * @param {Object} options - Object containing the model options. + * @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options. 
+ * @returns {CohereChatStreamRequest} Cohere-compatible chat API payload. + */ +function createCoherePayload({ modelOptions }) { + /** @type {string | undefined} */ + let preamble; + let latestUserMessageContent = ''; + const { + stream, + stop, + top_p, + temperature, + frequency_penalty, + presence_penalty, + max_tokens, + messages, + model, + ...rest + } = modelOptions; + + // Filter out the latest user message and transform remaining messages to Cohere's chat_history format + let chatHistory = messages.reduce((acc, message, index, arr) => { + const isLastUserMessage = index === arr.length - 1 && message.role === 'user'; + + const messageContent = + typeof message.content === 'string' + ? message.content + : message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' '); + + if (isLastUserMessage) { + latestUserMessageContent = messageContent; + } else { + acc.push({ + role: roleMap[message.role] || CohereConstants.ROLE_USER, + message: messageContent, + }); + } + + return acc; + }, []); + + if ( + chatHistory.length === 1 && + chatHistory[0].role === CohereConstants.ROLE_SYSTEM && + !latestUserMessageContent.length + ) { + const message = chatHistory[0].message; + latestUserMessageContent = message.includes(titleInstruction) + ? CohereConstants.TITLE_MESSAGE + : '.'; + preamble = message; + } + + return { + message: latestUserMessageContent, + model: model, + chatHistory, + stream: stream ?? 
false, + temperature: temperature, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + maxTokens: max_tokens, + stopSequences: stop, + preamble, + p: top_p, + ...rest, + }; +} + +module.exports = createCoherePayload; diff --git a/api/app/clients/llm/createLLM.js b/api/app/clients/llm/createLLM.js index a944d0c32d..7dc0d40ceb 100644 --- a/api/app/clients/llm/createLLM.js +++ b/api/app/clients/llm/createLLM.js @@ -1,4 +1,4 @@ -const { ChatOpenAI } = require('langchain/chat_models/openai'); +const { ChatOpenAI } = require('@langchain/openai'); const { sanitizeModelName, constructAzureURL } = require('~/utils'); const { isEnabled } = require('~/server/utils'); @@ -8,7 +8,7 @@ const { isEnabled } = require('~/server/utils'); * @param {Object} options - The options for creating the LLM. * @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings. * @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers. - * @param {Callbacks} options.callbacks - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count. + * @param {Callbacks} [options.callbacks] - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count. * @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode. * @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication. * @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations. 
@@ -17,7 +17,7 @@ const { isEnabled } = require('~/server/utils'); * * @example * const llm = createLLM({ - * modelOptions: { modelName: 'gpt-3.5-turbo', temperature: 0.2 }, + * modelOptions: { modelName: 'gpt-4o-mini', temperature: 0.2 }, * configOptions: { basePath: 'https://example.api/path' }, * callbacks: { onMessage: handleMessage }, * openAIApiKey: 'your-api-key' @@ -57,7 +57,7 @@ function createLLM({ if (azure && configOptions.basePath) { const azureURL = constructAzureURL({ baseURL: configOptions.basePath, - azure: azureOptions, + azureOptions, }); azureOptions.azureOpenAIBasePath = azureURL.split( `/${azureOptions.azureOpenAIApiDeploymentName}`, diff --git a/api/app/clients/llm/index.js b/api/app/clients/llm/index.js index 46478ade63..2e09bbb841 100644 --- a/api/app/clients/llm/index.js +++ b/api/app/clients/llm/index.js @@ -1,7 +1,9 @@ const createLLM = require('./createLLM'); const RunManager = require('./RunManager'); +const createCoherePayload = require('./createCoherePayload'); module.exports = { createLLM, RunManager, + createCoherePayload, }; diff --git a/api/app/clients/memory/summaryBuffer.demo.js b/api/app/clients/memory/summaryBuffer.demo.js index c47b3c45f6..fc575c3032 100644 --- a/api/app/clients/memory/summaryBuffer.demo.js +++ b/api/app/clients/memory/summaryBuffer.demo.js @@ -1,9 +1,9 @@ require('dotenv').config(); -const { ChatOpenAI } = require('langchain/chat_models/openai'); +const { ChatOpenAI } = require('@langchain/openai'); const { getBufferString, ConversationSummaryBufferMemory } = require('langchain/memory'); const chatPromptMemory = new ConversationSummaryBufferMemory({ - llm: new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 }), + llm: new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 }), maxTokenLimit: 10, returnMessages: true, }); diff --git a/api/app/clients/output_parsers/addImages.js b/api/app/clients/output_parsers/addImages.js index ec04bcac86..7bef60259c 100644 --- 
a/api/app/clients/output_parsers/addImages.js +++ b/api/app/clients/output_parsers/addImages.js @@ -60,10 +60,10 @@ function addImages(intermediateSteps, responseMessage) { if (!observation || !observation.includes('![')) { return; } - const observedImagePath = observation.match(/!\[.*\]\([^)]*\)/g); + const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g); if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) { - responseMessage.text += '\n' + observation; - logger.debug('[addImages] added image from intermediateSteps:', observation); + responseMessage.text += '\n' + observedImagePath[0]; + logger.debug('[addImages] added image from intermediateSteps:', observedImagePath[0]); } }); } diff --git a/api/app/clients/output_parsers/addImages.spec.js b/api/app/clients/output_parsers/addImages.spec.js index eb4d87d65a..7c5a04137e 100644 --- a/api/app/clients/output_parsers/addImages.spec.js +++ b/api/app/clients/output_parsers/addImages.spec.js @@ -81,4 +81,62 @@ describe('addImages', () => { addImages(intermediateSteps, responseMessage); expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`); }); + + it('should extract only image markdowns when there is text between them', () => { + const markdownWithTextBetweenImages = ` + ![image1](/images/image1.png) + Some text between images that should not be included. + ![image2](/images/image2.png) + More text that should be ignored. 
+ ![image3](/images/image3.png) + `; + intermediateSteps.push({ observation: markdownWithTextBetweenImages }); + addImages(intermediateSteps, responseMessage); + expect(responseMessage.text).toBe('\n![image1](/images/image1.png)'); + }); + + it('should only return the first image when multiple images are present', () => { + const markdownWithMultipleImages = ` + ![image1](/images/image1.png) + ![image2](/images/image2.png) + ![image3](/images/image3.png) + `; + intermediateSteps.push({ observation: markdownWithMultipleImages }); + addImages(intermediateSteps, responseMessage); + expect(responseMessage.text).toBe('\n![image1](/images/image1.png)'); + }); + + it('should not include any text or metadata surrounding the image markdown', () => { + const markdownWithMetadata = ` + Title: Test Document + Author: John Doe + ![image1](/images/image1.png) + Some content after the image. + Vector values: [0.1, 0.2, 0.3] + `; + intermediateSteps.push({ observation: markdownWithMetadata }); + addImages(intermediateSteps, responseMessage); + expect(responseMessage.text).toBe('\n![image1](/images/image1.png)'); + }); + + it('should handle complex markdown with multiple images and only return the first one', () => { + const complexMarkdown = ` + # Document Title + + ## Section 1 + Here's some text with an embedded image: + ![image1](/images/image1.png) + + ## Section 2 + More text here... 
+ ![image2](/images/image2.png) + + ### Subsection + Even more content + ![image3](/images/image3.png) + `; + intermediateSteps.push({ observation: complexMarkdown }); + addImages(intermediateSteps, responseMessage); + expect(responseMessage.text).toBe('\n![image1](/images/image1.png)'); + }); }); diff --git a/api/app/clients/prompts/addCacheControl.js b/api/app/clients/prompts/addCacheControl.js new file mode 100644 index 0000000000..eed5910dc9 --- /dev/null +++ b/api/app/clients/prompts/addCacheControl.js @@ -0,0 +1,43 @@ +/** + * Anthropic API: Adds cache control to the appropriate user messages in the payload. + * @param {Array} messages - The array of message objects. + * @returns {Array} - The updated array of message objects with cache control added. + */ +function addCacheControl(messages) { + if (!Array.isArray(messages) || messages.length < 2) { + return messages; + } + + const updatedMessages = [...messages]; + let userMessagesModified = 0; + + for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) { + const message = updatedMessages[i]; + if (message.role !== 'user') { + continue; + } + + if (typeof message.content === 'string') { + message.content = [ + { + type: 'text', + text: message.content, + cache_control: { type: 'ephemeral' }, + }, + ]; + userMessagesModified++; + } else if (Array.isArray(message.content)) { + for (let j = message.content.length - 1; j >= 0; j--) { + if (message.content[j].type === 'text') { + message.content[j].cache_control = { type: 'ephemeral' }; + userMessagesModified++; + break; + } + } + } + } + + return updatedMessages; +} + +module.exports = addCacheControl; diff --git a/api/app/clients/prompts/addCacheControl.spec.js b/api/app/clients/prompts/addCacheControl.spec.js new file mode 100644 index 0000000000..c46ffd95e3 --- /dev/null +++ b/api/app/clients/prompts/addCacheControl.spec.js @@ -0,0 +1,227 @@ +const addCacheControl = require('./addCacheControl'); + +describe('addCacheControl', () => { 
+ test('should add cache control to the last two user messages with array content', () => { + const messages = [ + { role: 'user', content: [{ type: 'text', text: 'Hello' }] }, + { role: 'assistant', content: [{ type: 'text', text: 'Hi there' }] }, + { role: 'user', content: [{ type: 'text', text: 'How are you?' }] }, + { role: 'assistant', content: [{ type: 'text', text: 'I\'m doing well, thanks!' }] }, + { role: 'user', content: [{ type: 'text', text: 'Great!' }] }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).not.toHaveProperty('cache_control'); + expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' }); + expect(result[4].content[0].cache_control).toEqual({ type: 'ephemeral' }); + }); + + test('should add cache control to the last two user messages with string content', () => { + const messages = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + { role: 'user', content: 'How are you?' }, + { role: 'assistant', content: 'I\'m doing well, thanks!' }, + { role: 'user', content: 'Great!' }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content).toBe('Hello'); + expect(result[2].content[0]).toEqual({ + type: 'text', + text: 'How are you?', + cache_control: { type: 'ephemeral' }, + }); + expect(result[4].content[0]).toEqual({ + type: 'text', + text: 'Great!', + cache_control: { type: 'ephemeral' }, + }); + }); + + test('should handle mixed string and array content', () => { + const messages = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + { role: 'user', content: [{ type: 'text', text: 'How are you?' 
}] }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).toEqual({ + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }); + expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' }); + }); + + test('should handle less than two user messages', () => { + const messages = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).toEqual({ + type: 'text', + text: 'Hello', + cache_control: { type: 'ephemeral' }, + }); + expect(result[1].content).toBe('Hi there'); + }); + + test('should return original array if no user messages', () => { + const messages = [ + { role: 'assistant', content: 'Hi there' }, + { role: 'assistant', content: 'How can I help?' }, + ]; + + const result = addCacheControl(messages); + + expect(result).toEqual(messages); + }); + + test('should handle empty array', () => { + const messages = []; + const result = addCacheControl(messages); + expect(result).toEqual([]); + }); + + test('should handle non-array input', () => { + const messages = 'not an array'; + const result = addCacheControl(messages); + expect(result).toBe('not an array'); + }); + + test('should not modify assistant messages', () => { + const messages = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + { role: 'user', content: 'How are you?' }, + ]; + + const result = addCacheControl(messages); + + expect(result[1].content).toBe('Hi there'); + }); + + test('should handle multiple content items in user messages', () => { + const messages = [ + { + role: 'user', + content: [ + { type: 'text', text: 'Hello' }, + { type: 'image', url: 'http://example.com/image.jpg' }, + { type: 'text', text: 'This is an image' }, + ], + }, + { role: 'assistant', content: 'Hi there' }, + { role: 'user', content: 'How are you?' 
}, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).not.toHaveProperty('cache_control'); + expect(result[0].content[1]).not.toHaveProperty('cache_control'); + expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' }); + expect(result[2].content[0]).toEqual({ + type: 'text', + text: 'How are you?', + cache_control: { type: 'ephemeral' }, + }); + }); + + test('should handle an array with mixed content types', () => { + const messages = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + { role: 'user', content: [{ type: 'text', text: 'How are you?' }] }, + { role: 'assistant', content: 'I\'m doing well, thanks!' }, + { role: 'user', content: 'Great!' }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content).toEqual('Hello'); + expect(result[2].content[0]).toEqual({ + type: 'text', + text: 'How are you?', + cache_control: { type: 'ephemeral' }, + }); + expect(result[4].content).toEqual([ + { + type: 'text', + text: 'Great!', + cache_control: { type: 'ephemeral' }, + }, + ]); + expect(result[1].content).toBe('Hi there'); + expect(result[3].content).toBe('I\'m doing well, thanks!'); + }); + + test('should handle edge case with multiple content types', () => { + const messages = [ + { + role: 'user', + content: [ + { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' }, + }, + { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' }, + }, + { type: 'text', text: 'what do all these images have in common' }, + ], + }, + { role: 'assistant', content: 'I see multiple images.' }, + { role: 'user', content: 'Correct!' 
}, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).not.toHaveProperty('cache_control'); + expect(result[0].content[1]).not.toHaveProperty('cache_control'); + expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' }); + expect(result[2].content[0]).toEqual({ + type: 'text', + text: 'Correct!', + cache_control: { type: 'ephemeral' }, + }); + }); + + test('should handle user message with no text block', () => { + const messages = [ + { + role: 'user', + content: [ + { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' }, + }, + { + type: 'image', + source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' }, + }, + ], + }, + { role: 'assistant', content: 'I see two images.' }, + { role: 'user', content: 'Correct!' }, + ]; + + const result = addCacheControl(messages); + + expect(result[0].content[0]).not.toHaveProperty('cache_control'); + expect(result[0].content[1]).not.toHaveProperty('cache_control'); + expect(result[2].content[0]).toEqual({ + type: 'text', + text: 'Correct!', + cache_control: { type: 'ephemeral' }, + }); + }); +}); diff --git a/api/app/clients/prompts/artifacts.js b/api/app/clients/prompts/artifacts.js new file mode 100644 index 0000000000..b907a16b56 --- /dev/null +++ b/api/app/clients/prompts/artifacts.js @@ -0,0 +1,527 @@ +const dedent = require('dedent'); +const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider'); +const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate'); +const { components } = require('~/app/clients/prompts/shadcn-docs/components'); + +// eslint-disable-next-line no-unused-vars +const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... 
+- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... +- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. +- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. 
+- Always provide complete, specific, and fully functional content without any placeholders, ellipses, or 'remains the same' comments. + + + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. 
\`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.263.1 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. 
Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type. + + +Here are some examples of correct usage of artifacts: + + + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + + + Can you create a simple flow chart showing the process of making tea using Mermaid? + + + Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + + +`; +const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... 
+- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... +- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. +- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. 
+- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments. +- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly. + + + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - SVG: "image/svg+xml" + - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags. 
+ - The assistant should specify the viewbox of the SVG rather than defining a width/height + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. \`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The three.js library is available to be imported, e.g. \`import * as THREE from "three";\` + - The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\` + - The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. 
+ - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses. + - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type. + + +Here are some examples of correct usage of artifacts: + + + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + + + Can you create a simple flow chart showing the process of making tea using Mermaid? + + + Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. 
Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + + + + + Create a simple React counter component + + Here's a simple React counter component: + + :::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"} + \`\`\` + import { useState } from 'react'; + + export default function Counter() { + const [count, setCount] = useState(0); + return ( +
+

Count: {count}

+ +
+ ); + } + \`\`\` + ::: + + This component creates a simple counter with an increment button. +
+
+ + + Create a basic HTML structure for a blog post + + Here's a basic HTML structure for a blog post: + + :::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"} + \`\`\` + + + + + + My Blog Post + + + +
+

My First Blog Post

+
+
+
+

This is the content of my blog post. It's short and sweet!

+
+
+
+

© 2023 My Blog

+
+ + + \`\`\` + ::: + + This HTML structure provides a simple layout for a blog post. +
+
+
`; + +const artifactsOpenAIPrompt = dedent`The assistant can create and reference artifacts during conversations. + +Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity. + +# Good artifacts are... +- Substantial content (>15 lines) +- Content that the user is likely to modify, iterate on, or take ownership of +- Self-contained, complex content that can be understood on its own, without context from the conversation +- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations) +- Content likely to be referenced or reused multiple times + +# Don't use artifacts for... +- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples +- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept +- Suggestions, commentary, or feedback on existing artifacts +- Conversational or explanatory content that doesn't represent a standalone piece of work +- Content that is dependent on the current conversational context to be useful +- Content that is unlikely to be modified or iterated upon by the user +- Request from users that appears to be a one-off question + +# Usage notes +- One artifact per message unless specifically requested +- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users. +- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions. +- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users. 
+- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation. +- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments. +- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly. + +## Artifact Instructions + When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps: + + 1. Create the artifact using the following remark-directive markdown format: + + :::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"} + \`\`\` + Your artifact content here + \`\`\` + ::: + + a. Example of correct format: + + :::artifact{identifier="example-artifact" type="text/plain" title="Example Artifact"} + \`\`\` + This is the content of the artifact. + It can span multiple lines. + \`\`\` + ::: + + b. Common mistakes to avoid: + - Don't split the opening ::: line + - Don't add extra backticks outside the artifact structure + - Don't omit the closing ::: + + 2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact. + 3. Include a \`title\` attribute to provide a brief title or description of the content. + 4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute: + - HTML: "text/html" + - The user interface can render single file HTML pages placed within the artifact tags. 
HTML, JS, and CSS should be in a single file when using the \`text/html\` type. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - The only place external scripts can be imported from is https://cdnjs.cloudflare.com + - SVG: "image/svg+xml" + - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags. + - The assistant should specify the viewbox of the SVG rather than defining a width/height + - Mermaid Diagrams: "application/vnd.mermaid" + - The user interface will render Mermaid diagrams placed within the artifact tags. + - React Components: "application/vnd.react" + - Use this for displaying either: React elements, e.g. \`Hello World!\`, React pure functional components, e.g. \`() => Hello World!\`, React functional components with Hooks, or React component classes + - When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export. + - Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`). + - Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\` + - The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`\` + - The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \` ...\` + - The three.js library is available to be imported, e.g. \`import * as THREE from "three";\` + - The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\` + - The react-day-picker library is available to be imported, e.g. 
\`import { DayPicker } from "react-day-picker";\` + - The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary. + - Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`. + - NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED. + - Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`placeholder\` + - When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses. + - If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component. + 5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...". + 6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact. + 7. NEVER use triple backticks to enclose the artifact, ONLY the content within the artifact. + +Here are some examples of correct usage of artifacts: + +## Examples + +### Example 1 + + This example demonstrates how to create a Mermaid artifact for a simple flow chart. + + User: Can you create a simple flow chart showing the process of making tea using Mermaid? + + Assistant: Sure! 
Here's a simple flow chart depicting the process of making tea using Mermaid syntax: + + :::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"} + \`\`\`mermaid + graph TD + A[Start] --> B{Water boiled?} + B -->|Yes| C[Add tea leaves to cup] + B -->|No| D[Boil water] + D --> B + C --> E[Pour boiling water into cup] + E --> F[Steep tea for desired time] + F --> G[Remove tea leaves] + G --> H[Add milk or sugar, if desired] + H --> I[Enjoy your tea!] + I --> J[End] + \`\`\` + ::: + + This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process: + + 1. Start + 2. Check if water is boiled + 3. If not boiled, boil the water + 4. Once water is boiled, add tea leaves to the cup + 5. Pour boiling water into the cup + 6. Steep the tea for the desired time + 7. Remove the tea leaves + 8. Optionally add milk or sugar + 9. Enjoy your tea! + 10. End + + This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions! + +--- + +### Example 2 + + User: Create a simple React counter component + + Assistant: Here's a simple React counter component: + + :::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"} + \`\`\` + import { useState } from 'react'; + + export default function Counter() { + const [count, setCount] = useState(0); + return ( +
+      <div>
+        <p>Count: {count}</p>
+        <button onClick={() => setCount(count + 1)}>
+          Increment
+        </button>
+      </div>
+ ); + } + \`\`\` + ::: + + This component creates a simple counter with an increment button. + +--- + +### Example 3 + User: Create a basic HTML structure for a blog post + Assistant: Here's a basic HTML structure for a blog post: + + :::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"} + \`\`\` + + + + + + My Blog Post + + + +
+    <header>
+        <h1>My First Blog Post</h1>
+    </header>
+    <main>
+        <article>
+            <p>This is the content of my blog post. It's short and sweet!</p>
+        </article>
+    </main>
+    <footer>
+        <p>&copy; 2023 My Blog</p>
+    </footer>
+ + + \`\`\` + ::: + + This HTML structure provides a simple layout for a blog post. + +---`; + +/** + * + * @param {Object} params + * @param {EModelEndpoint | string} params.endpoint - The current endpoint + * @param {ArtifactModes} params.artifacts - The current artifact mode + * @returns + */ +const generateArtifactsPrompt = ({ endpoint, artifacts }) => { + if (artifacts === ArtifactModes.CUSTOM) { + return null; + } + + let prompt = artifactsPrompt; + if (endpoint !== EModelEndpoint.anthropic) { + prompt = artifactsOpenAIPrompt; + } + + if (artifacts === ArtifactModes.SHADCNUI) { + prompt += generateShadcnPrompt({ components, useXML: endpoint === EModelEndpoint.anthropic }); + } + + return prompt; +}; + +module.exports = generateArtifactsPrompt; diff --git a/api/app/clients/prompts/createContextHandlers.js b/api/app/clients/prompts/createContextHandlers.js new file mode 100644 index 0000000000..4dcfaf68e4 --- /dev/null +++ b/api/app/clients/prompts/createContextHandlers.js @@ -0,0 +1,160 @@ +const axios = require('axios'); +const { isEnabled } = require('~/server/utils'); +const { logger } = require('~/config'); + +const footer = `Use the context as your learned knowledge to better answer the user. + +In your response, remember to follow these guidelines: +- If you don't know the answer, simply say that you don't know. +- If you are unsure how to answer, ask for clarification. +- Avoid mentioning that you obtained the information from the context. 
+`; + +function createContextHandlers(req, userMessageContent) { + if (!process.env.RAG_API_URL) { + return; + } + + const queryPromises = []; + const processedFiles = []; + const processedIds = new Set(); + const jwtToken = req.headers.authorization.split(' ')[1]; + const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT); + + const query = async (file) => { + if (useFullContext) { + return axios.get(`${process.env.RAG_API_URL}/documents/${file.file_id}/context`, { + headers: { + Authorization: `Bearer ${jwtToken}`, + }, + }); + } + + return axios.post( + `${process.env.RAG_API_URL}/query`, + { + file_id: file.file_id, + query: userMessageContent, + k: 4, + }, + { + headers: { + Authorization: `Bearer ${jwtToken}`, + 'Content-Type': 'application/json', + }, + }, + ); + }; + + const processFile = async (file) => { + if (file.embedded && !processedIds.has(file.file_id)) { + try { + const promise = query(file); + queryPromises.push(promise); + processedFiles.push(file); + processedIds.add(file.file_id); + } catch (error) { + logger.error(`Error processing file ${file.filename}:`, error); + } + } + }; + + const createContext = async () => { + try { + if (!queryPromises.length || !processedFiles.length) { + return ''; + } + + const oneFile = processedFiles.length === 1; + const header = `The user has attached ${oneFile ? 'a' : processedFiles.length} file${ + !oneFile ? 's' : '' + } to the conversation:`; + + const files = `${ + oneFile + ? '' + : ` + ` + }${processedFiles + .map( + (file) => ` + + ${file.filename} + ${file.type} + `, + ) + .join('')}${ + oneFile + ? '' + : ` + ` + }`; + + const resolvedQueries = await Promise.all(queryPromises); + + const context = + resolvedQueries.length === 0 + ? '\n\tThe semantic search did not return any results.' 
+ : resolvedQueries + .map((queryResult, index) => { + const file = processedFiles[index]; + let contextItems = queryResult.data; + + const generateContext = (currentContext) => + ` + + ${file.filename} + ${currentContext} + + `; + + if (useFullContext) { + return generateContext(`\n${contextItems}`); + } + + contextItems = queryResult.data + .map((item) => { + const pageContent = item[0].page_content; + return ` + + + `; + }) + .join(''); + + return generateContext(contextItems); + }) + .join(''); + + if (useFullContext) { + const prompt = `${header} + ${context} + ${footer}`; + + return prompt; + } + + const prompt = `${header} + ${files} + + A semantic search was executed with the user's message as the query, retrieving the following context inside XML tags. + + ${context} + + + ${footer}`; + + return prompt; + } catch (error) { + logger.error('Error creating context:', error); + throw error; + } + }; + + return { + processFile, + createContext, + }; +} + +module.exports = createContextHandlers; diff --git a/api/app/clients/prompts/createVisionPrompt.js b/api/app/clients/prompts/createVisionPrompt.js new file mode 100644 index 0000000000..5d8a7bbf51 --- /dev/null +++ b/api/app/clients/prompts/createVisionPrompt.js @@ -0,0 +1,34 @@ +/** + * Generates a prompt instructing the user to describe an image in detail, tailored to different types of visual content. + * @param {boolean} pluralized - Whether to pluralize the prompt for multiple images. + * @returns {string} - The generated vision prompt. + */ +const createVisionPrompt = (pluralized = false) => { + return `Please describe the image${ + pluralized ? 
's' : '' + } in detail, covering relevant aspects such as: + + For photographs, illustrations, or artwork: + - The main subject(s) and their appearance, positioning, and actions + - The setting, background, and any notable objects or elements + - Colors, lighting, and overall mood or atmosphere + - Any interesting details, textures, or patterns + - The style, technique, or medium used (if discernible) + + For screenshots or images containing text: + - The content and purpose of the text + - The layout, formatting, and organization of the information + - Any notable visual elements, such as logos, icons, or graphics + - The overall context or message conveyed by the screenshot + + For graphs, charts, or data visualizations: + - The type of graph or chart (e.g., bar graph, line chart, pie chart) + - The variables being compared or analyzed + - Any trends, patterns, or outliers in the data + - The axis labels, scales, and units of measurement + - The title, legend, and any additional context provided + + Be as specific and descriptive as possible while maintaining clarity and concision.`; +}; + +module.exports = createVisionPrompt; diff --git a/api/app/clients/prompts/formatAgentMessages.spec.js b/api/app/clients/prompts/formatAgentMessages.spec.js new file mode 100644 index 0000000000..20731f6984 --- /dev/null +++ b/api/app/clients/prompts/formatAgentMessages.spec.js @@ -0,0 +1,285 @@ +const { ToolMessage } = require('@langchain/core/messages'); +const { ContentTypes } = require('librechat-data-provider'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); +const { formatAgentMessages } = require('./formatMessages'); + +describe('formatAgentMessages', () => { + it('should format simple user and AI messages', () => { + const payload = [ + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there!' 
}, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[1]).toBeInstanceOf(AIMessage); + }); + + it('should handle system messages', () => { + const payload = [{ role: 'system', content: 'You are a helpful assistant.' }]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(SystemMessage); + }); + + it('should format messages with content arrays', () => { + const payload = [ + { + role: 'user', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' }], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(HumanMessage); + }); + + it('should handle tool calls and create ToolMessages', () => { + const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'Let me check that for you.', + tool_call_ids: ['123'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: '{"query":"weather"}', + output: 'The weather is sunny.', + }, + }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[1]).toBeInstanceOf(ToolMessage); + expect(result[0].tool_calls).toHaveLength(1); + expect(result[1].tool_call_id).toBe('123'); + }); + + it('should handle multiple content parts in assistant messages', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 1' }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 2' }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toHaveLength(2); + }); + + it('should throw an error for invalid tool call structure', () => { + 
const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: '{"query":"weather"}', + output: 'The weather is sunny.', + }, + }, + ], + }, + ]; + expect(() => formatAgentMessages(payload)).toThrow('Invalid tool call structure'); + }); + + it('should handle tool calls with non-JSON args', () => { + const payload = [ + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Checking...', tool_call_ids: ['123'] }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: '123', + name: 'search', + args: 'non-json-string', + output: 'Result', + }, + }, + ], + }, + ]; + const result = formatAgentMessages(payload); + expect(result).toHaveLength(2); + expect(result[0].tool_calls[0].args).toStrictEqual({ input: 'non-json-string' }); + }); + + it('should handle complex tool calls with multiple steps', () => { + const payload = [ + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'I\'ll search for that information.', + tool_call_ids: ['search_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'search_1', + name: 'search', + args: '{"query":"weather in New York"}', + output: 'The weather in New York is currently sunny with a temperature of 75°F.', + }, + }, + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'Now, I\'ll convert the temperature.', + tool_call_ids: ['convert_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'convert_1', + name: 'convert_temperature', + args: '{"temperature": 75, "from": "F", "to": "C"}', + output: '23.89°C', + }, + }, + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s your answer.' 
}, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + expect(result).toHaveLength(5); + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[1]).toBeInstanceOf(ToolMessage); + expect(result[2]).toBeInstanceOf(AIMessage); + expect(result[3]).toBeInstanceOf(ToolMessage); + expect(result[4]).toBeInstanceOf(AIMessage); + + // Check first AIMessage + expect(result[0].content).toBe('I\'ll search for that information.'); + expect(result[0].tool_calls).toHaveLength(1); + expect(result[0].tool_calls[0]).toEqual({ + id: 'search_1', + name: 'search', + args: { query: 'weather in New York' }, + }); + + // Check first ToolMessage + expect(result[1].tool_call_id).toBe('search_1'); + expect(result[1].name).toBe('search'); + expect(result[1].content).toBe( + 'The weather in New York is currently sunny with a temperature of 75°F.', + ); + + // Check second AIMessage + expect(result[2].content).toBe('Now, I\'ll convert the temperature.'); + expect(result[2].tool_calls).toHaveLength(1); + expect(result[2].tool_calls[0]).toEqual({ + id: 'convert_1', + name: 'convert_temperature', + args: { temperature: 75, from: 'F', to: 'C' }, + }); + + // Check second ToolMessage + expect(result[3].tool_call_id).toBe('convert_1'); + expect(result[3].name).toBe('convert_temperature'); + expect(result[3].content).toBe('23.89°C'); + + // Check final AIMessage + expect(result[4].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Here\'s your answer.', type: ContentTypes.TEXT }, + ]); + }); + + it.skip('should not produce two consecutive assistant messages and format content correctly', () => { + const payload = [ + { role: 'user', content: 'Hello' }, + { + role: 'assistant', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hi there!' }], + }, + { + role: 'assistant', + content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'How can I help you?' }], + }, + { role: 'user', content: 'What\'s the weather?' 
}, + { + role: 'assistant', + content: [ + { + type: ContentTypes.TEXT, + [ContentTypes.TEXT]: 'Let me check that for you.', + tool_call_ids: ['weather_1'], + }, + { + type: ContentTypes.TOOL_CALL, + tool_call: { + id: 'weather_1', + name: 'check_weather', + args: '{"location":"New York"}', + output: 'Sunny, 75°F', + }, + }, + ], + }, + { + role: 'assistant', + content: [ + { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s the weather information.' }, + ], + }, + ]; + + const result = formatAgentMessages(payload); + + // Check correct message count and types + expect(result).toHaveLength(6); + expect(result[0]).toBeInstanceOf(HumanMessage); + expect(result[1]).toBeInstanceOf(AIMessage); + expect(result[2]).toBeInstanceOf(HumanMessage); + expect(result[3]).toBeInstanceOf(AIMessage); + expect(result[4]).toBeInstanceOf(ToolMessage); + expect(result[5]).toBeInstanceOf(AIMessage); + + // Check content of messages + expect(result[0].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Hello', type: ContentTypes.TEXT }, + ]); + expect(result[1].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Hi there!', type: ContentTypes.TEXT }, + { [ContentTypes.TEXT]: 'How can I help you?', type: ContentTypes.TEXT }, + ]); + expect(result[2].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'What\'s the weather?', type: ContentTypes.TEXT }, + ]); + expect(result[3].content).toBe('Let me check that for you.'); + expect(result[4].content).toBe('Sunny, 75°F'); + expect(result[5].content).toStrictEqual([ + { [ContentTypes.TEXT]: 'Here\'s the weather information.', type: ContentTypes.TEXT }, + ]); + + // Check that there are no consecutive AIMessages + const messageTypes = result.map((message) => message.constructor); + for (let i = 0; i < messageTypes.length - 1; i++) { + expect(messageTypes[i] === AIMessage && messageTypes[i + 1] === AIMessage).toBe(false); + } + + // Additional check to ensure the consecutive assistant messages were combined + 
expect(result[1].content).toHaveLength(2); + }); +}); diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js index c19eee260a..d84e62cca8 100644 --- a/api/app/clients/prompts/formatMessages.js +++ b/api/app/clients/prompts/formatMessages.js @@ -1,5 +1,6 @@ -const { EModelEndpoint } = require('librechat-data-provider'); -const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema'); +const { ToolMessage } = require('@langchain/core/messages'); +const { EModelEndpoint, ContentTypes } = require('librechat-data-provider'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); /** * Formats a message to OpenAI Vision API payload format. @@ -14,11 +15,11 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema'); */ const formatVisionMessage = ({ message, image_urls, endpoint }) => { if (endpoint === EModelEndpoint.anthropic) { - message.content = [...image_urls, { type: 'text', text: message.content }]; + message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }]; return message; } - message.content = [{ type: 'text', text: message.content }, ...image_urls]; + message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls]; return message; }; @@ -51,7 +52,7 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain = _role = roleMapping[lc_id[2]]; } const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant'); - const content = text ?? _content ?? ''; + const content = _content ?? text ?? ''; const formattedMessage = { role, content, @@ -131,4 +132,129 @@ const formatFromLangChain = (message) => { }; }; -module.exports = { formatMessage, formatLangChainMessages, formatFromLangChain }; +/** + * Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances. 
+ * + * @param {Array>} payload - The array of messages to format. + * @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls. + */ +const formatAgentMessages = (payload) => { + const messages = []; + + for (const message of payload) { + if (typeof message.content === 'string') { + message.content = [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content }]; + } + if (message.role !== 'assistant') { + messages.push(formatMessage({ message, langChain: true })); + continue; + } + + let currentContent = []; + let lastAIMessage = null; + + for (const part of message.content) { + if (part.type === ContentTypes.TEXT && part.tool_call_ids) { + /* + If there's pending content, it needs to be aggregated as a single string to prepare for tool calls. + For Anthropic models, the "tool_calls" field on a message is only respected if content is a string. + */ + if (currentContent.length > 0) { + let content = currentContent.reduce((acc, curr) => { + if (curr.type === ContentTypes.TEXT) { + return `${acc}${curr[ContentTypes.TEXT]}\n`; + } + return acc; + }, ''); + content = `${content}\n${part[ContentTypes.TEXT] ?? 
''}`.trim(); + lastAIMessage = new AIMessage({ content }); + messages.push(lastAIMessage); + currentContent = []; + continue; + } + + // Create a new AIMessage with this text and prepare for tool calls + lastAIMessage = new AIMessage({ + content: part.text || '', + }); + + messages.push(lastAIMessage); + } else if (part.type === ContentTypes.TOOL_CALL) { + if (!lastAIMessage) { + throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids'); + } + + // Note: `tool_calls` list is defined when constructed by `AIMessage` class, and outputs should be excluded from it + const { output, args: _args, ...tool_call } = part.tool_call; + // TODO: investigate; args as dictionary may need to be provider-or-tool-specific + let args = _args; + try { + args = JSON.parse(_args); + } catch (e) { + if (typeof _args === 'string') { + args = { input: _args }; + } + } + + tool_call.args = args; + lastAIMessage.tool_calls.push(tool_call); + + // Add the corresponding ToolMessage + messages.push( + new ToolMessage({ + tool_call_id: tool_call.id, + name: tool_call.name, + content: output || '', + }), + ); + } else { + currentContent.push(part); + } + } + + if (currentContent.length > 0) { + messages.push(new AIMessage({ content: currentContent })); + } + } + + return messages; +}; + +/** + * Formats an array of messages for LangChain, making sure all content fields are strings + * @param {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} payload - The array of messages to format. + * @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls. 
+ */ +const formatContentStrings = (payload) => { + const messages = []; + + for (const message of payload) { + if (typeof message.content === 'string') { + continue; + } + + if (!Array.isArray(message.content)) { + continue; + } + + // Reduce text types to a single string, ignore all other types + const content = message.content.reduce((acc, curr) => { + if (curr.type === ContentTypes.TEXT) { + return `${acc}${curr[ContentTypes.TEXT]}\n`; + } + return acc; + }, ''); + + message.content = content.trim(); + } + + return messages; +}; + +module.exports = { + formatMessage, + formatFromLangChain, + formatAgentMessages, + formatContentStrings, + formatLangChainMessages, +}; diff --git a/api/app/clients/prompts/formatMessages.spec.js b/api/app/clients/prompts/formatMessages.spec.js index 8d4956b381..97e40b0caa 100644 --- a/api/app/clients/prompts/formatMessages.spec.js +++ b/api/app/clients/prompts/formatMessages.spec.js @@ -1,5 +1,5 @@ const { Constants } = require('librechat-data-provider'); -const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema'); +const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages'); const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages'); describe('formatMessage', () => { @@ -60,7 +60,6 @@ describe('formatMessage', () => { error: false, finish_reason: null, isCreatedByUser: true, - isEdited: false, model: null, parentMessageId: Constants.NO_PARENT, sender: 'User', diff --git a/api/app/clients/prompts/index.js b/api/app/clients/prompts/index.js index 40db3d9043..2549ccda5c 100644 --- a/api/app/clients/prompts/index.js +++ b/api/app/clients/prompts/index.js @@ -1,15 +1,21 @@ +const addCacheControl = require('./addCacheControl'); const formatMessages = require('./formatMessages'); const summaryPrompts = require('./summaryPrompts'); const handleInputs = require('./handleInputs'); const instructions = require('./instructions'); const titlePrompts = 
require('./titlePrompts'); -const truncateText = require('./truncateText'); +const truncate = require('./truncate'); +const createVisionPrompt = require('./createVisionPrompt'); +const createContextHandlers = require('./createContextHandlers'); module.exports = { + addCacheControl, ...formatMessages, ...summaryPrompts, ...handleInputs, ...instructions, ...titlePrompts, - truncateText, + ...truncate, + createVisionPrompt, + createContextHandlers, }; diff --git a/api/app/clients/prompts/shadcn-docs/components.js b/api/app/clients/prompts/shadcn-docs/components.js new file mode 100644 index 0000000000..b67c47d50f --- /dev/null +++ b/api/app/clients/prompts/shadcn-docs/components.js @@ -0,0 +1,495 @@ +// Essential Components +const essentialComponents = { + avatar: { + componentName: 'Avatar', + importDocs: 'import { Avatar, AvatarFallback, AvatarImage } from "/components/ui/avatar"', + usageDocs: ` + + + CN +`, + }, + button: { + componentName: 'Button', + importDocs: 'import { Button } from "/components/ui/button"', + usageDocs: ` +`, + }, + card: { + componentName: 'Card', + importDocs: ` +import { + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, +} from "/components/ui/card"`, + usageDocs: ` + + + Card Title + Card Description + + +
+    <p>Card Content</p>
+  </CardContent>
+  <CardFooter>
+    <p>Card Footer</p>
+  </CardFooter>
+</Card>
`, + }, + checkbox: { + componentName: 'Checkbox', + importDocs: 'import { Checkbox } from "/components/ui/checkbox"', + usageDocs: '', + }, + input: { + componentName: 'Input', + importDocs: 'import { Input } from "/components/ui/input"', + usageDocs: '', + }, + label: { + componentName: 'Label', + importDocs: 'import { Label } from "/components/ui/label"', + usageDocs: '', + }, + radioGroup: { + componentName: 'RadioGroup', + importDocs: ` +import { Label } from "/components/ui/label" +import { RadioGroup, RadioGroupItem } from "/components/ui/radio-group"`, + usageDocs: ` + +
+  <div className="flex items-center space-x-2">
+    <RadioGroupItem value="option-one" id="option-one" />
+    <Label htmlFor="option-one">Option One</Label>
+  </div>
+  <div className="flex items-center space-x-2">
+    <RadioGroupItem value="option-two" id="option-two" />
+    <Label htmlFor="option-two">Option Two</Label>
+  </div>
+</RadioGroup>
`, + }, + select: { + componentName: 'Select', + importDocs: ` +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "/components/ui/select"`, + usageDocs: ` +`, + }, + textarea: { + componentName: 'Textarea', + importDocs: 'import { Textarea } from "/components/ui/textarea"', + usageDocs: '