mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 08:50:15 +01:00
Compare commits
No commits in common. "main" and "chart-1.9.4" have entirely different histories.
main
...
chart-1.9.
825 changed files with 23264 additions and 36314 deletions
|
|
@ -20,7 +20,8 @@ services:
|
||||||
environment:
|
environment:
|
||||||
- HOST=0.0.0.0
|
- HOST=0.0.0.0
|
||||||
- MONGO_URI=mongodb://mongodb:27017/LibreChat
|
- MONGO_URI=mongodb://mongodb:27017/LibreChat
|
||||||
# - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1
|
# - CHATGPT_REVERSE_PROXY=http://host.docker.internal:8080/api/conversation # if you are hosting your own chatgpt reverse proxy with docker
|
||||||
|
# - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1/chat/completions # if you are hosting your own chatgpt reverse proxy with docker
|
||||||
- MEILI_HOST=http://meilisearch:7700
|
- MEILI_HOST=http://meilisearch:7700
|
||||||
|
|
||||||
# Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
|
# Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
|
||||||
|
|
|
||||||
|
|
@ -129,6 +129,7 @@ ANTHROPIC_API_KEY=user_provided
|
||||||
# AZURE_OPENAI_API_VERSION= # Deprecated
|
# AZURE_OPENAI_API_VERSION= # Deprecated
|
||||||
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
||||||
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
||||||
|
# PLUGINS_USE_AZURE="true" # Deprecated
|
||||||
|
|
||||||
#=================#
|
#=================#
|
||||||
# AWS Bedrock #
|
# AWS Bedrock #
|
||||||
|
|
@ -229,6 +230,14 @@ ASSISTANTS_API_KEY=user_provided
|
||||||
# More info, including how to enable use of Assistants with Azure here:
|
# More info, including how to enable use of Assistants with Azure here:
|
||||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
||||||
|
|
||||||
|
#============#
|
||||||
|
# Plugins #
|
||||||
|
#============#
|
||||||
|
|
||||||
|
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
|
||||||
|
|
||||||
|
DEBUG_PLUGINS=true
|
||||||
|
|
||||||
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
|
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
|
||||||
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
|
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
|
||||||
|
|
||||||
|
|
|
||||||
3
.github/workflows/backend-review.yml
vendored
3
.github/workflows/backend-review.yml
vendored
|
|
@ -4,7 +4,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
|
|
@ -72,4 +71,4 @@ jobs:
|
||||||
run: cd packages/data-schemas && npm run test:ci
|
run: cd packages/data-schemas && npm run test:ci
|
||||||
|
|
||||||
- name: Run @librechat/api unit tests
|
- name: Run @librechat/api unit tests
|
||||||
run: cd packages/api && npm run test:ci
|
run: cd packages/api && npm run test:ci
|
||||||
|
|
@ -5,7 +5,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'packages/api/src/cache/**'
|
- 'packages/api/src/cache/**'
|
||||||
|
|
@ -87,4 +86,4 @@ jobs:
|
||||||
|
|
||||||
- name: Stop Single Redis Instance
|
- name: Stop Single Redis Instance
|
||||||
if: always()
|
if: always()
|
||||||
run: redis-cli -p 6379 shutdown || true
|
run: redis-cli -p 6379 shutdown || true
|
||||||
3
.github/workflows/eslint-ci.yml
vendored
3
.github/workflows/eslint-ci.yml
vendored
|
|
@ -5,7 +5,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
|
|
@ -57,4 +56,4 @@ jobs:
|
||||||
# Run ESLint
|
# Run ESLint
|
||||||
npx eslint --no-error-on-unmatched-pattern \
|
npx eslint --no-error-on-unmatched-pattern \
|
||||||
--config eslint.config.mjs \
|
--config eslint.config.mjs \
|
||||||
$CHANGED_FILES
|
$CHANGED_FILES
|
||||||
1
.github/workflows/frontend-review.yml
vendored
1
.github/workflows/frontend-review.yml
vendored
|
|
@ -5,7 +5,6 @@ on:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
- dev
|
- dev
|
||||||
- dev-staging
|
|
||||||
- release/*
|
- release/*
|
||||||
paths:
|
paths:
|
||||||
- 'client/**'
|
- 'client/**'
|
||||||
|
|
|
||||||
83
.github/workflows/unused-packages.yml
vendored
83
.github/workflows/unused-packages.yml
vendored
|
|
@ -8,7 +8,6 @@ on:
|
||||||
- 'client/**'
|
- 'client/**'
|
||||||
- 'api/**'
|
- 'api/**'
|
||||||
- 'packages/client/**'
|
- 'packages/client/**'
|
||||||
- 'packages/api/**'
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
detect-unused-packages:
|
detect-unused-packages:
|
||||||
|
|
@ -64,45 +63,35 @@ jobs:
|
||||||
extract_deps_from_code() {
|
extract_deps_from_code() {
|
||||||
local folder=$1
|
local folder=$1
|
||||||
local output_file=$2
|
local output_file=$2
|
||||||
|
|
||||||
# Initialize empty output file
|
|
||||||
> "$output_file"
|
|
||||||
|
|
||||||
if [[ -d "$folder" ]]; then
|
if [[ -d "$folder" ]]; then
|
||||||
# Extract require() statements (use explicit includes for portability)
|
# Extract require() statements
|
||||||
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" \
|
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
|
||||||
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# Extract ES6 imports - import x from 'module'
|
# Extract ES6 imports - various patterns
|
||||||
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
# import x from 'module'
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
|
|
||||||
# import 'module' (side-effect imports)
|
# import 'module' (side-effect imports)
|
||||||
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# export { x } from 'module' or export * from 'module'
|
# export { x } from 'module' or export * from 'module'
|
||||||
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
|
||||||
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
|
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# import type { x } from 'module' (TypeScript)
|
# import type { x } from 'module' (TypeScript)
|
||||||
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
|
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
|
||||||
--include='*.ts' --include='*.tsx' 2>/dev/null | \
|
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
|
||||||
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
|
|
||||||
|
|
||||||
# Remove subpath imports but keep the base package
|
# Remove subpath imports but keep the base package
|
||||||
# For scoped packages: '@scope/pkg/subpath' -> '@scope/pkg'
|
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
|
||||||
# For regular packages: 'pkg/subpath' -> 'pkg'
|
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
|
||||||
# Scoped packages (must keep @scope/package, strip anything after)
|
|
||||||
sed -i -E 's|^(@[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
|
|
||||||
# Non-scoped packages (keep package name, strip subpath)
|
|
||||||
sed -i -E 's|^([a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
|
|
||||||
|
|
||||||
sort -u "$output_file" -o "$output_file"
|
sort -u "$output_file" -o "$output_file"
|
||||||
|
else
|
||||||
|
touch "$output_file"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -110,10 +99,8 @@ jobs:
|
||||||
extract_deps_from_code "client" client_used_code.txt
|
extract_deps_from_code "client" client_used_code.txt
|
||||||
extract_deps_from_code "api" api_used_code.txt
|
extract_deps_from_code "api" api_used_code.txt
|
||||||
|
|
||||||
# Extract dependencies used by workspace packages
|
# Extract dependencies used by @librechat/client package
|
||||||
# These packages are used in the workspace but dependencies are provided by parent package.json
|
|
||||||
extract_deps_from_code "packages/client" packages_client_used_code.txt
|
extract_deps_from_code "packages/client" packages_client_used_code.txt
|
||||||
extract_deps_from_code "packages/api" packages_api_used_code.txt
|
|
||||||
|
|
||||||
- name: Get @librechat/client dependencies
|
- name: Get @librechat/client dependencies
|
||||||
id: get-librechat-client-deps
|
id: get-librechat-client-deps
|
||||||
|
|
@ -139,30 +126,6 @@ jobs:
|
||||||
touch librechat_client_deps.txt
|
touch librechat_client_deps.txt
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Get @librechat/api dependencies
|
|
||||||
id: get-librechat-api-deps
|
|
||||||
run: |
|
|
||||||
if [[ -f "packages/api/package.json" ]]; then
|
|
||||||
# Get all dependencies from @librechat/api (dependencies, devDependencies, and peerDependencies)
|
|
||||||
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
|
|
||||||
|
|
||||||
# Combine all dependencies
|
|
||||||
echo "$DEPS" > librechat_api_deps.txt
|
|
||||||
echo "$DEV_DEPS" >> librechat_api_deps.txt
|
|
||||||
echo "$PEER_DEPS" >> librechat_api_deps.txt
|
|
||||||
|
|
||||||
# Also include dependencies that are imported in packages/api
|
|
||||||
cat packages_api_used_code.txt >> librechat_api_deps.txt
|
|
||||||
|
|
||||||
# Remove empty lines and sort
|
|
||||||
grep -v '^$' librechat_api_deps.txt | sort -u > temp_deps.txt
|
|
||||||
mv temp_deps.txt librechat_api_deps.txt
|
|
||||||
else
|
|
||||||
touch librechat_api_deps.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Extract Workspace Dependencies
|
- name: Extract Workspace Dependencies
|
||||||
id: extract-workspace-deps
|
id: extract-workspace-deps
|
||||||
run: |
|
run: |
|
||||||
|
|
@ -221,8 +184,8 @@ jobs:
|
||||||
chmod -R 755 client
|
chmod -R 755 client
|
||||||
cd client
|
cd client
|
||||||
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
||||||
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/client imports
|
# Exclude dependencies used in scripts, code, and workspace packages
|
||||||
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt ../packages_client_used_code.txt ../librechat_client_deps.txt 2>/dev/null | sort -u) || echo "")
|
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
|
||||||
# Filter out false positives
|
# Filter out false positives
|
||||||
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
|
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
|
||||||
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
|
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
|
||||||
|
|
@ -238,8 +201,8 @@ jobs:
|
||||||
chmod -R 755 api
|
chmod -R 755 api
|
||||||
cd api
|
cd api
|
||||||
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
|
||||||
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/api imports
|
# Exclude dependencies used in scripts, code, and workspace packages
|
||||||
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt ../packages_api_used_code.txt ../librechat_api_deps.txt 2>/dev/null | sort -u) || echo "")
|
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
|
||||||
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
|
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
|
||||||
echo "$UNUSED" >> $GITHUB_ENV
|
echo "$UNUSED" >> $GITHUB_ENV
|
||||||
echo "EOF" >> $GITHUB_ENV
|
echo "EOF" >> $GITHUB_ENV
|
||||||
|
|
@ -278,4 +241,4 @@ jobs:
|
||||||
|
|
||||||
- name: Fail workflow if unused dependencies found
|
- name: Fail workflow if unused dependencies found
|
||||||
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
|
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
|
||||||
run: exit 1
|
run: exit 1
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
# v0.8.2-rc1
|
# v0.8.1
|
||||||
|
|
||||||
# Base node image
|
# Base node image
|
||||||
FROM node:20-alpine AS node
|
FROM node:20-alpine AS node
|
||||||
|
|
@ -11,7 +11,7 @@ RUN apk add --no-cache python3 py3-pip uv
|
||||||
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
|
||||||
|
|
||||||
# Add `uv` for extended MCP support
|
# Add `uv` for extended MCP support
|
||||||
COPY --from=ghcr.io/astral-sh/uv:0.9.5-python3.12-alpine /usr/local/bin/uv /usr/local/bin/uvx /bin/
|
COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/
|
||||||
RUN uv --version
|
RUN uv --version
|
||||||
|
|
||||||
RUN mkdir -p /app && chown node:node /app
|
RUN mkdir -p /app && chown node:node /app
|
||||||
|
|
@ -30,7 +30,7 @@ RUN \
|
||||||
# Allow mounting of these files, which have no default
|
# Allow mounting of these files, which have no default
|
||||||
touch .env ; \
|
touch .env ; \
|
||||||
# Create directories for the volumes to inherit the correct permissions
|
# Create directories for the volumes to inherit the correct permissions
|
||||||
mkdir -p /app/client/public/images /app/logs /app/uploads ; \
|
mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
|
||||||
npm config set fetch-retry-maxtimeout 600000 ; \
|
npm config set fetch-retry-maxtimeout 600000 ; \
|
||||||
npm config set fetch-retries 5 ; \
|
npm config set fetch-retries 5 ; \
|
||||||
npm config set fetch-retry-mintimeout 15000 ; \
|
npm config set fetch-retry-mintimeout 15000 ; \
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
# Dockerfile.multi
|
# Dockerfile.multi
|
||||||
# v0.8.2-rc1
|
# v0.8.1
|
||||||
|
|
||||||
# Base for all builds
|
# Base for all builds
|
||||||
FROM node:20-alpine AS base-min
|
FROM node:20-alpine AS base-min
|
||||||
|
|
|
||||||
991
api/app/clients/AnthropicClient.js
Normal file
991
api/app/clients/AnthropicClient.js
Normal file
|
|
@ -0,0 +1,991 @@
|
||||||
|
const Anthropic = require('@anthropic-ai/sdk');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
|
const {
|
||||||
|
Constants,
|
||||||
|
ErrorTypes,
|
||||||
|
EModelEndpoint,
|
||||||
|
parseTextParts,
|
||||||
|
anthropicSettings,
|
||||||
|
getResponseSender,
|
||||||
|
validateVisionModel,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
|
const { sleep, SplitStreamHandler: _Handler, addCacheControl } = require('@librechat/agents');
|
||||||
|
const {
|
||||||
|
Tokenizer,
|
||||||
|
createFetch,
|
||||||
|
matchModelName,
|
||||||
|
getClaudeHeaders,
|
||||||
|
getModelMaxTokens,
|
||||||
|
configureReasoning,
|
||||||
|
checkPromptCacheSupport,
|
||||||
|
getModelMaxOutputTokens,
|
||||||
|
createStreamEventHandlers,
|
||||||
|
} = require('@librechat/api');
|
||||||
|
const {
|
||||||
|
truncateText,
|
||||||
|
formatMessage,
|
||||||
|
titleFunctionPrompt,
|
||||||
|
parseParamFromPrompt,
|
||||||
|
createContextHandlers,
|
||||||
|
} = require('./prompts');
|
||||||
|
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
|
||||||
|
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
||||||
|
const BaseClient = require('./BaseClient');
|
||||||
|
|
||||||
|
const HUMAN_PROMPT = '\n\nHuman:';
|
||||||
|
const AI_PROMPT = '\n\nAssistant:';
|
||||||
|
|
||||||
|
class SplitStreamHandler extends _Handler {
|
||||||
|
getDeltaContent(chunk) {
|
||||||
|
return (chunk?.delta?.text ?? chunk?.completion) || '';
|
||||||
|
}
|
||||||
|
getReasoningDelta(chunk) {
|
||||||
|
return chunk?.delta?.thinking || '';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Helper function to introduce a delay before retrying */
|
||||||
|
function delayBeforeRetry(attempts, baseDelay = 1000) {
|
||||||
|
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
|
||||||
|
}
|
||||||
|
|
||||||
|
const tokenEventTypes = new Set(['message_start', 'message_delta']);
|
||||||
|
const { legacy } = anthropicSettings;
|
||||||
|
|
||||||
|
class AnthropicClient extends BaseClient {
|
||||||
|
constructor(apiKey, options = {}) {
|
||||||
|
super(apiKey, options);
|
||||||
|
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
|
||||||
|
this.userLabel = HUMAN_PROMPT;
|
||||||
|
this.assistantLabel = AI_PROMPT;
|
||||||
|
this.contextStrategy = options.contextStrategy
|
||||||
|
? options.contextStrategy.toLowerCase()
|
||||||
|
: 'discard';
|
||||||
|
this.setOptions(options);
|
||||||
|
/** @type {string | undefined} */
|
||||||
|
this.systemMessage;
|
||||||
|
/** @type {AnthropicMessageStartEvent| undefined} */
|
||||||
|
this.message_start;
|
||||||
|
/** @type {AnthropicMessageDeltaEvent| undefined} */
|
||||||
|
this.message_delta;
|
||||||
|
/** Whether the model is part of the Claude 3 Family
|
||||||
|
* @type {boolean} */
|
||||||
|
this.isClaudeLatest;
|
||||||
|
/** Whether to use Messages API or Completions API
|
||||||
|
* @type {boolean} */
|
||||||
|
this.useMessages;
|
||||||
|
/** Whether or not the model supports Prompt Caching
|
||||||
|
* @type {boolean} */
|
||||||
|
this.supportsCacheControl;
|
||||||
|
/** The key for the usage object's input tokens
|
||||||
|
* @type {string} */
|
||||||
|
this.inputTokensKey = 'input_tokens';
|
||||||
|
/** The key for the usage object's output tokens
|
||||||
|
* @type {string} */
|
||||||
|
this.outputTokensKey = 'output_tokens';
|
||||||
|
/** @type {SplitStreamHandler | undefined} */
|
||||||
|
this.streamHandler;
|
||||||
|
}
|
||||||
|
|
||||||
|
setOptions(options) {
|
||||||
|
if (this.options && !this.options.replaceOptions) {
|
||||||
|
// nested options aren't spread properly, so we need to do this manually
|
||||||
|
this.options.modelOptions = {
|
||||||
|
...this.options.modelOptions,
|
||||||
|
...options.modelOptions,
|
||||||
|
};
|
||||||
|
delete options.modelOptions;
|
||||||
|
// now we can merge options
|
||||||
|
this.options = {
|
||||||
|
...this.options,
|
||||||
|
...options,
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
this.options = options;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.modelOptions = Object.assign(
|
||||||
|
{
|
||||||
|
model: anthropicSettings.model.default,
|
||||||
|
},
|
||||||
|
this.modelOptions,
|
||||||
|
this.options.modelOptions,
|
||||||
|
);
|
||||||
|
|
||||||
|
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
|
||||||
|
this.isClaudeLatest =
|
||||||
|
/claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
|
||||||
|
const isLegacyOutput = !(
|
||||||
|
/claude-3[-.]5-sonnet/.test(modelMatch) ||
|
||||||
|
/claude-3[-.]7/.test(modelMatch) ||
|
||||||
|
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
|
||||||
|
/claude-[4-9]/.test(modelMatch)
|
||||||
|
);
|
||||||
|
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
|
||||||
|
|
||||||
|
if (
|
||||||
|
isLegacyOutput &&
|
||||||
|
this.modelOptions.maxOutputTokens &&
|
||||||
|
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
|
||||||
|
) {
|
||||||
|
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.useMessages = this.isClaudeLatest || !!this.options.attachments;
|
||||||
|
|
||||||
|
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
|
||||||
|
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
|
||||||
|
|
||||||
|
this.maxContextTokens =
|
||||||
|
this.options.maxContextTokens ??
|
||||||
|
getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ??
|
||||||
|
100000;
|
||||||
|
this.maxResponseTokens =
|
||||||
|
this.modelOptions.maxOutputTokens ??
|
||||||
|
getModelMaxOutputTokens(
|
||||||
|
this.modelOptions.model,
|
||||||
|
this.options.endpointType ?? this.options.endpoint,
|
||||||
|
this.options.endpointTokenConfig,
|
||||||
|
) ??
|
||||||
|
anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
|
||||||
|
this.maxPromptTokens =
|
||||||
|
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
|
||||||
|
|
||||||
|
const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
|
||||||
|
if (reservedTokens > this.maxContextTokens) {
|
||||||
|
const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(info);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
} else if (this.maxResponseTokens === this.maxContextTokens) {
|
||||||
|
const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(info);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.sender =
|
||||||
|
this.options.sender ??
|
||||||
|
getResponseSender({
|
||||||
|
model: this.modelOptions.model,
|
||||||
|
endpoint: EModelEndpoint.anthropic,
|
||||||
|
modelLabel: this.options.modelLabel,
|
||||||
|
});
|
||||||
|
|
||||||
|
this.startToken = '||>';
|
||||||
|
this.endToken = '';
|
||||||
|
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the initialized Anthropic client.
|
||||||
|
* @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
|
||||||
|
* @returns {Anthropic} The Anthropic client instance.
|
||||||
|
*/
|
||||||
|
getClient(requestOptions) {
|
||||||
|
/** @type {Anthropic.ClientOptions} */
|
||||||
|
const options = {
|
||||||
|
fetch: createFetch({
|
||||||
|
directEndpoint: this.options.directEndpoint,
|
||||||
|
reverseProxyUrl: this.options.reverseProxyUrl,
|
||||||
|
}),
|
||||||
|
apiKey: this.apiKey,
|
||||||
|
fetchOptions: {},
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.options.proxy) {
|
||||||
|
options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.reverseProxyUrl) {
|
||||||
|
options.baseURL = this.options.reverseProxyUrl;
|
||||||
|
}
|
||||||
|
|
||||||
|
const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
|
||||||
|
if (headers) {
|
||||||
|
options.defaultHeaders = headers;
|
||||||
|
}
|
||||||
|
|
||||||
|
return new Anthropic(options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get stream usage as returned by this client's API response.
|
||||||
|
* @returns {AnthropicStreamUsage} The stream usage object.
|
||||||
|
*/
|
||||||
|
getStreamUsage() {
|
||||||
|
const inputUsage = this.message_start?.message?.usage ?? {};
|
||||||
|
const outputUsage = this.message_delta?.usage ?? {};
|
||||||
|
return Object.assign({}, inputUsage, outputUsage);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculates the correct token count for the current user message based on the token count map and API usage.
|
||||||
|
* Edge case: If the calculation results in a negative value, it returns the original estimate.
|
||||||
|
* If revisiting a conversation with a chat history entirely composed of token estimates,
|
||||||
|
* the cumulative token count going forward should become more accurate as the conversation progresses.
|
||||||
|
* @param {Object} params - The parameters for the calculation.
|
||||||
|
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
|
||||||
|
* @param {string} params.currentMessageId - The ID of the current message to calculate.
|
||||||
|
* @param {AnthropicStreamUsage} params.usage - The usage object returned by the API.
|
||||||
|
* @returns {number} The correct token count for the current user message.
|
||||||
|
*/
|
||||||
|
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
|
||||||
|
const originalEstimate = tokenCountMap[currentMessageId] || 0;
|
||||||
|
|
||||||
|
if (!usage || typeof usage.input_tokens !== 'number') {
|
||||||
|
return originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenCountMap[currentMessageId] = 0;
|
||||||
|
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
|
||||||
|
const numCount = Number(count);
|
||||||
|
return sum + (isNaN(numCount) ? 0 : numCount);
|
||||||
|
}, 0);
|
||||||
|
const totalInputTokens =
|
||||||
|
(usage.input_tokens ?? 0) +
|
||||||
|
(usage.cache_creation_input_tokens ?? 0) +
|
||||||
|
(usage.cache_read_input_tokens ?? 0);
|
||||||
|
|
||||||
|
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
|
||||||
|
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Token Count for LibreChat Message
|
||||||
|
* @param {TMessage} responseMessage
|
||||||
|
* @returns {number}
|
||||||
|
*/
|
||||||
|
getTokenCountForResponse(responseMessage) {
|
||||||
|
return this.getTokenCountForMessage({
|
||||||
|
role: 'assistant',
|
||||||
|
content: responseMessage.text,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
|
||||||
|
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
|
||||||
|
* - Sets `this.isVisionModel` to `true` if vision request.
|
||||||
|
* - Deletes `this.modelOptions.stop` if vision request.
|
||||||
|
* @param {MongoFile[]} attachments
|
||||||
|
*/
|
||||||
|
checkVisionRequest(attachments) {
|
||||||
|
const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
|
||||||
|
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
|
||||||
|
|
||||||
|
const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
|
||||||
|
if (
|
||||||
|
attachments &&
|
||||||
|
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
|
||||||
|
visionModelAvailable &&
|
||||||
|
!this.isVisionModel
|
||||||
|
) {
|
||||||
|
this.modelOptions.model = this.defaultVisionModel;
|
||||||
|
this.isVisionModel = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate the token cost in tokens for an image based on its dimensions and detail level.
|
||||||
|
*
|
||||||
|
* For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
|
||||||
|
*
|
||||||
|
* @param {Object} image - The image object.
|
||||||
|
* @param {number} image.width - The width of the image.
|
||||||
|
* @param {number} image.height - The height of the image.
|
||||||
|
* @returns {number} The calculated token cost measured by tokens.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
calculateImageTokenCost({ width, height }) {
|
||||||
|
return Math.ceil((width * height) / 750);
|
||||||
|
}
|
||||||
|
|
||||||
|
async addImageURLs(message, attachments) {
|
||||||
|
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments, {
|
||||||
|
endpoint: EModelEndpoint.anthropic,
|
||||||
|
});
|
||||||
|
message.image_urls = image_urls.length ? image_urls : undefined;
|
||||||
|
return files;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {object} params
|
||||||
|
* @param {number} params.promptTokens
|
||||||
|
* @param {number} params.completionTokens
|
||||||
|
* @param {AnthropicStreamUsage} [params.usage]
|
||||||
|
* @param {string} [params.model]
|
||||||
|
* @param {string} [params.context='message']
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*/
|
||||||
|
async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) {
|
||||||
|
if (usage != null && usage?.input_tokens != null) {
|
||||||
|
const input = usage.input_tokens ?? 0;
|
||||||
|
const write = usage.cache_creation_input_tokens ?? 0;
|
||||||
|
const read = usage.cache_read_input_tokens ?? 0;
|
||||||
|
|
||||||
|
await spendStructuredTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
promptTokens: { input, write, read },
|
||||||
|
completionTokens,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await spendTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{ promptTokens, completionTokens },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Builds the request payload from conversation history, enforcing the
 * prompt-token budget. For Claude 3+ models this produces a Messages-API
 * `messages` array; for legacy models it produces a single text prompt with
 * `Human:`/`Assistant:` style turns.
 *
 * Side effects: may set `this.message_file_map`, `this.contextHandlers`,
 * `this.augmentedPrompt`, `this.systemMessage`, `this.options.promptPrefix`,
 * `this.options.attachments`, and `this.modelOptions.maxOutputTokens`.
 *
 * @param {TMessage[]} messages - Flat message list for the conversation.
 * @param {string} parentMessageId - Tail of the thread to build from.
 * @returns {Promise<{prompt: string | object[], context: object[], promptTokens: number, tokenCountMap: Record<string, number>}>}
 */
async buildMessages(messages, parentMessageId) {
  // Resolve the linear thread ending at `parentMessageId`.
  const orderedMessages = this.constructor.getMessagesForConversation({
    messages,
    parentMessageId,
  });

  logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });

  if (this.options.attachments) {
    const attachments = await this.options.attachments;
    const images = attachments.filter((file) => file.type.includes('image'));

    // Image attachments require a vision-capable model.
    if (images.length && !this.isVisionModel) {
      throw new Error('Images are only supported with the Claude 3 family of models');
    }

    const latestMessage = orderedMessages[orderedMessages.length - 1];

    // Attachments always belong to the newest message in the thread.
    if (this.message_file_map) {
      this.message_file_map[latestMessage.messageId] = attachments;
    } else {
      this.message_file_map = {
        [latestMessage.messageId]: attachments,
      };
    }

    const files = await this.addImageURLs(latestMessage, attachments);

    this.options.attachments = files;
  }

  // Embedded-file context (RAG) is keyed off the latest message's text.
  if (this.message_file_map) {
    this.contextHandlers = createContextHandlers(
      this.options.req,
      orderedMessages[orderedMessages.length - 1].text,
    );
  }

  const formattedMessages = orderedMessages.map((message, i) => {
    const formattedMessage = this.useMessages
      ? formatMessage({
          message,
          endpoint: EModelEndpoint.anthropic,
        })
      : {
          author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
          content: message?.content ?? message.text,
        };

    const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
    /* If tokens were never counted, or, is a Vision request and the message has files, count again */
    if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
      orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
    }

    /* If message has files, calculate image token cost */
    if (this.message_file_map && this.message_file_map[message.messageId]) {
      const attachments = this.message_file_map[message.messageId];
      for (const file of attachments) {
        // Embedded files contribute context text, not image tokens.
        if (file.embedded) {
          this.contextHandlers?.processFile(file);
          continue;
        }
        // Tool-resource files (with a fileIdentifier) are not sent as images.
        if (file.metadata?.fileIdentifier) {
          continue;
        }

        orderedMessages[i].tokenCount += this.calculateImageTokenCost({
          width: file.width,
          height: file.height,
        });
      }
    }

    formattedMessage.tokenCount = orderedMessages[i].tokenCount;
    return formattedMessage;
  });

  // Prepend the generated file context to the prompt prefix.
  if (this.contextHandlers) {
    this.augmentedPrompt = await this.contextHandlers.createContext();
    this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
  }

  let { context: messagesInWindow, remainingContextTokens } =
    await this.getMessagesWithinTokenLimit({ messages: formattedMessages });

  // Map messageId -> tokenCount for the messages that fit in the window.
  // NOTE(review): `index` here is relative to the *sliced* array but indexes
  // the full `orderedMessages`; when messages were dropped this reads the
  // wrong entry's tokenCount — confirm whether `message.tokenCount` was intended.
  const tokenCountMap = orderedMessages
    .slice(orderedMessages.length - messagesInWindow.length)
    .reduce((map, message, index) => {
      const { messageId } = message;
      if (!messageId) {
        return map;
      }

      map[messageId] = orderedMessages[index].tokenCount;
      return map;
    }, {});

  logger.debug('[AnthropicClient]', {
    messagesInWindow: messagesInWindow.length,
    remainingContextTokens,
  });

  // Anthropic requires strictly alternating roles: merge consecutive
  // same-author messages into one grouped message with an array of contents.
  let lastAuthor = '';
  let groupedMessages = [];

  for (let i = 0; i < messagesInWindow.length; i++) {
    const message = messagesInWindow[i];
    const author = message.role ?? message.author;
    // If last author is not same as current author, add to new group
    if (lastAuthor !== author) {
      const newMessage = {
        content: [message.content],
      };

      if (message.role) {
        newMessage.role = message.role;
      } else {
        newMessage.author = message.author;
      }

      groupedMessages.push(newMessage);
      lastAuthor = author;
      // If same author, append content to the last group
    } else {
      groupedMessages[groupedMessages.length - 1].content.push(message.content);
    }
  }

  // Unwrap single-element content arrays; trim the final assistant turn.
  groupedMessages = groupedMessages.map((msg, i) => {
    const isLast = i === groupedMessages.length - 1;
    if (msg.content.length === 1) {
      const content = msg.content[0];
      return {
        ...msg,
        // reason: final assistant content cannot end with trailing whitespace
        content:
          isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
            ? content?.trim()
            : content,
      };
    }

    if (!this.useMessages && msg.tokenCount) {
      delete msg.tokenCount;
    }

    return msg;
  });

  // Optional identity preamble for the system/context prefix.
  let identityPrefix = '';
  if (this.options.userLabel) {
    identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
  }

  if (this.options.modelLabel) {
    identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
  }

  let promptPrefix = (this.options.promptPrefix ?? '').trim();
  if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
    promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
  }
  if (promptPrefix) {
    // If the prompt prefix doesn't end with the end token, add it.
    if (!promptPrefix.endsWith(`${this.endToken}`)) {
      promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
    }
    promptPrefix = `\nContext:\n${promptPrefix}`;
  }

  if (identityPrefix) {
    promptPrefix = `${identityPrefix}${promptPrefix}`;
  }

  // Prompt AI to respond, empty if last message was from AI
  let isEdited = lastAuthor === this.assistantLabel;
  const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
  let currentTokenCount =
    isEdited || this.useMessages
      ? this.getTokenCount(promptPrefix)
      : this.getTokenCount(promptSuffix);

  let promptBody = '';
  const maxTokenCount = this.maxPromptTokens;

  const context = [];

  // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
  // Do this within a recursive async function so that it doesn't block the event loop for too long.
  // Also, remove the next message when the message that puts us over the token limit is created by the user.
  // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
  const nextMessage = {
    remove: false,
    tokenCount: 0,
    messageString: '',
  };

  // Legacy (text-completion) path: builds a single prompt string.
  const buildPromptBody = async () => {
    if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
      const message = groupedMessages.pop();
      const isCreatedByUser = message.author === this.userLabel;
      // Use promptPrefix if message is edited assistant'
      const messagePrefix =
        isCreatedByUser || !isEdited ? message.author : `${promptPrefix}${message.author}`;
      const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
      let newPromptBody = `${messageString}${promptBody}`;

      context.unshift(message);

      const tokenCountForMessage = this.getTokenCount(messageString);
      const newTokenCount = currentTokenCount + tokenCountForMessage;

      if (!isCreatedByUser) {
        nextMessage.messageString = messageString;
        nextMessage.tokenCount = tokenCountForMessage;
      }

      if (newTokenCount > maxTokenCount) {
        if (!promptBody) {
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
          );
        }

        // Otherwise, this message would put us over the token limit, so don't add it.
        // if created by user, remove next message, otherwise remove only this message
        if (isCreatedByUser) {
          nextMessage.remove = true;
        }

        return false;
      }
      promptBody = newPromptBody;
      currentTokenCount = newTokenCount;

      // Switch off isEdited after using it for the first time
      if (isEdited) {
        isEdited = false;
      }

      // wait for next tick to avoid blocking the event loop
      await new Promise((resolve) => setImmediate(resolve));
      return buildPromptBody();
    }
    return true;
  };

  // Messages-API path: builds an array payload, newest-first consumption.
  const messagesPayload = [];
  const buildMessagesPayload = async () => {
    let canContinue = true;

    if (promptPrefix) {
      this.systemMessage = promptPrefix;
    }

    while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
      const message = groupedMessages.pop();

      let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);

      const newTokenCount = currentTokenCount + tokenCountForMessage;
      const exceededMaxCount = newTokenCount > maxTokenCount;

      if (exceededMaxCount && messagesPayload.length === 0) {
        throw new Error(
          `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
        );
      } else if (exceededMaxCount) {
        canContinue = false;
        break;
      }

      delete message.tokenCount;
      messagesPayload.unshift(message);
      currentTokenCount = newTokenCount;

      // Switch off isEdited after using it once
      if (isEdited && message.role === 'assistant') {
        isEdited = false;
      }

      // Wait for next tick to avoid blocking the event loop
      await new Promise((resolve) => setImmediate(resolve));
    }
  };

  const processTokens = () => {
    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.maxOutputTokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );
  };

  // Claude 3+ (and claude-{sonnet,opus,haiku}-4+) use the Messages API.
  if (
    /claude-[3-9]/.test(this.modelOptions.model) ||
    /claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
  ) {
    await buildMessagesPayload();
    processTokens();
    return {
      prompt: messagesPayload,
      context: messagesInWindow,
      promptTokens: currentTokenCount,
      tokenCountMap,
    };
  } else {
    await buildPromptBody();
    processTokens();
  }

  // Undo the assistant message that overflowed the budget, if flagged.
  if (nextMessage.remove) {
    promptBody = promptBody.replace(nextMessage.messageString, '');
    currentTokenCount -= nextMessage.tokenCount;
    context.shift();
  }

  let prompt = `${promptBody}${promptSuffix}`;

  return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
}
|
||||||
|
|
||||||
|
/**
 * No-op base-class hook: completion creation for this client is handled
 * entirely inside `sendCompletion`.
 */
getCompletion() {
  logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)");
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a message or completion response using the Anthropic client.
|
||||||
|
* @param {Anthropic} client - The Anthropic client instance.
|
||||||
|
* @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
|
||||||
|
* @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
|
||||||
|
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
|
||||||
|
*/
|
||||||
|
async createResponse(client, options, useMessages) {
|
||||||
|
return (useMessages ?? this.useMessages)
|
||||||
|
? await client.messages.create(options)
|
||||||
|
: await client.completions.create(options);
|
||||||
|
}
|
||||||
|
|
||||||
|
getMessageMapMethod() {
|
||||||
|
/**
|
||||||
|
* @param {TMessage} msg
|
||||||
|
*/
|
||||||
|
return (msg) => {
|
||||||
|
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
|
||||||
|
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
|
||||||
|
} else if (msg.content != null) {
|
||||||
|
msg.text = parseTextParts(msg.content, true);
|
||||||
|
delete msg.content;
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {string[]} [intermediateReply]
|
||||||
|
* @returns {string}
|
||||||
|
*/
|
||||||
|
getStreamText(intermediateReply) {
|
||||||
|
if (!this.streamHandler) {
|
||||||
|
return intermediateReply?.join('') ?? '';
|
||||||
|
}
|
||||||
|
|
||||||
|
const reasoningText = this.streamHandler.reasoningTokens.join('');
|
||||||
|
|
||||||
|
const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
|
||||||
|
|
||||||
|
return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async sendCompletion(payload, { onProgress, abortController }) {
|
||||||
|
if (!abortController) {
|
||||||
|
abortController = new AbortController();
|
||||||
|
}
|
||||||
|
|
||||||
|
const { signal } = abortController;
|
||||||
|
|
||||||
|
const modelOptions = { ...this.modelOptions };
|
||||||
|
if (typeof onProgress === 'function') {
|
||||||
|
modelOptions.stream = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('modelOptions', { modelOptions });
|
||||||
|
const metadata = {
|
||||||
|
user_id: this.user,
|
||||||
|
};
|
||||||
|
|
||||||
|
const {
|
||||||
|
stream,
|
||||||
|
model,
|
||||||
|
temperature,
|
||||||
|
maxOutputTokens,
|
||||||
|
stop: stop_sequences,
|
||||||
|
topP: top_p,
|
||||||
|
topK: top_k,
|
||||||
|
} = this.modelOptions;
|
||||||
|
|
||||||
|
let requestOptions = {
|
||||||
|
model,
|
||||||
|
stream: stream || true,
|
||||||
|
stop_sequences,
|
||||||
|
temperature,
|
||||||
|
metadata,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.useMessages) {
|
||||||
|
requestOptions.messages = payload;
|
||||||
|
requestOptions.max_tokens =
|
||||||
|
maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
|
||||||
|
} else {
|
||||||
|
requestOptions.prompt = payload;
|
||||||
|
requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
|
||||||
|
}
|
||||||
|
|
||||||
|
requestOptions = configureReasoning(requestOptions, {
|
||||||
|
thinking: this.options.thinking,
|
||||||
|
thinkingBudget: this.options.thinkingBudget,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!/claude-3[-.]7/.test(model)) {
|
||||||
|
requestOptions.top_p = top_p;
|
||||||
|
requestOptions.top_k = top_k;
|
||||||
|
} else if (requestOptions.thinking == null) {
|
||||||
|
requestOptions.topP = top_p;
|
||||||
|
requestOptions.topK = top_k;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.systemMessage && this.supportsCacheControl === true) {
|
||||||
|
requestOptions.system = [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: this.systemMessage,
|
||||||
|
cache_control: { type: 'ephemeral' },
|
||||||
|
},
|
||||||
|
];
|
||||||
|
} else if (this.systemMessage) {
|
||||||
|
requestOptions.system = this.systemMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.supportsCacheControl === true && this.useMessages) {
|
||||||
|
requestOptions.messages = addCacheControl(requestOptions.messages);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('[AnthropicClient]', { ...requestOptions });
|
||||||
|
const handlers = createStreamEventHandlers(this.options.res);
|
||||||
|
this.streamHandler = new SplitStreamHandler({
|
||||||
|
accumulate: true,
|
||||||
|
runId: this.responseMessageId,
|
||||||
|
handlers,
|
||||||
|
});
|
||||||
|
|
||||||
|
let intermediateReply = this.streamHandler.tokens;
|
||||||
|
|
||||||
|
const maxRetries = 3;
|
||||||
|
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
|
||||||
|
async function processResponse() {
|
||||||
|
let attempts = 0;
|
||||||
|
|
||||||
|
while (attempts < maxRetries) {
|
||||||
|
let response;
|
||||||
|
try {
|
||||||
|
const client = this.getClient(requestOptions);
|
||||||
|
response = await this.createResponse(client, requestOptions);
|
||||||
|
|
||||||
|
signal.addEventListener('abort', () => {
|
||||||
|
logger.debug('[AnthropicClient] message aborted!');
|
||||||
|
if (response.controller?.abort) {
|
||||||
|
response.controller.abort();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
for await (const completion of response) {
|
||||||
|
const type = completion?.type ?? '';
|
||||||
|
if (tokenEventTypes.has(type)) {
|
||||||
|
logger.debug(`[AnthropicClient] ${type}`, completion);
|
||||||
|
this[type] = completion;
|
||||||
|
}
|
||||||
|
this.streamHandler.handle(completion);
|
||||||
|
await sleep(streamRate);
|
||||||
|
}
|
||||||
|
|
||||||
|
break;
|
||||||
|
} catch (error) {
|
||||||
|
attempts += 1;
|
||||||
|
logger.warn(
|
||||||
|
`User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
if (attempts < maxRetries) {
|
||||||
|
await delayBeforeRetry(attempts, 350);
|
||||||
|
} else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
|
||||||
|
return this.getStreamText();
|
||||||
|
} else if (intermediateReply.length > 0) {
|
||||||
|
return this.getStreamText(intermediateReply);
|
||||||
|
} else {
|
||||||
|
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
signal.removeEventListener('abort', () => {
|
||||||
|
logger.debug('[AnthropicClient] message aborted!');
|
||||||
|
if (response.controller?.abort) {
|
||||||
|
response.controller.abort();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await processResponse.bind(this)();
|
||||||
|
return this.getStreamText(intermediateReply);
|
||||||
|
}
|
||||||
|
|
||||||
|
getSaveOptions() {
|
||||||
|
return {
|
||||||
|
maxContextTokens: this.options.maxContextTokens,
|
||||||
|
artifacts: this.options.artifacts,
|
||||||
|
promptPrefix: this.options.promptPrefix,
|
||||||
|
modelLabel: this.options.modelLabel,
|
||||||
|
promptCache: this.options.promptCache,
|
||||||
|
thinking: this.options.thinking,
|
||||||
|
thinkingBudget: this.options.thinkingBudget,
|
||||||
|
resendFiles: this.options.resendFiles,
|
||||||
|
iconURL: this.options.iconURL,
|
||||||
|
greeting: this.options.greeting,
|
||||||
|
spec: this.options.spec,
|
||||||
|
...this.modelOptions,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * No-op base-class hook: this client needs no extra options for
 * `buildMessages`.
 */
getBuildMessagesOptions() {
  logger.debug("AnthropicClient doesn't use getBuildMessagesOptions");
}
|
||||||
|
|
||||||
|
/**
 * Tokenizer encoding used for client-side token counting.
 * @returns {string} The tiktoken encoding name.
 */
getEncoding() {
  return 'cl100k_base';
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
|
||||||
|
* @param {string} text - The text to get the token count for.
|
||||||
|
* @returns {number} The token count of the given text.
|
||||||
|
*/
|
||||||
|
getTokenCount(text) {
|
||||||
|
const encoding = this.getEncoding();
|
||||||
|
return Tokenizer.getTokenCount(text, encoding);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Generates a concise title for a conversation based on the user's input text and response.
 * Involves sending a chat completion request with specific instructions for title generation.
 *
 * This function capitalizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
 *
 * @param {Object} params - The parameters for the conversation title generation.
 * @param {string} params.text - The user's input.
 * @param {string} [params.responseText=''] - The AI's immediate response to the user.
 *
 * @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
 * In case of failure, it will return the default title, "New Chat".
 */
async titleConvo({ text, responseText = '' }) {
  let title = 'New Chat';
  // Clear stashed usage events so title-call usage isn't mixed with the
  // main message's usage accounting.
  this.message_delta = undefined;
  this.message_start = undefined;
  const convo = `<initial_message>
${truncateText(text)}
</initial_message>
<response>
${JSON.stringify(truncateText(responseText))}
</response>`;

  const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
  // Precedence: configured titleModel > env override > haiku default.
  const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
  const system = titleFunctionPrompt;

  const titleChatCompletion = async () => {
    const content = `<conversation_context>
${convo}
</conversation_context>

Please generate a title for this conversation.`;

    const titleMessage = { role: 'user', content };
    const requestOptions = {
      model,
      temperature: 0.3,
      max_tokens: 1024,
      system,
      stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
      messages: [titleMessage],
    };

    try {
      // Always use the Messages API (third argument `true`) for titling.
      const response = await this.createResponse(
        this.getClient(requestOptions),
        requestOptions,
        true,
      );
      let promptTokens = response?.usage?.input_tokens;
      let completionTokens = response?.usage?.output_tokens;
      // Fall back to local token counting when the API omitted usage.
      if (!promptTokens) {
        promptTokens = this.getTokenCountForMessage(titleMessage);
        promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
      }
      if (!completionTokens) {
        completionTokens = this.getTokenCountForMessage(response.content[0]);
      }
      await this.recordTokenUsage({
        model,
        promptTokens,
        completionTokens,
        context: 'title',
      });
      const text = response.content[0].text;
      title = parseParamFromPrompt(text, 'title');
    } catch (e) {
      // Best-effort: keep the 'New Chat' default on any failure.
      logger.error('[AnthropicClient] There was an issue generating the title', e);
    }
  };

  await titleChatCompletion();
  logger.debug('[AnthropicClient] Convo Title: ' + title);
  return title;
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = AnthropicClient;
|
||||||
|
|
@ -20,17 +20,11 @@ const {
|
||||||
isAgentsEndpoint,
|
isAgentsEndpoint,
|
||||||
supportsBalanceCheck,
|
supportsBalanceCheck,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const {
|
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
|
||||||
updateMessage,
|
|
||||||
getMessages,
|
|
||||||
saveMessage,
|
|
||||||
saveConvo,
|
|
||||||
getConvo,
|
|
||||||
getFiles,
|
|
||||||
} = require('~/models');
|
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { checkBalance } = require('~/models/balanceMethods');
|
const { checkBalance } = require('~/models/balanceMethods');
|
||||||
const { truncateToolCallOutputs } = require('./prompts');
|
const { truncateToolCallOutputs } = require('./prompts');
|
||||||
|
const { getFiles } = require('~/models/File');
|
||||||
const TextStream = require('./TextStream');
|
const TextStream = require('./TextStream');
|
||||||
|
|
||||||
class BaseClient {
|
class BaseClient {
|
||||||
|
|
@ -966,13 +960,6 @@ class BaseClient {
|
||||||
|
|
||||||
const unsetFields = {};
|
const unsetFields = {};
|
||||||
const exceptions = new Set(['spec', 'iconURL']);
|
const exceptions = new Set(['spec', 'iconURL']);
|
||||||
const hasNonEphemeralAgent =
|
|
||||||
isAgentsEndpoint(this.options.endpoint) &&
|
|
||||||
endpointOptions?.agent_id &&
|
|
||||||
endpointOptions.agent_id !== Constants.EPHEMERAL_AGENT_ID;
|
|
||||||
if (hasNonEphemeralAgent) {
|
|
||||||
exceptions.add('model');
|
|
||||||
}
|
|
||||||
if (existingConvo != null) {
|
if (existingConvo != null) {
|
||||||
this.fetchedConvo = true;
|
this.fetchedConvo = true;
|
||||||
for (const key in existingConvo) {
|
for (const key in existingConvo) {
|
||||||
|
|
|
||||||
994
api/app/clients/GoogleClient.js
Normal file
994
api/app/clients/GoogleClient.js
Normal file
|
|
@ -0,0 +1,994 @@
|
||||||
|
const { google } = require('googleapis');
|
||||||
|
const { sleep } = require('@librechat/agents');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { getModelMaxTokens } = require('@librechat/api');
|
||||||
|
const { concat } = require('@langchain/core/utils/stream');
|
||||||
|
const { ChatVertexAI } = require('@langchain/google-vertexai');
|
||||||
|
const { Tokenizer, getSafetySettings } = require('@librechat/api');
|
||||||
|
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
|
||||||
|
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
|
||||||
|
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
|
||||||
|
const {
|
||||||
|
googleGenConfigSchema,
|
||||||
|
validateVisionModel,
|
||||||
|
getResponseSender,
|
||||||
|
endpointSettings,
|
||||||
|
parseTextParts,
|
||||||
|
EModelEndpoint,
|
||||||
|
googleSettings,
|
||||||
|
ContentTypes,
|
||||||
|
VisionModes,
|
||||||
|
ErrorTypes,
|
||||||
|
Constants,
|
||||||
|
AuthKeys,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
|
const { encodeAndFormat } = require('~/server/services/Files/images');
|
||||||
|
const { spendTokens } = require('~/models/spendTokens');
|
||||||
|
const {
|
||||||
|
formatMessage,
|
||||||
|
createContextHandlers,
|
||||||
|
titleInstruction,
|
||||||
|
truncateText,
|
||||||
|
} = require('./prompts');
|
||||||
|
const BaseClient = require('./BaseClient');
|
||||||
|
|
||||||
|
// Vertex AI region; the 'global' location uses the region-less hostname.
const loc = process.env.GOOGLE_LOC || 'us-central1';
const publisher = 'google';
const endpointPrefix =
  loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`;

// Default endpoint settings for the Google endpoint.
const settings = endpointSettings[EModelEndpoint.google];
// Gemini models that must NOT use the GenerativeAI (GenAI) client path.
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
|
||||||
|
|
||||||
|
class GoogleClient extends BaseClient {
|
||||||
|
/**
 * @param {string | object} credentials - Either a JSON string or an object
 *   holding `AuthKeys.GOOGLE_SERVICE_KEY` (Vertex service account) and/or
 *   `AuthKeys.GOOGLE_API_KEY` (Generative AI API key).
 * @param {object} [options={}] - Client options; `skipSetOptions` defers
 *   option processing to a later explicit `setOptions` call.
 */
constructor(credentials, options = {}) {
  super('apiKey', options);
  let creds = {};

  // Credentials may arrive serialized (from user storage) or as an object.
  if (typeof credentials === 'string') {
    creds = JSON.parse(credentials);
  } else if (credentials) {
    creds = credentials;
  }

  const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
  // The service key itself may be double-encoded as a JSON string.
  this.serviceKey =
    serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? {});
  /** @type {string | null | undefined} */
  this.project_id = this.serviceKey.project_id;
  this.client_email = this.serviceKey.client_email;
  this.private_key = this.serviceKey.private_key;
  this.access_token = null;

  this.apiKey = creds[AuthKeys.GOOGLE_API_KEY];

  this.reverseProxyUrl = options.reverseProxyUrl;

  this.authHeader = options.authHeader;

  /** @type {UsageMetadata | undefined} */
  this.usage;
  /** The key for the usage object's input tokens
   * @type {string} */
  this.inputTokensKey = 'input_tokens';
  /** The key for the usage object's output tokens
   * @type {string} */
  this.outputTokensKey = 'output_tokens';
  this.visionMode = VisionModes.generative;
  /** @type {string} */
  this.systemMessage;
  // Callers that configure lazily (e.g. title generation) skip setOptions.
  if (options.skipSetOptions) {
    return;
  }
  this.setOptions(options);
}
|
||||||
|
|
||||||
|
/* Google specific methods */
|
||||||
|
constructUrl() {
|
||||||
|
return `https://${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Creates a JWT client for the configured service account and kicks off
 * authorization.
 *
 * NOTE(review): `authorize` is fire-and-forget here — the client is returned
 * before authorization completes, and a `throw` inside the callback cannot
 * propagate to the caller (it surfaces as an unhandled error). Confirm
 * whether callers rely on googleapis' lazy re-auth, or whether this should
 * await authorization like `getAccessToken` does.
 *
 * @returns {Promise<import('googleapis').Auth.JWT>}
 */
async getClient() {
  const scopes = ['https://www.googleapis.com/auth/cloud-platform'];
  const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes);

  jwtClient.authorize((err) => {
    if (err) {
      logger.error('jwtClient failed to authorize', err);
      throw err;
    }
  });

  return jwtClient;
}
|
||||||
|
|
||||||
|
async getAccessToken() {
|
||||||
|
const scopes = ['https://www.googleapis.com/auth/cloud-platform'];
|
||||||
|
const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes);
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
jwtClient.authorize((err, tokens) => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('jwtClient failed to authorize', err);
|
||||||
|
reject(err);
|
||||||
|
} else {
|
||||||
|
resolve(tokens.access_token);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Required Client methods */
|
||||||
|
  /**
   * Merges incoming options into the client and derives all per-request state:
   * model options, token budgets, thinking config, sender labels, completion
   * URL, and the system message. Returns `this` for chaining.
   * @param {object} options - Request options; `options.replaceOptions` forces a full replace.
   * @returns {this}
   * @throws {Error} If the prompt + response budget exceeds the context window.
   */
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // nested options aren't spread properly, so we need to do this manually
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      // now we can merge options
      this.options = {
        ...this.options,
        ...options,
      };
    } else {
      this.options = options;
    }

    this.modelOptions = this.options.modelOptions || {};

    // Attachments may be a pending Promise; vision check runs when it settles.
    this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

    /** @type {boolean} Whether using a "GenerativeAI" Model */
    this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model);

    this.maxContextTokens =
      this.options.maxContextTokens ??
      getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google);

    // The max prompt tokens is determined by the max context tokens minus the max response tokens.
    // Earlier messages will be dropped until the prompt is within the limit.
    this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default;

    // For large-context models, reserve the response budget up front.
    if (this.maxContextTokens > 32000) {
      this.maxContextTokens = this.maxContextTokens - this.maxResponseTokens;
    }

    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

    if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
      throw new Error(
        `maxPromptTokens + maxOutputTokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
          this.maxPromptTokens + this.maxResponseTokens
        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
      );
    }

    // Add thinking configuration; budget is zeroed when thinking is disabled.
    this.modelOptions.thinkingConfig = {
      thinkingBudget:
        (this.modelOptions.thinking ?? googleSettings.thinking.default)
          ? this.modelOptions.thinkingBudget
          : 0,
    };
    // The raw flags are consumed above and must not reach the API payload.
    delete this.modelOptions.thinking;
    delete this.modelOptions.thinkingBudget;

    this.sender =
      this.options.sender ??
      getResponseSender({
        model: this.modelOptions.model,
        endpoint: EModelEndpoint.google,
        modelLabel: this.options.modelLabel,
      });

    this.userLabel = this.options.userLabel || 'User';
    this.modelLabel = this.options.modelLabel || 'Assistant';

    if (this.options.reverseProxyUrl) {
      this.completionsUrl = this.options.reverseProxyUrl;
    } else {
      this.completionsUrl = this.constructUrl();
    }

    // Combine the user prompt prefix with the artifacts prompt, if configured.
    let promptPrefix = (this.options.promptPrefix ?? '').trim();
    if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
      promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
    }
    this.systemMessage = promptPrefix;
    this.initializeClient();
    return this;
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
|
||||||
|
* @param {MongoFile[]} attachments
|
||||||
|
*/
|
||||||
|
checkVisionRequest(attachments) {
|
||||||
|
/* Validation vision request */
|
||||||
|
this.defaultVisionModel =
|
||||||
|
this.options.visionModel ??
|
||||||
|
(!EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)
|
||||||
|
? this.modelOptions.model
|
||||||
|
: 'gemini-pro-vision');
|
||||||
|
const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
|
||||||
|
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
|
||||||
|
|
||||||
|
if (
|
||||||
|
attachments &&
|
||||||
|
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
|
||||||
|
availableModels?.includes(this.defaultVisionModel) &&
|
||||||
|
!this.isVisionModel
|
||||||
|
) {
|
||||||
|
this.modelOptions.model = this.defaultVisionModel;
|
||||||
|
this.isVisionModel = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.isVisionModel && !attachments && this.modelOptions.model.includes('gemini-pro')) {
|
||||||
|
this.modelOptions.model = 'gemini-pro';
|
||||||
|
this.isVisionModel = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
formatMessages() {
|
||||||
|
return ((message) => {
|
||||||
|
const msg = {
|
||||||
|
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
|
||||||
|
content: message?.content ?? message.text,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!message.image_urls?.length) {
|
||||||
|
return msg;
|
||||||
|
}
|
||||||
|
|
||||||
|
msg.content = (
|
||||||
|
!Array.isArray(msg.content)
|
||||||
|
? [
|
||||||
|
{
|
||||||
|
type: ContentTypes.TEXT,
|
||||||
|
[ContentTypes.TEXT]: msg.content,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
: msg.content
|
||||||
|
).concat(message.image_urls);
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
}).bind(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Formats messages into the GenAI `{ role, parts }` content shape.
   * Side effects: replaces `this.options.attachments` with the processed
   * files, and replaces the last element of `messages` with a copy that has
   * `image_urls` attached.
   * @param {TMessage[]} messages
   * @returns {Promise<Array<{ role: string, parts: Array<object> }>>}
   */
  async formatGenerativeMessages(messages) {
    const formattedMessages = [];
    const attachments = await this.options.attachments;
    // Copy the latest message so image URLs can be attached without mutating the original.
    const latestMessage = { ...messages[messages.length - 1] };
    const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
    this.options.attachments = files;
    messages[messages.length - 1] = latestMessage;

    for (const _message of messages) {
      const role = _message.isCreatedByUser ? this.userLabel : this.modelLabel;
      const parts = [];
      parts.push({ text: _message.text });
      if (!_message.image_urls?.length) {
        formattedMessages.push({ role, parts });
        continue;
      }

      // Only inline-data images are forwarded as parts.
      for (const images of _message.image_urls) {
        if (images.inlineData) {
          parts.push({ inlineData: images.inlineData });
        }
      }

      formattedMessages.push({ role, parts });
    }

    return formattedMessages;
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Adds image URLs to the message object and returns the files
|
||||||
|
*
|
||||||
|
* @param {TMessage[]} messages
|
||||||
|
* @param {MongoFile[]} files
|
||||||
|
* @returns {Promise<MongoFile[]>}
|
||||||
|
*/
|
||||||
|
async addImageURLs(message, attachments, mode = '') {
|
||||||
|
const { files, image_urls } = await encodeAndFormat(
|
||||||
|
this.options.req,
|
||||||
|
attachments,
|
||||||
|
{
|
||||||
|
endpoint: EModelEndpoint.google,
|
||||||
|
},
|
||||||
|
mode,
|
||||||
|
);
|
||||||
|
message.image_urls = image_urls.length ? image_urls : undefined;
|
||||||
|
return files;
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Builds the augmented prompt for attachments
   * TODO: Add File API Support
   * Prepends RAG context (from embedded files) to `this.systemMessage`.
   * @param {TMessage[]} messages
   */
  async buildAugmentedPrompt(messages = []) {
    const attachments = await this.options.attachments;
    const latestMessage = { ...messages[messages.length - 1] };
    this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);

    if (this.contextHandlers) {
      for (const file of attachments) {
        if (file.embedded) {
          this.contextHandlers?.processFile(file);
          continue;
        }
        // NOTE(review): this branch is currently a no-op — files with a
        // `fileIdentifier` are skipped with no other handling; presumably a
        // placeholder for future File API support (see TODO above).
        if (file.metadata?.fileIdentifier) {
          continue;
        }
      }

      this.augmentedPrompt = await this.contextHandlers.createContext();
      this.systemMessage = this.augmentedPrompt + this.systemMessage;
    }
  }
|
||||||
|
|
||||||
|
async buildVisionMessages(messages = [], parentMessageId) {
|
||||||
|
const attachments = await this.options.attachments;
|
||||||
|
const latestMessage = { ...messages[messages.length - 1] };
|
||||||
|
await this.buildAugmentedPrompt(messages);
|
||||||
|
|
||||||
|
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
|
||||||
|
|
||||||
|
const files = await this.addImageURLs(latestMessage, attachments);
|
||||||
|
|
||||||
|
this.options.attachments = files;
|
||||||
|
|
||||||
|
latestMessage.text = prompt;
|
||||||
|
|
||||||
|
const payload = {
|
||||||
|
instances: [
|
||||||
|
{
|
||||||
|
messages: [new HumanMessage(formatMessage({ message: latestMessage }))],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
return { prompt: payload };
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @param {TMessage[]} [messages=[]] */
|
||||||
|
async buildGenerativeMessages(messages = []) {
|
||||||
|
this.userLabel = 'user';
|
||||||
|
this.modelLabel = 'model';
|
||||||
|
const promises = [];
|
||||||
|
promises.push(await this.formatGenerativeMessages(messages));
|
||||||
|
promises.push(this.buildAugmentedPrompt(messages));
|
||||||
|
const [formattedMessages] = await Promise.all(promises);
|
||||||
|
return { prompt: formattedMessages };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {TMessage[]} [messages=[]]
|
||||||
|
* @param {string} [parentMessageId]
|
||||||
|
*/
|
||||||
|
async buildMessages(_messages = [], parentMessageId) {
|
||||||
|
if (!this.isGenerativeModel && !this.project_id) {
|
||||||
|
throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.');
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.systemMessage) {
|
||||||
|
const instructionsTokenCount = this.getTokenCount(this.systemMessage);
|
||||||
|
|
||||||
|
this.maxContextTokens = this.maxContextTokens - instructionsTokenCount;
|
||||||
|
if (this.maxContextTokens < 0) {
|
||||||
|
const info = `${instructionsTokenCount} / ${this.maxContextTokens}`;
|
||||||
|
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
|
||||||
|
logger.warn(`Instructions token count exceeds max context (${info}).`);
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = 0; i < _messages.length; i++) {
|
||||||
|
const message = _messages[i];
|
||||||
|
if (!message.tokenCount) {
|
||||||
|
_messages[i].tokenCount = this.getTokenCountForMessage({
|
||||||
|
role: message.isCreatedByUser ? 'user' : 'assistant',
|
||||||
|
content: message.content ?? message.text,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const {
|
||||||
|
payload: messages,
|
||||||
|
tokenCountMap,
|
||||||
|
promptTokens,
|
||||||
|
} = await this.handleContextStrategy({
|
||||||
|
orderedMessages: _messages,
|
||||||
|
formattedMessages: _messages,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) {
|
||||||
|
const result = await this.buildGenerativeMessages(messages);
|
||||||
|
result.tokenCountMap = tokenCountMap;
|
||||||
|
result.promptTokens = promptTokens;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.options.attachments && this.isGenerativeModel) {
|
||||||
|
const result = this.buildVisionMessages(messages, parentMessageId);
|
||||||
|
result.tokenCountMap = tokenCountMap;
|
||||||
|
result.promptTokens = promptTokens;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload = {
|
||||||
|
instances: [
|
||||||
|
{
|
||||||
|
messages: messages
|
||||||
|
.map(this.formatMessages())
|
||||||
|
.map((msg) => ({ ...msg, role: msg.author === 'User' ? 'user' : 'assistant' }))
|
||||||
|
.map((message) => formatMessage({ message, langChain: true })),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
if (this.systemMessage) {
|
||||||
|
payload.instances[0].context = this.systemMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('[GoogleClient] buildMessages', payload);
|
||||||
|
return { prompt: payload, tokenCountMap, promptTokens };
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Collapses a conversation into a single text prompt for the legacy
   * (non-chat) completion path. Groups consecutive messages by author,
   * then packs them newest-first into the prompt until `maxPromptTokens`
   * is reached. Also sets `this.modelOptions.maxOutputTokens` from the
   * remaining context budget.
   * @param {TMessage[]} messages
   * @param {string} parentMessageId
   * @returns {Promise<{ prompt: string, context: Array }>}
   * @throws {Error} If even the newest message exceeds the token budget.
   */
  async buildMessagesPrompt(messages, parentMessageId) {
    const orderedMessages = this.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
    });

    logger.debug('[GoogleClient]', {
      orderedMessages,
      parentMessageId,
    });

    const formattedMessages = orderedMessages.map(this.formatMessages());

    let lastAuthor = '';
    let groupedMessages = [];

    for (let message of formattedMessages) {
      // If last author is not same as current author, add to new group
      if (lastAuthor !== message.author) {
        groupedMessages.push({
          author: message.author,
          content: [message.content],
        });
        lastAuthor = message.author;
        // If same author, append content to the last group
      } else {
        groupedMessages[groupedMessages.length - 1].content.push(message.content);
      }
    }

    // Optional identity lines prepended to the prompt prefix.
    let identityPrefix = '';
    if (this.options.userLabel) {
      identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
    }

    if (this.options.modelLabel) {
      identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
    }

    let promptPrefix = (this.systemMessage ?? '').trim();

    if (identityPrefix) {
      promptPrefix = `${identityPrefix}${promptPrefix}`;
    }

    // Prompt AI to respond, empty if last message was from AI
    let isEdited = lastAuthor === this.modelLabel;
    const promptSuffix = isEdited ? '' : `${promptPrefix}\n\n${this.modelLabel}:\n`;
    let currentTokenCount = isEdited
      ? this.getTokenCount(promptPrefix)
      : this.getTokenCount(promptSuffix);

    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    // Messages that made it into the prompt, oldest first.
    const context = [];

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    // Also, remove the next message when the message that puts us over the token limit is created by the user.
    // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
    const nextMessage = {
      remove: false,
      tokenCount: 0,
      messageString: '',
    };

    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
        const message = groupedMessages.pop();
        const isCreatedByUser = message.author === this.userLabel;
        // Use promptPrefix if message is an edited assistant message
        const messagePrefix =
          isCreatedByUser || !isEdited
            ? `\n\n${message.author}:`
            : `${promptPrefix}\n\n${message.author}:`;
        const messageString = `${messagePrefix}\n${message.content}\n`;
        let newPromptBody = `${messageString}${promptBody}`;

        context.unshift(message);

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;

        // Remember the most recent assistant message in case it must be removed below.
        if (!isCreatedByUser) {
          nextMessage.messageString = messageString;
          nextMessage.tokenCount = tokenCountForMessage;
        }

        if (newTokenCount > maxTokenCount) {
          if (!promptBody) {
            // This is the first message, so we can't add it. Just throw an error.
            throw new Error(
              `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
            );
          }

          // Otherwise, this message would put us over the token limit, so don't add it.
          // if created by user, remove next message, otherwise remove only this message
          if (isCreatedByUser) {
            nextMessage.remove = true;
          }

          return false;
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;

        // Switch off isEdited after using it for the first time
        if (isEdited) {
          isEdited = false;
        }

        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setImmediate(resolve));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();

    if (nextMessage.remove) {
      promptBody = promptBody.replace(nextMessage.messageString, '');
      currentTokenCount -= nextMessage.tokenCount;
      context.shift();
    }

    let prompt = `${promptBody}${promptSuffix}`.trim();

    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.maxOutputTokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    return { prompt, context };
  }
|
||||||
|
|
||||||
|
  /**
   * Creates the underlying LLM client for the configured credentials:
   * - `ChatVertexAI` when a service-account `project_id` is present;
   * - the raw GenAI `GenerativeModel` for non-excluded GenAI models;
   * - `ChatGoogleGenerativeAI` otherwise.
   * Note: mutates `clientOptions` (sets `location`/`endpoint`, reads
   * module-scope `loc` and `endpointPrefix`).
   * @param {object} clientOptions
   * @returns {ChatVertexAI|GenerativeModel|ChatGoogleGenerativeAI}
   */
  createLLM(clientOptions) {
    const model = clientOptions.modelName ?? clientOptions.model;
    clientOptions.location = loc;
    clientOptions.endpoint = endpointPrefix;

    // Reverse-proxy configuration only applies to the GenAI client path.
    let requestOptions = null;
    if (this.reverseProxyUrl) {
      requestOptions = {
        baseUrl: this.reverseProxyUrl,
      };

      if (this.authHeader) {
        requestOptions.customHeaders = {
          Authorization: `Bearer ${this.apiKey}`,
        };
      }
    }

    if (this.project_id != null) {
      logger.debug('Creating VertexAI client');
      this.visionMode = undefined;
      clientOptions.streaming = true;
      const client = new ChatVertexAI(clientOptions);
      // Copy tuning parameters onto the client instance explicitly.
      client.temperature = clientOptions.temperature;
      client.topP = clientOptions.topP;
      client.topK = clientOptions.topK;
      client.topLogprobs = clientOptions.topLogprobs;
      client.frequencyPenalty = clientOptions.frequencyPenalty;
      client.presencePenalty = clientOptions.presencePenalty;
      client.maxOutputTokens = clientOptions.maxOutputTokens;
      return client;
    } else if (!EXCLUDED_GENAI_MODELS.test(model)) {
      logger.debug('Creating GenAI client');
      return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions);
    }

    logger.debug('Creating Chat Google Generative AI client');
    return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
  }
|
||||||
|
|
||||||
|
initializeClient() {
|
||||||
|
let clientOptions = { ...this.modelOptions };
|
||||||
|
|
||||||
|
if (this.project_id) {
|
||||||
|
clientOptions['authOptions'] = {
|
||||||
|
credentials: {
|
||||||
|
...this.serviceKey,
|
||||||
|
},
|
||||||
|
projectId: this.project_id,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.isGenerativeModel && !this.project_id) {
|
||||||
|
clientOptions.modelName = clientOptions.model;
|
||||||
|
delete clientOptions.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.client = this.createLLM(clientOptions);
|
||||||
|
return this.client;
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Streams a completion from either the GenAI SDK (non-Vertex path) or the
   * LangChain client (Vertex / chat path), forwarding text chunks via
   * `onProgress` and recording usage metadata on `this.usage`.
   * @param {object|Array} _payload - GenAI `contents` array or `{ instances }` payload.
   * @param {{ onProgress?: Function, abortController?: AbortController }} [options]
   * @returns {Promise<string>} The full concatenated reply text.
   * @throws {Error} A JSON-encoded GoogleError when streaming failed before any text arrived.
   */
  async getCompletion(_payload, options = {}) {
    const { onProgress, abortController } = options;
    const safetySettings = getSafetySettings(this.modelOptions.model);
    const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
    const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? '';

    let reply = '';
    /** @type {Error} */
    let error;
    try {
      // GenAI SDK path (no Vertex project configured).
      if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
        /** @type {GenerativeModel} */
        const client = this.client;
        /** @type {GenerateContentRequest} */
        const requestOptions = {
          safetySettings,
          contents: _payload,
          generationConfig: googleGenConfigSchema.parse(this.modelOptions),
        };

        // System instructions are passed separately from the contents.
        const promptPrefix = (this.systemMessage ?? '').trim();
        if (promptPrefix.length) {
          requestOptions.systemInstruction = {
            parts: [
              {
                text: promptPrefix,
              },
            ],
          };
        }

        // Per-chunk typing delay; flash models stream faster.
        const delay = modelName.includes('flash') ? 8 : 15;
        /** @type {GenAIUsageMetadata} */
        let usageMetadata;

        abortController.signal.addEventListener(
          'abort',
          () => {
            logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason);
          },
          { once: true },
        );

        const result = await client.generateContentStream(requestOptions, {
          signal: abortController.signal,
        });
        for await (const chunk of result.stream) {
          // Later chunks overwrite earlier usage fields (cumulative metadata).
          usageMetadata = !usageMetadata
            ? chunk?.usageMetadata
            : Object.assign(usageMetadata, chunk?.usageMetadata);
          const chunkText = chunk.text();
          await this.generateTextStream(chunkText, onProgress, {
            delay,
          });
          reply += chunkText;
          await sleep(streamRate);
        }

        if (usageMetadata) {
          this.usage = {
            input_tokens: usageMetadata.promptTokenCount,
            output_tokens: usageMetadata.candidatesTokenCount,
          };
        }

        return reply;
      }

      // LangChain client path (Vertex AI / chat models).
      const { instances } = _payload;
      const { messages: messages, context } = instances?.[0] ?? {};

      // Prepend the system context for non-vision requests.
      if (!this.isVisionModel && context && messages?.length > 0) {
        messages.unshift(new SystemMessage(context));
      }

      /** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */
      let usageMetadata;
      /** @type {ChatVertexAI} */
      const client = this.client;
      const stream = await client.stream(messages, {
        signal: abortController.signal,
        streamUsage: true,
        safetySettings,
      });

      let delay = this.options.streamRate || 8;

      // Without an explicit stream rate, pick a delay based on the model family.
      if (!this.options.streamRate) {
        if (this.isGenerativeModel) {
          delay = 15;
        }
        if (modelName.includes('flash')) {
          delay = 5;
        }
      }

      for await (const chunk of stream) {
        if (chunk?.usage_metadata) {
          const metadata = chunk.usage_metadata;
          // Drop NaN fields so they don't poison the accumulated totals.
          for (const key in metadata) {
            if (Number.isNaN(metadata[key])) {
              delete metadata[key];
            }
          }

          usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata);
        }

        const chunkText = chunk?.content ?? '';
        await this.generateTextStream(chunkText, onProgress, {
          delay,
        });
        reply += chunkText;
      }

      if (usageMetadata) {
        this.usage = usageMetadata;
      }
    } catch (e) {
      error = e;
      logger.error('[GoogleClient] There was an issue generating the completion', e);
    }

    // Only surface the error when nothing was streamed; partial replies are returned.
    if (error != null && reply === '') {
      const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${
        error.message ?? 'The Google provider failed to generate content, please contact the Admin.'
      }" }`;
      throw new Error(errorMessage);
    }
    return reply;
  }
|
||||||
|
|
||||||
|
  /**
   * Get stream usage as returned by this client's API response.
   * Populated by `getCompletion` when the provider reports usage metadata.
   * @returns {UsageMetadata} The stream usage object.
   */
  getStreamUsage() {
    return this.usage;
  }
|
||||||
|
|
||||||
|
getMessageMapMethod() {
|
||||||
|
/**
|
||||||
|
* @param {TMessage} msg
|
||||||
|
*/
|
||||||
|
return (msg) => {
|
||||||
|
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
|
||||||
|
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
|
||||||
|
} else if (msg.content != null) {
|
||||||
|
msg.text = parseTextParts(msg.content, true);
|
||||||
|
delete msg.content;
|
||||||
|
}
|
||||||
|
|
||||||
|
return msg;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculates the correct token count for the current user message based on the token count map and API usage.
|
||||||
|
* Edge case: If the calculation results in a negative value, it returns the original estimate.
|
||||||
|
* If revisiting a conversation with a chat history entirely composed of token estimates,
|
||||||
|
* the cumulative token count going forward should become more accurate as the conversation progresses.
|
||||||
|
* @param {Object} params - The parameters for the calculation.
|
||||||
|
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
|
||||||
|
* @param {string} params.currentMessageId - The ID of the current message to calculate.
|
||||||
|
* @param {UsageMetadata} params.usage - The usage object returned by the API.
|
||||||
|
* @returns {number} The correct token count for the current user message.
|
||||||
|
*/
|
||||||
|
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
|
||||||
|
const originalEstimate = tokenCountMap[currentMessageId] || 0;
|
||||||
|
|
||||||
|
if (!usage || typeof usage.input_tokens !== 'number') {
|
||||||
|
return originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenCountMap[currentMessageId] = 0;
|
||||||
|
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
|
||||||
|
const numCount = Number(count);
|
||||||
|
return sum + (isNaN(numCount) ? 0 : numCount);
|
||||||
|
}, 0);
|
||||||
|
const totalInputTokens = usage.input_tokens ?? 0;
|
||||||
|
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
|
||||||
|
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param {object} params
|
||||||
|
* @param {number} params.promptTokens
|
||||||
|
* @param {number} params.completionTokens
|
||||||
|
* @param {UsageMetadata} [params.usage]
|
||||||
|
* @param {string} [params.model]
|
||||||
|
* @param {string} [params.context='message']
|
||||||
|
* @returns {Promise<void>}
|
||||||
|
*/
|
||||||
|
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
|
||||||
|
await spendTokens(
|
||||||
|
{
|
||||||
|
context,
|
||||||
|
user: this.user ?? this.options.req?.user?.id,
|
||||||
|
conversationId: this.conversationId,
|
||||||
|
model: model ?? this.modelOptions.model,
|
||||||
|
endpointTokenConfig: this.options.endpointTokenConfig,
|
||||||
|
},
|
||||||
|
{ promptTokens, completionTokens },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
   * @param {object|Array} _payload - GenAI `contents` array or `{ instances }` payload.
   * @param {{ abortController?: AbortController }} [options]
   * @returns {Promise<string>} The generated title text.
   */
  async titleChatCompletion(_payload, options = {}) {
    let reply = '';
    const { abortController } = options;

    const model =
      this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
    const safetySettings = getSafetySettings(model);
    if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
      logger.debug('Identified titling model as GenAI version');
      /** @type {GenerativeModel} */
      const client = this.client;
      const requestOptions = {
        contents: _payload,
        safetySettings,
        generationConfig: {
          temperature: 0.5,
        },
      };

      const result = await client.generateContent(requestOptions);
      reply = result.response?.text();
      return reply;
    } else {
      // LangChain (Vertex) path: single non-streaming invoke with a timeout.
      const { instances } = _payload;
      const { messages } = instances?.[0] ?? {};
      const titleResponse = await this.client.invoke(messages, {
        signal: abortController.signal,
        timeout: 7000,
        safetySettings,
      });

      // Record title-generation spend when the provider reports usage.
      if (titleResponse.usage_metadata) {
        await this.recordTokenUsage({
          model,
          promptTokens: titleResponse.usage_metadata.input_tokens,
          completionTokens: titleResponse.usage_metadata.output_tokens,
          context: 'title',
        });
      }

      reply = titleResponse.content;
      return reply;
    }
  }
|
||||||
|
|
||||||
|
  /**
   * Generates a short conversation title from the first user/assistant
   * exchange. Falls back to 'New Chat' when title generation fails.
   * @param {{ text: string, responseText?: string }} params
   * @returns {Promise<string>} The generated title.
   */
  async titleConvo({ text, responseText = '' }) {
    let title = 'New Chat';
    const convo = `||>User:
"${truncateText(text)}"
||>Response:
"${JSON.stringify(truncateText(responseText))}"`;

    let { prompt: payload } = await this.buildMessages([
      {
        text: `Please generate ${titleInstruction}

${convo}

||>Title:`,
        isCreatedByUser: true,
        author: this.userLabel,
      },
    ]);

    try {
      this.initializeClient();
      title = await this.titleChatCompletion(payload, {
        abortController: new AbortController(),
        onProgress: () => {},
      });
    } catch (e) {
      // Best-effort: keep the 'New Chat' fallback on failure.
      logger.error('[GoogleClient] There was an issue generating the title', e);
    }
    logger.debug(`Title response: ${title}`);
    return title;
  }
|
||||||
|
|
||||||
|
  /**
   * Returns the options persisted with the conversation record, merging
   * client-level presentation options with the current model options.
   * @returns {object}
   */
  getSaveOptions() {
    return {
      endpointType: null,
      artifacts: this.options.artifacts,
      promptPrefix: this.options.promptPrefix,
      maxContextTokens: this.options.maxContextTokens,
      modelLabel: this.options.modelLabel,
      iconURL: this.options.iconURL,
      greeting: this.options.greeting,
      spec: this.options.spec,
      // Model options last so they take precedence on key collisions.
      ...this.modelOptions,
    };
  }
|
||||||
|
|
||||||
|
  /**
   * Intentionally a no-op: GoogleClient does not use build-messages options,
   * but the base-client interface expects this method to exist.
   */
  getBuildMessagesOptions() {
    // logger.debug('GoogleClient doesn\'t use getBuildMessagesOptions');
  }
|
||||||
|
|
||||||
|
async sendCompletion(payload, opts = {}) {
|
||||||
|
let reply = '';
|
||||||
|
reply = await this.getCompletion(payload, opts);
|
||||||
|
return reply.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * Tokenizer encoding used for local token estimates.
   * @returns {string}
   */
  getEncoding() {
    return 'cl100k_base';
  }
|
||||||
|
|
||||||
|
async getVertexTokenCount(text) {
|
||||||
|
/** @type {ChatVertexAI} */
|
||||||
|
const client = this.client ?? this.initializeClient();
|
||||||
|
const connection = client.connection;
|
||||||
|
const gAuthClient = connection.client;
|
||||||
|
const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`;
|
||||||
|
const result = await gAuthClient.request({
|
||||||
|
url: tokenEndpoint,
|
||||||
|
method: 'POST',
|
||||||
|
data: {
|
||||||
|
contents: [{ role: 'user', parts: [{ text }] }],
|
||||||
|
},
|
||||||
|
});
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
|
||||||
|
* @param {string} text - The text to get the token count for.
|
||||||
|
* @returns {number} The token count of the given text.
|
||||||
|
*/
|
||||||
|
getTokenCount(text) {
|
||||||
|
const encoding = this.getEncoding();
|
||||||
|
return Tokenizer.getTokenCount(text, encoding);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = GoogleClient;
|
||||||
|
|
@ -2,9 +2,10 @@ const { z } = require('zod');
|
||||||
const axios = require('axios');
|
const axios = require('axios');
|
||||||
const { Ollama } = require('ollama');
|
const { Ollama } = require('ollama');
|
||||||
const { sleep } = require('@librechat/agents');
|
const { sleep } = require('@librechat/agents');
|
||||||
|
const { resolveHeaders } = require('@librechat/api');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { Constants } = require('librechat-data-provider');
|
const { Constants } = require('librechat-data-provider');
|
||||||
const { resolveHeaders, deriveBaseURL } = require('@librechat/api');
|
const { deriveBaseURL } = require('~/utils');
|
||||||
|
|
||||||
const ollamaPayloadSchema = z.object({
|
const ollamaPayloadSchema = z.object({
|
||||||
mirostat: z.number().optional(),
|
mirostat: z.number().optional(),
|
||||||
|
|
|
||||||
1207
api/app/clients/OpenAIClient.js
Normal file
1207
api/app/clients/OpenAIClient.js
Normal file
File diff suppressed because it is too large
Load diff
5
api/app/clients/document/index.js
Normal file
5
api/app/clients/document/index.js
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
const tokenSplit = require('./tokenSplit');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
tokenSplit,
|
||||||
|
};
|
||||||
51
api/app/clients/document/tokenSplit.js
Normal file
51
api/app/clients/document/tokenSplit.js
Normal file
|
|
@ -0,0 +1,51 @@
|
||||||
|
const { TokenTextSplitter } = require('@langchain/textsplitters');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Splits a given text by token chunks, based on the provided parameters for the TokenTextSplitter.
|
||||||
|
* Note: limit or memoize use of this function as its calculation is expensive.
|
||||||
|
*
|
||||||
|
* @param {Object} obj - Configuration object for the text splitting operation.
|
||||||
|
* @param {string} obj.text - The text to be split.
|
||||||
|
* @param {string} [obj.encodingName='cl100k_base'] - Encoding name. Defaults to 'cl100k_base'.
|
||||||
|
* @param {number} [obj.chunkSize=1] - The token size of each chunk. Defaults to 1.
|
||||||
|
* @param {number} [obj.chunkOverlap=0] - The number of chunk elements to be overlapped between adjacent chunks. Defaults to 0.
|
||||||
|
* @param {number} [obj.returnSize] - If specified and not 0, slices the return array from the end by this amount.
|
||||||
|
*
|
||||||
|
* @returns {Promise<Array>} Returns a promise that resolves to an array of text chunks.
|
||||||
|
* If no text is provided, an empty array is returned.
|
||||||
|
* If returnSize is specified and not 0, slices the return array from the end by returnSize.
|
||||||
|
*
|
||||||
|
* @async
|
||||||
|
* @function tokenSplit
|
||||||
|
*/
|
||||||
|
async function tokenSplit({
|
||||||
|
text,
|
||||||
|
encodingName = 'cl100k_base',
|
||||||
|
chunkSize = 1,
|
||||||
|
chunkOverlap = 0,
|
||||||
|
returnSize,
|
||||||
|
}) {
|
||||||
|
if (!text) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const splitter = new TokenTextSplitter({
|
||||||
|
encodingName,
|
||||||
|
chunkSize,
|
||||||
|
chunkOverlap,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!returnSize) {
|
||||||
|
return await splitter.splitText(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
const splitText = await splitter.splitText(text);
|
||||||
|
|
||||||
|
if (returnSize && returnSize > 0 && splitText.length > 0) {
|
||||||
|
return splitText.slice(-Math.abs(returnSize));
|
||||||
|
}
|
||||||
|
|
||||||
|
return splitText;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = tokenSplit;
|
||||||
56
api/app/clients/document/tokenSplit.spec.js
Normal file
56
api/app/clients/document/tokenSplit.spec.js
Normal file
|
|
@ -0,0 +1,56 @@
|
||||||
|
const tokenSplit = require('./tokenSplit');
|
||||||
|
|
||||||
|
describe('tokenSplit', () => {
|
||||||
|
const text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam id.';
|
||||||
|
|
||||||
|
it('returns correct text chunks with provided parameters', async () => {
|
||||||
|
const result = await tokenSplit({
|
||||||
|
text: text,
|
||||||
|
encodingName: 'gpt2',
|
||||||
|
chunkSize: 2,
|
||||||
|
chunkOverlap: 1,
|
||||||
|
returnSize: 5,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result).toEqual(['it.', '. Null', ' Nullam', 'am id', ' id.']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns correct text chunks with default parameters', async () => {
|
||||||
|
const result = await tokenSplit({ text });
|
||||||
|
expect(result).toEqual([
|
||||||
|
'Lorem',
|
||||||
|
' ipsum',
|
||||||
|
' dolor',
|
||||||
|
' sit',
|
||||||
|
' amet',
|
||||||
|
',',
|
||||||
|
' consectetur',
|
||||||
|
' adipiscing',
|
||||||
|
' elit',
|
||||||
|
'.',
|
||||||
|
' Null',
|
||||||
|
'am',
|
||||||
|
' id',
|
||||||
|
'.',
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns correct text chunks with specific return size', async () => {
|
||||||
|
const result = await tokenSplit({ text, returnSize: 2 });
|
||||||
|
expect(result.length).toEqual(2);
|
||||||
|
expect(result).toEqual([' id', '.']);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns correct text chunks with specified chunk size', async () => {
|
||||||
|
const result = await tokenSplit({ text, chunkSize: 10 });
|
||||||
|
expect(result).toEqual([
|
||||||
|
'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
|
||||||
|
' Nullam id.',
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns empty array with no text', async () => {
|
||||||
|
const result = await tokenSplit({ text: '' });
|
||||||
|
expect(result).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
@ -1,7 +1,13 @@
|
||||||
|
const OpenAIClient = require('./OpenAIClient');
|
||||||
|
const GoogleClient = require('./GoogleClient');
|
||||||
const TextStream = require('./TextStream');
|
const TextStream = require('./TextStream');
|
||||||
|
const AnthropicClient = require('./AnthropicClient');
|
||||||
const toolUtils = require('./tools/util');
|
const toolUtils = require('./tools/util');
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
OpenAIClient,
|
||||||
|
GoogleClient,
|
||||||
TextStream,
|
TextStream,
|
||||||
|
AnthropicClient,
|
||||||
...toolUtils,
|
...toolUtils,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
85
api/app/clients/llm/createCoherePayload.js
Normal file
85
api/app/clients/llm/createCoherePayload.js
Normal file
|
|
@ -0,0 +1,85 @@
|
||||||
|
const { CohereConstants } = require('librechat-data-provider');
|
||||||
|
const { titleInstruction } = require('../prompts/titlePrompts');
|
||||||
|
|
||||||
|
// Mapping OpenAI roles to Cohere roles
|
||||||
|
const roleMap = {
|
||||||
|
user: CohereConstants.ROLE_USER,
|
||||||
|
assistant: CohereConstants.ROLE_CHATBOT,
|
||||||
|
system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format.
|
||||||
|
* Now includes handling for "system" roles explicitly mentioned.
|
||||||
|
*
|
||||||
|
* @param {Object} options - Object containing the model options.
|
||||||
|
* @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options.
|
||||||
|
* @returns {CohereChatStreamRequest} Cohere-compatible chat API payload.
|
||||||
|
*/
|
||||||
|
function createCoherePayload({ modelOptions }) {
|
||||||
|
/** @type {string | undefined} */
|
||||||
|
let preamble;
|
||||||
|
let latestUserMessageContent = '';
|
||||||
|
const {
|
||||||
|
stream,
|
||||||
|
stop,
|
||||||
|
top_p,
|
||||||
|
temperature,
|
||||||
|
frequency_penalty,
|
||||||
|
presence_penalty,
|
||||||
|
max_tokens,
|
||||||
|
messages,
|
||||||
|
model,
|
||||||
|
...rest
|
||||||
|
} = modelOptions;
|
||||||
|
|
||||||
|
// Filter out the latest user message and transform remaining messages to Cohere's chat_history format
|
||||||
|
let chatHistory = messages.reduce((acc, message, index, arr) => {
|
||||||
|
const isLastUserMessage = index === arr.length - 1 && message.role === 'user';
|
||||||
|
|
||||||
|
const messageContent =
|
||||||
|
typeof message.content === 'string'
|
||||||
|
? message.content
|
||||||
|
: message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' ');
|
||||||
|
|
||||||
|
if (isLastUserMessage) {
|
||||||
|
latestUserMessageContent = messageContent;
|
||||||
|
} else {
|
||||||
|
acc.push({
|
||||||
|
role: roleMap[message.role] || CohereConstants.ROLE_USER,
|
||||||
|
message: messageContent,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return acc;
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
if (
|
||||||
|
chatHistory.length === 1 &&
|
||||||
|
chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
|
||||||
|
!latestUserMessageContent.length
|
||||||
|
) {
|
||||||
|
const message = chatHistory[0].message;
|
||||||
|
latestUserMessageContent = message.includes(titleInstruction)
|
||||||
|
? CohereConstants.TITLE_MESSAGE
|
||||||
|
: '.';
|
||||||
|
preamble = message;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
message: latestUserMessageContent,
|
||||||
|
model: model,
|
||||||
|
chatHistory,
|
||||||
|
stream: stream ?? false,
|
||||||
|
temperature: temperature,
|
||||||
|
frequencyPenalty: frequency_penalty,
|
||||||
|
presencePenalty: presence_penalty,
|
||||||
|
maxTokens: max_tokens,
|
||||||
|
stopSequences: stop,
|
||||||
|
preamble,
|
||||||
|
p: top_p,
|
||||||
|
...rest,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = createCoherePayload;
|
||||||
5
api/app/clients/llm/index.js
Normal file
5
api/app/clients/llm/index.js
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
const createCoherePayload = require('./createCoherePayload');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
createCoherePayload,
|
||||||
|
};
|
||||||
90
api/app/clients/output_parsers/addImages.js
Normal file
90
api/app/clients/output_parsers/addImages.js
Normal file
|
|
@ -0,0 +1,90 @@
|
||||||
|
const { getBasePath } = require('@librechat/api');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The `addImages` function corrects any erroneous image URLs in the `responseMessage.text`
|
||||||
|
* and appends image observations from `intermediateSteps` if they are not already present.
|
||||||
|
*
|
||||||
|
* @function
|
||||||
|
* @module addImages
|
||||||
|
*
|
||||||
|
* @param {Array.<Object>} intermediateSteps - An array of objects, each containing an observation.
|
||||||
|
* @param {Object} responseMessage - An object containing the text property which might have image URLs.
|
||||||
|
*
|
||||||
|
* @property {string} intermediateSteps[].observation - The observation string which might contain an image markdown.
|
||||||
|
* @property {string} responseMessage.text - The text which might contain image URLs.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
*
|
||||||
|
* const intermediateSteps = [
|
||||||
|
* { observation: '' }
|
||||||
|
* ];
|
||||||
|
* const responseMessage = { text: 'Some text with ' };
|
||||||
|
*
|
||||||
|
* addImages(intermediateSteps, responseMessage);
|
||||||
|
*
|
||||||
|
* logger.debug(responseMessage.text);
|
||||||
|
* // Outputs: 'Some text with \n'
|
||||||
|
*
|
||||||
|
* @returns {void}
|
||||||
|
*/
|
||||||
|
function addImages(intermediateSteps, responseMessage) {
|
||||||
|
if (!intermediateSteps || !responseMessage) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const basePath = getBasePath();
|
||||||
|
|
||||||
|
// Correct any erroneous URLs in the responseMessage.text first
|
||||||
|
intermediateSteps.forEach((step) => {
|
||||||
|
const { observation } = step;
|
||||||
|
if (!observation || !observation.includes('![')) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const match = observation.match(/\/images\/.*\.\w*/);
|
||||||
|
if (!match) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const essentialImagePath = match[0];
|
||||||
|
const fullImagePath = `${basePath}${essentialImagePath}`;
|
||||||
|
|
||||||
|
const regex = /!\[.*?\]\((.*?)\)/g;
|
||||||
|
let matchErroneous;
|
||||||
|
while ((matchErroneous = regex.exec(responseMessage.text)) !== null) {
|
||||||
|
if (matchErroneous[1] && !matchErroneous[1].startsWith(`${basePath}/images/`)) {
|
||||||
|
// Replace with the full path including base path
|
||||||
|
responseMessage.text = responseMessage.text.replace(matchErroneous[1], fullImagePath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Now, check if the responseMessage already includes the correct image file path and append if not
|
||||||
|
intermediateSteps.forEach((step) => {
|
||||||
|
const { observation } = step;
|
||||||
|
if (!observation || !observation.includes('![')) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
|
||||||
|
if (observedImagePath) {
|
||||||
|
// Fix the image path to include base path if it doesn't already
|
||||||
|
let imageMarkdown = observedImagePath[0];
|
||||||
|
const urlMatch = imageMarkdown.match(/\(([^)]+)\)/);
|
||||||
|
if (
|
||||||
|
urlMatch &&
|
||||||
|
urlMatch[1] &&
|
||||||
|
!urlMatch[1].startsWith(`${basePath}/images/`) &&
|
||||||
|
urlMatch[1].startsWith('/images/')
|
||||||
|
) {
|
||||||
|
imageMarkdown = imageMarkdown.replace(urlMatch[1], `${basePath}${urlMatch[1]}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!responseMessage.text.includes(imageMarkdown)) {
|
||||||
|
responseMessage.text += '\n' + imageMarkdown;
|
||||||
|
logger.debug('[addImages] added image from intermediateSteps:', imageMarkdown);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = addImages;
|
||||||
246
api/app/clients/output_parsers/addImages.spec.js
Normal file
246
api/app/clients/output_parsers/addImages.spec.js
Normal file
|
|
@ -0,0 +1,246 @@
|
||||||
|
let addImages = require('./addImages');
|
||||||
|
|
||||||
|
describe('addImages', () => {
|
||||||
|
let intermediateSteps;
|
||||||
|
let responseMessage;
|
||||||
|
let options;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
intermediateSteps = [];
|
||||||
|
responseMessage = { text: '' };
|
||||||
|
options = { debug: false };
|
||||||
|
this.options = options;
|
||||||
|
addImages = addImages.bind(this);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle null or undefined parameters', () => {
|
||||||
|
addImages(null, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
|
||||||
|
addImages(intermediateSteps, null);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
|
||||||
|
addImages(null, null);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append correct image markdown if not present in responseMessage', () => {
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append image markdown if already present in responseMessage', () => {
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct and append image markdown with erroneous URL', () => {
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct multiple erroneous URLs in responseMessage', () => {
|
||||||
|
responseMessage.text =
|
||||||
|
' ';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(' ');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append non-image markdown observations', () => {
|
||||||
|
intermediateSteps.push({ observation: '[desc](/images/test.png)' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple observations', () => {
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not append if observation does not contain image markdown', () => {
|
||||||
|
intermediateSteps.push({ observation: 'This is a test observation without image markdown.' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should append correctly from a real scenario', () => {
|
||||||
|
responseMessage.text =
|
||||||
|
"Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there's a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?";
|
||||||
|
const originalText = responseMessage.text;
|
||||||
|
const imageMarkdown = '';
|
||||||
|
intermediateSteps.push({ observation: imageMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should extract only image markdowns when there is text between them', () => {
|
||||||
|
const markdownWithTextBetweenImages = `
|
||||||
|

|
||||||
|
Some text between images that should not be included.
|
||||||
|

|
||||||
|
More text that should be ignored.
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithTextBetweenImages });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only return the first image when multiple images are present', () => {
|
||||||
|
const markdownWithMultipleImages = `
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithMultipleImages });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include any text or metadata surrounding the image markdown', () => {
|
||||||
|
const markdownWithMetadata = `
|
||||||
|
Title: Test Document
|
||||||
|
Author: John Doe
|
||||||
|

|
||||||
|
Some content after the image.
|
||||||
|
Vector values: [0.1, 0.2, 0.3]
|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: markdownWithMetadata });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle complex markdown with multiple images and only return the first one', () => {
|
||||||
|
const complexMarkdown = `
|
||||||
|
# Document Title
|
||||||
|
|
||||||
|
## Section 1
|
||||||
|
Here's some text with an embedded image:
|
||||||
|

|
||||||
|
|
||||||
|
## Section 2
|
||||||
|
More text here...
|
||||||
|

|
||||||
|
|
||||||
|
### Subsection
|
||||||
|
Even more content
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: complexMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('basePath functionality', () => {
|
||||||
|
let originalDomainClient;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
originalDomainClient = process.env.DOMAIN_CLIENT;
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
process.env.DOMAIN_CLIENT = originalDomainClient;
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should prepend base path to image URLs when DOMAIN_CLIENT is set', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not prepend base path when image URL already has base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should correct erroneous URLs with base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
responseMessage.text = '';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty base path (root deployment)', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle missing DOMAIN_CLIENT', () => {
|
||||||
|
delete process.env.DOMAIN_CLIENT;
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle observation without image path match', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle nested subdirectories in base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/apps/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple observations with mixed base path scenarios', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(
|
||||||
|
'\n\n',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle complex markdown with base path', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
const complexMarkdown = `
|
||||||
|
# Document Title
|
||||||
|

|
||||||
|
Some text between images
|
||||||
|

|
||||||
|
`;
|
||||||
|
intermediateSteps.push({ observation: complexMarkdown });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle URLs that are already absolute', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({ observation: '' });
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe('\n');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle data URLs', () => {
|
||||||
|
process.env.DOMAIN_CLIENT = 'http://localhost:3080/librechat';
|
||||||
|
intermediateSteps.push({
|
||||||
|
observation:
|
||||||
|
'',
|
||||||
|
});
|
||||||
|
addImages(intermediateSteps, responseMessage);
|
||||||
|
expect(responseMessage.text).toBe(
|
||||||
|
'\n',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
88
api/app/clients/output_parsers/handleOutputs.js
Normal file
88
api/app/clients/output_parsers/handleOutputs.js
Normal file
|
|
@ -0,0 +1,88 @@
|
||||||
|
const { instructions, imageInstructions, errorInstructions } = require('../prompts');
|
||||||
|
|
||||||
|
function getActions(actions = [], functionsAgent = false) {
|
||||||
|
let output = 'Internal thoughts & actions taken:\n"';
|
||||||
|
|
||||||
|
if (actions[0]?.action && functionsAgent) {
|
||||||
|
actions = actions.map((step) => ({
|
||||||
|
log: `Action: ${step.action?.tool || ''}\nInput: ${
|
||||||
|
JSON.stringify(step.action?.toolInput) || ''
|
||||||
|
}\nObservation: ${step.observation}`,
|
||||||
|
}));
|
||||||
|
} else if (actions[0]?.action) {
|
||||||
|
actions = actions.map((step) => ({
|
||||||
|
log: `${step.action.log}\nObservation: ${step.observation}`,
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
actions.forEach((actionObj, index) => {
|
||||||
|
output += `${actionObj.log}`;
|
||||||
|
if (index < actions.length - 1) {
|
||||||
|
output += '\n';
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return output + '"';
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildErrorInput({ message, errorMessage, actions, functionsAgent }) {
|
||||||
|
const log = errorMessage.includes('Could not parse LLM output:')
|
||||||
|
? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
|
||||||
|
: `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;
|
||||||
|
|
||||||
|
return `
|
||||||
|
${log}
|
||||||
|
|
||||||
|
${getActions(actions, functionsAgent)}
|
||||||
|
|
||||||
|
Human's last message: ${message}
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildPromptPrefix({ result, message, functionsAgent }) {
|
||||||
|
if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
result?.intermediateSteps?.length === 1 &&
|
||||||
|
result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
|
||||||
|
) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const internalActions =
|
||||||
|
result?.intermediateSteps?.length > 0
|
||||||
|
? getActions(result.intermediateSteps, functionsAgent)
|
||||||
|
: 'Internal Actions Taken: None';
|
||||||
|
|
||||||
|
const toolBasedInstructions = internalActions.toLowerCase().includes('image')
|
||||||
|
? imageInstructions
|
||||||
|
: '';
|
||||||
|
|
||||||
|
const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';
|
||||||
|
|
||||||
|
const preliminaryAnswer =
|
||||||
|
result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
|
||||||
|
const prefix = preliminaryAnswer
|
||||||
|
? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
|
||||||
|
: 'respond to the User Message below based on your preliminary thoughts & actions.';
|
||||||
|
|
||||||
|
return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
|
||||||
|
${preliminaryAnswer}
|
||||||
|
Reply conversationally to the User based on your ${
|
||||||
|
preliminaryAnswer ? 'preliminary answer, ' : ''
|
||||||
|
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
|
||||||
|
${
|
||||||
|
preliminaryAnswer
|
||||||
|
? ''
|
||||||
|
: '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
|
||||||
|
}You must cite sources if you are using any web links. ${toolBasedInstructions}
|
||||||
|
Only respond with your conversational reply to the following User Message:
|
||||||
|
"${message}"`;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
buildErrorInput,
|
||||||
|
buildPromptPrefix,
|
||||||
|
};
|
||||||
7
api/app/clients/output_parsers/index.js
Normal file
7
api/app/clients/output_parsers/index.js
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
const addImages = require('./addImages');
|
||||||
|
const handleOutputs = require('./handleOutputs');
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
addImages,
|
||||||
|
...handleOutputs,
|
||||||
|
};
|
||||||
38
api/app/clients/prompts/handleInputs.js
Normal file
38
api/app/clients/prompts/handleInputs.js
Normal file
|
|
@ -0,0 +1,38 @@
|
||||||
|
// Escaping curly braces is necessary for LangChain to correctly process the prompt
|
||||||
|
function escapeBraces(str) {
|
||||||
|
return str
|
||||||
|
.replace(/({{2,})|(}{2,})/g, (match) => `${match[0]}`)
|
||||||
|
.replace(/{|}/g, (match) => `${match}${match}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
function getSnippet(text) {
|
||||||
|
let limit = 50;
|
||||||
|
let splitText = escapeBraces(text).split(' ');
|
||||||
|
|
||||||
|
if (splitText.length === 1 && splitText[0].length > limit) {
|
||||||
|
return splitText[0].substring(0, limit);
|
||||||
|
}
|
||||||
|
|
||||||
|
let result = '';
|
||||||
|
let spaceCount = 0;
|
||||||
|
|
||||||
|
for (let i = 0; i < splitText.length; i++) {
|
||||||
|
if (result.length + splitText[i].length <= limit) {
|
||||||
|
result += splitText[i] + ' ';
|
||||||
|
spaceCount++;
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (spaceCount == 10) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
escapeBraces,
|
||||||
|
getSnippet,
|
||||||
|
};
|
||||||
|
|
@ -1,5 +1,7 @@
|
||||||
const formatMessages = require('./formatMessages');
|
const formatMessages = require('./formatMessages');
|
||||||
const summaryPrompts = require('./summaryPrompts');
|
const summaryPrompts = require('./summaryPrompts');
|
||||||
|
const handleInputs = require('./handleInputs');
|
||||||
|
const instructions = require('./instructions');
|
||||||
const truncate = require('./truncate');
|
const truncate = require('./truncate');
|
||||||
const createVisionPrompt = require('./createVisionPrompt');
|
const createVisionPrompt = require('./createVisionPrompt');
|
||||||
const createContextHandlers = require('./createContextHandlers');
|
const createContextHandlers = require('./createContextHandlers');
|
||||||
|
|
@ -7,6 +9,8 @@ const createContextHandlers = require('./createContextHandlers');
|
||||||
module.exports = {
|
module.exports = {
|
||||||
...formatMessages,
|
...formatMessages,
|
||||||
...summaryPrompts,
|
...summaryPrompts,
|
||||||
|
...handleInputs,
|
||||||
|
...instructions,
|
||||||
...truncate,
|
...truncate,
|
||||||
createVisionPrompt,
|
createVisionPrompt,
|
||||||
createContextHandlers,
|
createContextHandlers,
|
||||||
|
|
|
||||||
10
api/app/clients/prompts/instructions.js
Normal file
10
api/app/clients/prompts/instructions.js
Normal file
|
|
@ -0,0 +1,10 @@
|
||||||
|
module.exports = {
|
||||||
|
instructions:
|
||||||
|
'Remember, all your responses MUST be in the format described. Do not respond unless it\'s in the format described, using the structure of Action, Action Input, etc.',
|
||||||
|
errorInstructions:
|
||||||
|
'\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn\'t mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:',
|
||||||
|
imageInstructions:
|
||||||
|
'You must include the exact image paths from above, formatted in Markdown syntax: ',
|
||||||
|
completionInstructions:
|
||||||
|
'Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date:',
|
||||||
|
};
|
||||||
|
|
@ -18,17 +18,17 @@ function generateShadcnPrompt(options) {
|
||||||
Here are the components that are available, along with how to import them, and how to use them:
|
Here are the components that are available, along with how to import them, and how to use them:
|
||||||
|
|
||||||
${Object.values(components)
|
${Object.values(components)
|
||||||
.map((component) => {
|
.map((component) => {
|
||||||
if (useXML) {
|
if (useXML) {
|
||||||
return dedent`
|
return dedent`
|
||||||
<component>
|
<component>
|
||||||
<name>${component.componentName}</name>
|
<name>${component.componentName}</name>
|
||||||
<import-instructions>${component.importDocs}</import-instructions>
|
<import-instructions>${component.importDocs}</import-instructions>
|
||||||
<usage-instructions>${component.usageDocs}</usage-instructions>
|
<usage-instructions>${component.usageDocs}</usage-instructions>
|
||||||
</component>
|
</component>
|
||||||
`;
|
`;
|
||||||
} else {
|
} else {
|
||||||
return dedent`
|
return dedent`
|
||||||
# ${component.componentName}
|
# ${component.componentName}
|
||||||
|
|
||||||
## Import Instructions
|
## Import Instructions
|
||||||
|
|
@ -37,9 +37,9 @@ function generateShadcnPrompt(options) {
|
||||||
## Usage Instructions
|
## Usage Instructions
|
||||||
${component.usageDocs}
|
${component.usageDocs}
|
||||||
`;
|
`;
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.join('\n\n')}
|
.join('\n\n')}
|
||||||
`;
|
`;
|
||||||
|
|
||||||
return systemPrompt;
|
return systemPrompt;
|
||||||
|
|
|
||||||
1043
api/app/clients/specs/AnthropicClient.test.js
Normal file
1043
api/app/clients/specs/AnthropicClient.test.js
Normal file
File diff suppressed because it is too large
Load diff
630
api/app/clients/specs/OpenAIClient.test.js
Normal file
630
api/app/clients/specs/OpenAIClient.test.js
Normal file
|
|
@ -0,0 +1,630 @@
|
||||||
|
jest.mock('~/cache/getLogStores');
|
||||||
|
require('dotenv').config();
|
||||||
|
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
|
||||||
|
const getLogStores = require('~/cache/getLogStores');
|
||||||
|
const OpenAIClient = require('../OpenAIClient');
|
||||||
|
jest.mock('meilisearch');
|
||||||
|
|
||||||
|
jest.mock('~/db/connect');
|
||||||
|
jest.mock('~/models', () => ({
|
||||||
|
User: jest.fn(),
|
||||||
|
Key: jest.fn(),
|
||||||
|
Session: jest.fn(),
|
||||||
|
Balance: jest.fn(),
|
||||||
|
Transaction: jest.fn(),
|
||||||
|
getMessages: jest.fn().mockResolvedValue([]),
|
||||||
|
saveMessage: jest.fn(),
|
||||||
|
updateMessage: jest.fn(),
|
||||||
|
deleteMessagesSince: jest.fn(),
|
||||||
|
deleteMessages: jest.fn(),
|
||||||
|
getConvoTitle: jest.fn(),
|
||||||
|
getConvo: jest.fn(),
|
||||||
|
saveConvo: jest.fn(),
|
||||||
|
deleteConvos: jest.fn(),
|
||||||
|
getPreset: jest.fn(),
|
||||||
|
getPresets: jest.fn(),
|
||||||
|
savePreset: jest.fn(),
|
||||||
|
deletePresets: jest.fn(),
|
||||||
|
findFileById: jest.fn(),
|
||||||
|
createFile: jest.fn(),
|
||||||
|
updateFile: jest.fn(),
|
||||||
|
deleteFile: jest.fn(),
|
||||||
|
deleteFiles: jest.fn(),
|
||||||
|
getFiles: jest.fn(),
|
||||||
|
updateFileUsage: jest.fn(),
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Import the actual module but mock specific parts
|
||||||
|
const agents = jest.requireActual('@librechat/agents');
|
||||||
|
const { CustomOpenAIClient } = agents;
|
||||||
|
|
||||||
|
// Also mock ChatOpenAI to prevent real API calls
|
||||||
|
agents.ChatOpenAI = jest.fn().mockImplementation(() => {
|
||||||
|
return {};
|
||||||
|
});
|
||||||
|
agents.AzureChatOpenAI = jest.fn().mockImplementation(() => {
|
||||||
|
return {};
|
||||||
|
});
|
||||||
|
|
||||||
|
// Mock only the CustomOpenAIClient constructor
|
||||||
|
jest.spyOn(CustomOpenAIClient, 'constructor').mockImplementation(function (...options) {
|
||||||
|
return new CustomOpenAIClient(...options);
|
||||||
|
});
|
||||||
|
|
||||||
|
const finalChatCompletion = jest.fn().mockResolvedValue({
|
||||||
|
choices: [
|
||||||
|
{
|
||||||
|
message: { role: 'assistant', content: 'Mock message content' },
|
||||||
|
finish_reason: 'Mock finish reason',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
const stream = jest.fn().mockImplementation(() => {
|
||||||
|
let isDone = false;
|
||||||
|
let isError = false;
|
||||||
|
let errorCallback = null;
|
||||||
|
|
||||||
|
const onEventHandlers = {
|
||||||
|
abort: () => {
|
||||||
|
// Mock abort behavior
|
||||||
|
},
|
||||||
|
error: (callback) => {
|
||||||
|
errorCallback = callback; // Save the error callback for later use
|
||||||
|
},
|
||||||
|
finalMessage: (callback) => {
|
||||||
|
callback({ role: 'assistant', content: 'Mock Response' });
|
||||||
|
isDone = true; // Set stream to done
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const mockStream = {
|
||||||
|
on: jest.fn((event, callback) => {
|
||||||
|
if (onEventHandlers[event]) {
|
||||||
|
onEventHandlers[event](callback);
|
||||||
|
}
|
||||||
|
return mockStream;
|
||||||
|
}),
|
||||||
|
finalChatCompletion,
|
||||||
|
controller: { abort: jest.fn() },
|
||||||
|
triggerError: () => {
|
||||||
|
isError = true;
|
||||||
|
if (errorCallback) {
|
||||||
|
errorCallback(new Error('Mock error'));
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[Symbol.asyncIterator]: () => {
|
||||||
|
return {
|
||||||
|
next: () => {
|
||||||
|
if (isError) {
|
||||||
|
return Promise.reject(new Error('Mock error'));
|
||||||
|
}
|
||||||
|
if (isDone) {
|
||||||
|
return Promise.resolve({ done: true });
|
||||||
|
}
|
||||||
|
const chunk = { choices: [{ delta: { content: 'Mock chunk' } }] };
|
||||||
|
return Promise.resolve({ value: chunk, done: false });
|
||||||
|
},
|
||||||
|
};
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return mockStream;
|
||||||
|
});
|
||||||
|
|
||||||
|
const create = jest.fn().mockResolvedValue({
|
||||||
|
choices: [
|
||||||
|
{
|
||||||
|
message: { content: 'Mock message content' },
|
||||||
|
finish_reason: 'Mock finish reason',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
// Mock the implementation of CustomOpenAIClient instances
|
||||||
|
jest.spyOn(CustomOpenAIClient.prototype, 'constructor').mockImplementation(function () {
|
||||||
|
return this;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Create a mock for the CustomOpenAIClient class
|
||||||
|
const mockCustomOpenAIClient = jest.fn().mockImplementation(() => ({
|
||||||
|
beta: {
|
||||||
|
chat: {
|
||||||
|
completions: {
|
||||||
|
stream,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
chat: {
|
||||||
|
completions: {
|
||||||
|
create,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
CustomOpenAIClient.mockImplementation = mockCustomOpenAIClient;
|
||||||
|
|
||||||
|
describe('OpenAIClient', () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
const mockCache = {
|
||||||
|
get: jest.fn().mockResolvedValue({}),
|
||||||
|
set: jest.fn(),
|
||||||
|
};
|
||||||
|
getLogStores.mockReturnValue(mockCache);
|
||||||
|
});
|
||||||
|
let client;
|
||||||
|
const model = 'gpt-4';
|
||||||
|
const parentMessageId = '1';
|
||||||
|
const messages = [
|
||||||
|
{ role: 'user', sender: 'User', text: 'Hello', messageId: parentMessageId },
|
||||||
|
{ role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
|
||||||
|
];
|
||||||
|
|
||||||
|
const defaultOptions = {
|
||||||
|
// debug: true,
|
||||||
|
req: {},
|
||||||
|
openaiApiKey: 'new-api-key',
|
||||||
|
modelOptions: {
|
||||||
|
model,
|
||||||
|
temperature: 0.7,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const defaultAzureOptions = {
|
||||||
|
azureOpenAIApiInstanceName: 'your-instance-name',
|
||||||
|
azureOpenAIApiDeploymentName: 'your-deployment-name',
|
||||||
|
azureOpenAIApiVersion: '2020-07-01-preview',
|
||||||
|
};
|
||||||
|
|
||||||
|
let originalWarn;
|
||||||
|
|
||||||
|
beforeAll(() => {
|
||||||
|
originalWarn = console.warn;
|
||||||
|
console.warn = jest.fn();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterAll(() => {
|
||||||
|
console.warn = originalWarn;
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
console.warn.mockClear();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
const options = { ...defaultOptions };
|
||||||
|
client = new OpenAIClient('test-api-key', options);
|
||||||
|
client.summarizeMessages = jest.fn().mockResolvedValue({
|
||||||
|
role: 'assistant',
|
||||||
|
content: 'Refined answer',
|
||||||
|
tokenCount: 30,
|
||||||
|
});
|
||||||
|
client.buildPrompt = jest
|
||||||
|
.fn()
|
||||||
|
.mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
|
||||||
|
client.getMessages = jest.fn().mockResolvedValue([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('setOptions', () => {
|
||||||
|
it('should set the options correctly', () => {
|
||||||
|
expect(client.apiKey).toBe('new-api-key');
|
||||||
|
expect(client.modelOptions.model).toBe(model);
|
||||||
|
expect(client.modelOptions.temperature).toBe(0.7);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
|
||||||
|
process.env.OPENAI_FORCE_PROMPT = 'true';
|
||||||
|
client.setOptions({});
|
||||||
|
expect(client.FORCE_PROMPT).toBe(true);
|
||||||
|
delete process.env.OPENAI_FORCE_PROMPT; // Cleanup
|
||||||
|
client.FORCE_PROMPT = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.FORCE_PROMPT).toBe(true);
|
||||||
|
client.FORCE_PROMPT = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/chat' });
|
||||||
|
expect(client.FORCE_PROMPT).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set isChatCompletion based on useOpenRouter, reverseProxyUrl, or model', () => {
|
||||||
|
client.setOptions({ reverseProxyUrl: null });
|
||||||
|
// true by default since default model will be gpt-4o-mini
|
||||||
|
expect(client.isChatCompletion).toBe(true);
|
||||||
|
client.isChatCompletion = undefined;
|
||||||
|
|
||||||
|
// false because completions url will force prompt payload
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.isChatCompletion).toBe(false);
|
||||||
|
client.isChatCompletion = undefined;
|
||||||
|
|
||||||
|
client.setOptions({ modelOptions: { model: 'gpt-4o-mini' }, reverseProxyUrl: null });
|
||||||
|
expect(client.isChatCompletion).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set completionsUrl and langchainProxy based on reverseProxyUrl', () => {
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://localhost:8080/v1/chat/completions' });
|
||||||
|
expect(client.completionsUrl).toBe('https://localhost:8080/v1/chat/completions');
|
||||||
|
expect(client.langchainProxy).toBe('https://localhost:8080/v1');
|
||||||
|
|
||||||
|
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||||
|
expect(client.completionsUrl).toBe('https://example.com/completions');
|
||||||
|
expect(client.langchainProxy).toBe('https://example.com/completions');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('setOptions with Simplified Azure Integration', () => {
|
||||||
|
afterEach(() => {
|
||||||
|
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
|
||||||
|
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
|
||||||
|
});
|
||||||
|
|
||||||
|
const azureOpenAIApiInstanceName = 'test-instance';
|
||||||
|
const azureOpenAIApiDeploymentName = 'test-deployment';
|
||||||
|
const azureOpenAIApiVersion = '2020-07-01-preview';
|
||||||
|
|
||||||
|
const createOptions = (model) => ({
|
||||||
|
modelOptions: { model },
|
||||||
|
azure: {
|
||||||
|
azureOpenAIApiInstanceName,
|
||||||
|
azureOpenAIApiDeploymentName,
|
||||||
|
azureOpenAIApiVersion,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set model from AZURE_OPENAI_DEFAULT_MODEL when Azure is enabled', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const options = createOptions('test');
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe('gpt-4-azure');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not change model if Azure is not enabled', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const originalModel = 'test';
|
||||||
|
client.azure = false;
|
||||||
|
client.setOptions(createOptions('test'));
|
||||||
|
expect(client.modelOptions.model).toBe(originalModel);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not change model if AZURE_OPENAI_DEFAULT_MODEL is not set and model is passed', () => {
|
||||||
|
const originalModel = 'GROK-LLM';
|
||||||
|
const options = createOptions(originalModel);
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe(originalModel);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should change model if AZURE_OPENAI_DEFAULT_MODEL is set and model is passed', () => {
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt-4-azure';
|
||||||
|
const originalModel = 'GROK-LLM';
|
||||||
|
const options = createOptions(originalModel);
|
||||||
|
client.azure = options.azure;
|
||||||
|
client.setOptions(options);
|
||||||
|
expect(client.modelOptions.model).toBe(process.env.AZURE_OPENAI_DEFAULT_MODEL);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is set', () => {
|
||||||
|
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
|
||||||
|
const model = 'gpt-4-azure';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(model);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME and default model is set', () => {
|
||||||
|
const defaultModel = 'gpt-4-azure';
|
||||||
|
process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME = 'true';
|
||||||
|
process.env.AZURE_OPENAI_DEFAULT_MODEL = defaultModel;
|
||||||
|
const model = 'gpt-4-this-is-a-test-model-name';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${model}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(defaultModel);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not include model in deployment name if AZURE_USE_MODEL_AS_DEPLOYMENT_NAME is not set', () => {
|
||||||
|
const model = 'gpt-4-azure';
|
||||||
|
|
||||||
|
const AzureClient = new OpenAIClient('test-api-key', createOptions(model));
|
||||||
|
|
||||||
|
const expectedValue = `https://${azureOpenAIApiInstanceName}.openai.azure.com/openai/deployments/${azureOpenAIApiDeploymentName}/chat/completions?api-version=${azureOpenAIApiVersion}`;
|
||||||
|
|
||||||
|
expect(AzureClient.modelOptions.model).toBe(model);
|
||||||
|
expect(AzureClient.azureEndpoint).toBe(expectedValue);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getTokenCount', () => {
|
||||||
|
it('should return the correct token count', () => {
|
||||||
|
const count = client.getTokenCount('Hello, world!');
|
||||||
|
expect(count).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getSaveOptions', () => {
|
||||||
|
it('should return the correct save options', () => {
|
||||||
|
const options = client.getSaveOptions();
|
||||||
|
expect(options).toHaveProperty('chatGptLabel');
|
||||||
|
expect(options).toHaveProperty('modelLabel');
|
||||||
|
expect(options).toHaveProperty('promptPrefix');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getBuildMessagesOptions', () => {
|
||||||
|
it('should return the correct build messages options', () => {
|
||||||
|
const options = client.getBuildMessagesOptions({ promptPrefix: 'Hello' });
|
||||||
|
expect(options).toHaveProperty('isChatCompletion');
|
||||||
|
expect(options).toHaveProperty('promptPrefix');
|
||||||
|
expect(options.promptPrefix).toBe('Hello');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('buildMessages', () => {
|
||||||
|
it('should build messages correctly for chat completion', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should build messages correctly for non-chat completion', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: false,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should build messages correctly with a promptPrefix', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
promptPrefix: 'Test Prefix',
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
const instructions = result.prompt.find((item) => item.content.includes('Test Prefix'));
|
||||||
|
expect(instructions).toBeDefined();
|
||||||
|
expect(instructions.content).toContain('Test Prefix');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle context strategy correctly', async () => {
|
||||||
|
client.contextStrategy = 'summarize';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result).toHaveProperty('prompt');
|
||||||
|
expect(result).toHaveProperty('tokenCountMap');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should assign name property for user messages when options.name is set', async () => {
|
||||||
|
client.options.name = 'Test User';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const hasUserWithName = result.prompt.some(
|
||||||
|
(item) => item.role === 'user' && item.name === 'Test_User',
|
||||||
|
);
|
||||||
|
expect(hasUserWithName).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle promptPrefix from options when promptPrefix argument is not provided', async () => {
|
||||||
|
client.options.promptPrefix = 'Test Prefix from options';
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const instructions = result.prompt.find((item) =>
|
||||||
|
item.content.includes('Test Prefix from options'),
|
||||||
|
);
|
||||||
|
expect(instructions.content).toContain('Test Prefix from options');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when neither promptPrefix argument nor options.promptPrefix is set', async () => {
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
const instructions = result.prompt.find((item) => item.content.includes('Test Prefix'));
|
||||||
|
expect(instructions).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when getMessagesForConversation returns null or an empty array', async () => {
|
||||||
|
const messages = [];
|
||||||
|
const result = await client.buildMessages(messages, parentMessageId, {
|
||||||
|
isChatCompletion: true,
|
||||||
|
});
|
||||||
|
expect(result.prompt).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getTokenCountForMessage', () => {
|
||||||
|
const example_messages = [
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
content:
|
||||||
|
'You are a helpful, pattern-following assistant that translates corporate jargon into plain English.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_user',
|
||||||
|
content: 'New synergies will help drive top-line growth.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_assistant',
|
||||||
|
content: 'Things working well together will increase revenue.',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_user',
|
||||||
|
content:
|
||||||
|
"Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'system',
|
||||||
|
name: 'example_assistant',
|
||||||
|
content: "Let's talk later when we're less busy about how to do better.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
role: 'user',
|
||||||
|
content:
|
||||||
|
"This late pivot means we don't have time to boil the ocean for the client deliverable.",
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const testCases = [
|
||||||
|
{ model: 'gpt-3.5-turbo-0301', expected: 127 },
|
||||||
|
{ model: 'gpt-3.5-turbo-0613', expected: 129 },
|
||||||
|
{ model: 'gpt-3.5-turbo', expected: 129 },
|
||||||
|
{ model: 'gpt-4-0314', expected: 129 },
|
||||||
|
{ model: 'gpt-4-0613', expected: 129 },
|
||||||
|
{ model: 'gpt-4', expected: 129 },
|
||||||
|
{ model: 'unknown', expected: 129 },
|
||||||
|
];
|
||||||
|
|
||||||
|
testCases.forEach((testCase) => {
|
||||||
|
it(`should return ${testCase.expected} tokens for model ${testCase.model}`, () => {
|
||||||
|
client.modelOptions.model = testCase.model;
|
||||||
|
// 3 tokens for assistant label
|
||||||
|
let totalTokens = 3;
|
||||||
|
for (let message of example_messages) {
|
||||||
|
totalTokens += client.getTokenCountForMessage(message);
|
||||||
|
}
|
||||||
|
expect(totalTokens).toBe(testCase.expected);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const vision_request = [
|
||||||
|
{
|
||||||
|
role: 'user',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'text',
|
||||||
|
text: 'describe what is in this image?',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'image_url',
|
||||||
|
image_url: {
|
||||||
|
url: 'https://venturebeat.com/wp-content/uploads/2019/03/openai-1.png',
|
||||||
|
detail: 'high',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const expectedTokens = 14;
|
||||||
|
const visionModel = 'gpt-4-vision-preview';
|
||||||
|
|
||||||
|
it(`should return ${expectedTokens} tokens for model ${visionModel} (Vision Request)`, () => {
|
||||||
|
client.modelOptions.model = visionModel;
|
||||||
|
// 3 tokens for assistant label
|
||||||
|
let totalTokens = 3;
|
||||||
|
for (let message of vision_request) {
|
||||||
|
totalTokens += client.getTokenCountForMessage(message);
|
||||||
|
}
|
||||||
|
expect(totalTokens).toBe(expectedTokens);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('checkVisionRequest functionality', () => {
|
||||||
|
let client;
|
||||||
|
const attachments = [{ type: 'image/png' }];
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
client = new OpenAIClient('test-api-key', {
|
||||||
|
endpoint: 'ollama',
|
||||||
|
modelOptions: {
|
||||||
|
model: 'initial-model',
|
||||||
|
},
|
||||||
|
modelsConfig: {
|
||||||
|
ollama: ['initial-model', 'llava', 'other-model'],
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
client.defaultVisionModel = 'non-valid-default-model';
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
jest.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should set "llava" as the model if it is the first valid model when default validation fails', () => {
|
||||||
|
client.checkVisionRequest(attachments);
|
||||||
|
|
||||||
|
expect(client.modelOptions.model).toBe('llava');
|
||||||
|
expect(client.isVisionModel).toBeTruthy();
|
||||||
|
expect(client.modelOptions.stop).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getStreamUsage', () => {
|
||||||
|
it('should return this.usage when completion_tokens_details is null', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: null,
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual(client.usage);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return this.usage when completion_tokens_details is missing reasoning_tokens', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: {
|
||||||
|
other_tokens: 5,
|
||||||
|
},
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual(client.usage);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should calculate output tokens correctly when completion_tokens_details is present with reasoning_tokens', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = {
|
||||||
|
completion_tokens_details: {
|
||||||
|
reasoning_tokens: 30,
|
||||||
|
other_tokens: 5,
|
||||||
|
},
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 20,
|
||||||
|
};
|
||||||
|
client.inputTokensKey = 'prompt_tokens';
|
||||||
|
client.outputTokensKey = 'completion_tokens';
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
reasoning_tokens: 30,
|
||||||
|
other_tokens: 5,
|
||||||
|
prompt_tokens: 10,
|
||||||
|
completion_tokens: 10, // |30 - 20| = 10
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return this.usage when it is undefined', () => {
|
||||||
|
const client = new OpenAIClient('test-api-key', defaultOptions);
|
||||||
|
client.usage = undefined;
|
||||||
|
|
||||||
|
const result = client.getStreamUsage();
|
||||||
|
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
130
api/app/clients/specs/OpenAIClient.tokens.js
Normal file
130
api/app/clients/specs/OpenAIClient.tokens.js
Normal file
|
|
@ -0,0 +1,130 @@
|
||||||
|
/*
|
||||||
|
This is a test script to see how much memory is used by the client when encoding.
|
||||||
|
On my work machine, it was able to process 10,000 encoding requests / 48.686 seconds = approximately 205.4 RPS
|
||||||
|
I've significantly reduced the amount of encoding needed by saving token counts in the database, so these
|
||||||
|
numbers should only be hit with a large amount of concurrent users
|
||||||
|
It would take 103 concurrent users sending 1 message every 1 second to hit these numbers, which is rather unrealistic,
|
||||||
|
and at that point, out-sourcing the encoding to a separate server would be a better solution
|
||||||
|
Also, for scaling, could increase the rate at which the encoder resets; the trade-off is more resource usage on the server.
|
||||||
|
Initial memory usage: 25.93 megabytes
|
||||||
|
Peak memory usage: 55 megabytes
|
||||||
|
Final memory usage: 28.03 megabytes
|
||||||
|
Post-test (timeout of 15s): 21.91 megabytes
|
||||||
|
*/
|
||||||
|
|
||||||
|
require('dotenv').config();
|
||||||
|
const { OpenAIClient } = require('../');
|
||||||
|
|
||||||
|
function timeout(ms) {
|
||||||
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||||
|
}
|
||||||
|
|
||||||
|
const run = async () => {
|
||||||
|
const text = `
|
||||||
|
The standard Lorem Ipsum passage, used since the 1500s
|
||||||
|
|
||||||
|
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
|
||||||
|
Section 1.10.32 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC
|
||||||
|
|
||||||
|
"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
|
||||||
|
1914 translation by H. Rackham
|
||||||
|
|
||||||
|
"But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?"
|
||||||
|
Section 1.10.33 of "de Finibus Bonorum et Malorum", written by Cicero in 45 BC
|
||||||
|
|
||||||
|
"At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat."
|
||||||
|
1914 translation by H. Rackham
|
||||||
|
|
||||||
|
"On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains."
|
||||||
|
`;
|
||||||
|
const model = 'gpt-3.5-turbo';
|
||||||
|
let maxContextTokens = 4095;
|
||||||
|
if (model === 'gpt-4') {
|
||||||
|
maxContextTokens = 8191;
|
||||||
|
} else if (model === 'gpt-4-32k') {
|
||||||
|
maxContextTokens = 32767;
|
||||||
|
}
|
||||||
|
const clientOptions = {
|
||||||
|
reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
|
||||||
|
maxContextTokens,
|
||||||
|
modelOptions: {
|
||||||
|
model,
|
||||||
|
},
|
||||||
|
proxy: process.env.PROXY || null,
|
||||||
|
debug: true,
|
||||||
|
};
|
||||||
|
|
||||||
|
let apiKey = process.env.OPENAI_API_KEY;
|
||||||
|
|
||||||
|
const maxMemory = 0.05 * 1024 * 1024 * 1024;
|
||||||
|
|
||||||
|
// Calculate initial percentage of memory used
|
||||||
|
const initialMemoryUsage = process.memoryUsage().heapUsed;
|
||||||
|
|
||||||
|
function printProgressBar(percentageUsed) {
|
||||||
|
const filledBlocks = Math.round(percentageUsed / 2); // Each block represents 2%
|
||||||
|
const emptyBlocks = 50 - filledBlocks; // Total blocks is 50 (each represents 2%), so the rest are empty
|
||||||
|
const progressBar =
|
||||||
|
'[' +
|
||||||
|
'█'.repeat(filledBlocks) +
|
||||||
|
' '.repeat(emptyBlocks) +
|
||||||
|
'] ' +
|
||||||
|
percentageUsed.toFixed(2) +
|
||||||
|
'%';
|
||||||
|
console.log(progressBar);
|
||||||
|
}
|
||||||
|
|
||||||
|
const iterations = 10000;
|
||||||
|
console.time('loopTime');
|
||||||
|
// Trying to catch the error doesn't help; all future calls will immediately crash
|
||||||
|
for (let i = 0; i < iterations; i++) {
|
||||||
|
try {
|
||||||
|
console.log(`Iteration ${i}`);
|
||||||
|
const client = new OpenAIClient(apiKey, clientOptions);
|
||||||
|
|
||||||
|
client.getTokenCount(text);
|
||||||
|
// const encoder = client.constructor.getTokenizer('cl100k_base');
|
||||||
|
// console.log(`Iteration ${i}: call encode()...`);
|
||||||
|
// encoder.encode(text, 'all');
|
||||||
|
// encoder.free();
|
||||||
|
|
||||||
|
const memoryUsageDuringLoop = process.memoryUsage().heapUsed;
|
||||||
|
const percentageUsed = (memoryUsageDuringLoop / maxMemory) * 100;
|
||||||
|
printProgressBar(percentageUsed);
|
||||||
|
|
||||||
|
if (i === iterations - 1) {
|
||||||
|
console.log(' done');
|
||||||
|
// encoder.free();
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
console.log(`caught error! in Iteration ${i}`);
|
||||||
|
console.log(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console.timeEnd('loopTime');
|
||||||
|
// Calculate final percentage of memory used
|
||||||
|
const finalMemoryUsage = process.memoryUsage().heapUsed;
|
||||||
|
// const finalPercentageUsed = finalMemoryUsage / maxMemory * 100;
|
||||||
|
console.log(`Initial memory usage: ${initialMemoryUsage / 1024 / 1024} megabytes`);
|
||||||
|
console.log(`Final memory usage: ${finalMemoryUsage / 1024 / 1024} megabytes`);
|
||||||
|
await timeout(15000);
|
||||||
|
const memoryUsageAfterTimeout = process.memoryUsage().heapUsed;
|
||||||
|
console.log(`Post timeout: ${memoryUsageAfterTimeout / 1024 / 1024} megabytes`);
|
||||||
|
};
|
||||||
|
|
||||||
|
run();
|
||||||
|
|
||||||
|
// Global handler for uncaught exceptions raised during the stress test.
// 'fetch failed' errors are expected noise from the client's background
// network activity: log them and keep the process alive so the loop can
// continue. Any other uncaught error is unexpected and fatal.
// Note: the original checked `err.message.includes('fetch failed')` twice
// in two separate `if` statements; this is the same logic as one if/else.
process.on('uncaughtException', (err) => {
  if (err.message.includes('fetch failed')) {
    console.log('fetch failed error caught');
    // process.exit(0);
  } else {
    console.error('There was an uncaught error:');
    console.error(err);
    process.exit(1);
  }
});
|
||||||
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
18
api/app/clients/tools/.well-known/Ai_PDF.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Ai PDF",
|
||||||
|
"name_for_model": "Ai_PDF",
|
||||||
|
"description_for_human": "Super-fast, interactive chats with PDFs of any size, complete with page references for fact checking.",
|
||||||
|
"description_for_model": "Provide a URL to a PDF and search the document. Break the user question in multiple semantic search queries and calls as needed. Think step by step.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/logo.png",
|
||||||
|
"contact_email": "support@promptapps.ai",
|
||||||
|
"legal_info_url": "https://plugin-3c56b9d4c8a6465998395f28b6a445b2-jexkai4vea-uc.a.run.app/legal.html"
|
||||||
|
}
|
||||||
17
api/app/clients/tools/.well-known/BrowserOp.json
Normal file
17
api/app/clients/tools/.well-known/BrowserOp.json
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "BrowserOp",
|
||||||
|
"name_for_model": "BrowserOp",
|
||||||
|
"description_for_human": "Browse dozens of webpages in one query. Fetch information more efficiently.",
|
||||||
|
"description_for_model": "This tool offers the feature for users to input a URL or multiple URLs and interact with them as needed. It's designed to comprehend the user's intent and proffer tailored suggestions in line with the content and functionality of the webpage at hand. Services like text rewrites, translations and more can be requested. When users need specific information to finish a task or if they intend to perform a search, this tool becomes a bridge to the search engine and generates responses based on the results. Whether the user is seeking information about restaurants, rentals, weather, or shopping, this tool connects to the internet and delivers the most recent results.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://testplugin.feednews.com/.well-known/openapi.yaml"
|
||||||
|
},
|
||||||
|
"logo_url": "https://openapi-af.op-mobile.opera.com/openapi/testplugin/.well-known/logo.png",
|
||||||
|
"contact_email": "aiplugins-contact-list@opera.com",
|
||||||
|
"legal_info_url": "https://legal.apexnews.com/terms/"
|
||||||
|
}
|
||||||
89
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
89
api/app/clients/tools/.well-known/Dr_Thoths_Tarot.json
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Dr. Thoth's Tarot",
|
||||||
|
"name_for_model": "Dr_Thoths_Tarot",
|
||||||
|
"description_for_human": "Tarot card novelty entertainment & analysis, by Mnemosyne Labs.",
|
||||||
|
"description_for_model": "Intelligent analysis program for tarot card entertainment, data, & prompts, by Mnemosyne Labs, a division of AzothCorp.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://dr-thoth-tarot.herokuapp.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://dr-thoth-tarot.herokuapp.com/logo.png",
|
||||||
|
"contact_email": "legal@AzothCorp.com",
|
||||||
|
"legal_info_url": "http://AzothCorp.com/legal",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"name": "Draw Card",
|
||||||
|
"path": "/drawcard",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Generate a single tarot card from the deck of 78 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Occult Card",
|
||||||
|
"path": "/occult_card",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Generate a tarot card using the specified planet's Kamea matrix.",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"name": "planet",
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["Saturn", "Jupiter", "Mars", "Sun", "Venus", "Mercury", "Moon"],
|
||||||
|
"required": true,
|
||||||
|
"description": "The planet name to use the corresponding Kamea matrix."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Three Card Spread",
|
||||||
|
"path": "/threecardspread",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a three-card tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Celtic Cross Spread",
|
||||||
|
"path": "/celticcross",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Celtic Cross tarot spread with 10 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Past, Present, Future Spread",
|
||||||
|
"path": "/pastpresentfuture",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Past, Present, Future tarot spread with 3 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Horseshoe Spread",
|
||||||
|
"path": "/horseshoe",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Horseshoe tarot spread with 7 cards."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Relationship Spread",
|
||||||
|
"path": "/relationship",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Relationship tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Career Spread",
|
||||||
|
"path": "/career",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Career tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Yes/No Spread",
|
||||||
|
"path": "/yesno",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Yes/No tarot spread."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Chakra Spread",
|
||||||
|
"path": "/chakra",
|
||||||
|
"method": "GET",
|
||||||
|
"description": "Perform a Chakra tarot spread with 7 cards."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
18
api/app/clients/tools/.well-known/DreamInterpreter.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_model": "DreamInterpreter",
|
||||||
|
"name_for_human": "Dream Interpreter",
|
||||||
|
"description_for_model": "Interprets your dreams using advanced techniques.",
|
||||||
|
"description_for_human": "Interprets your dreams using advanced techniques.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://dreamplugin.bgnetmobile.com/.well-known/openapi.json",
|
||||||
|
"has_user_authentication": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://dreamplugin.bgnetmobile.com/.well-known/logo.png",
|
||||||
|
"contact_email": "ismail.orkler@bgnetmobile.com",
|
||||||
|
"legal_info_url": "https://dreamplugin.bgnetmobile.com/terms.html"
|
||||||
|
}
|
||||||
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
22
api/app/clients/tools/.well-known/VoxScript.json
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "VoxScript",
|
||||||
|
"name_for_model": "VoxScript",
|
||||||
|
"description_for_human": "Enables searching of YouTube transcripts, financial data sources Google Search results, and more!",
|
||||||
|
"description_for_model": "Plugin for searching through various data sources.",
|
||||||
|
"auth": {
|
||||||
|
"type": "service_http",
|
||||||
|
"authorization_type": "bearer",
|
||||||
|
"verification_tokens": {
|
||||||
|
"openai": "ffc5226d1af346c08a98dee7deec9f76"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://voxscript.awt.icu/swagger/v1/swagger.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://voxscript.awt.icu/images/VoxScript_logo_32x32.png",
|
||||||
|
"contact_email": "voxscript@allwiretech.com",
|
||||||
|
"legal_info_url": "https://voxscript.awt.icu/legal/"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
18
api/app/clients/tools/.well-known/askyourpdf.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_model": "askyourpdf",
|
||||||
|
"name_for_human": "AskYourPDF",
|
||||||
|
"description_for_model": "This plugin is designed to expedite the extraction of information from PDF documents. It works by accepting a URL link to a PDF or a document ID (doc_id) from the user. If a URL is provided, the plugin first validates that it is a correct URL. \\nAfter validating the URL, the plugin proceeds to download the PDF and store its content in a vector database. If the user provides a doc_id, the plugin directly retrieves the document from the database. The plugin then scans through the stored PDFs to find answers to user queries or retrieve specific details.\\n\\nHowever, if an error occurs while querying the API, the user is prompted to download their document first, then manually upload it to [AskYourPDF Upload](https://askyourpdf.com/upload). Once the upload is complete, the user should copy the resulting doc_id and paste it back into the chat for further interaction.\nThe plugin is particularly useful when the user's question pertains to content within a PDF document. When providing answers, the plugin also specifies the page number (highlighted in bold) where the relevant information was found. Remember, the URL must be valid for a successful query. Failure to validate the URL may lead to errors or unsuccessful queries.",
|
||||||
|
"description_for_human": "Unlock the power of your PDFs!, dive into your documents, find answers, and bring information to your fingertips.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "askyourpdf.yaml",
|
||||||
|
"has_user_authentication": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://plugin.askyourpdf.com/.well-known/logo.png",
|
||||||
|
"contact_email": "plugin@askyourpdf.com",
|
||||||
|
"legal_info_url": "https://askyourpdf.com/terms"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
18
api/app/clients/tools/.well-known/drink_maestro.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Drink Maestro",
|
||||||
|
"name_for_model": "drink_maestro",
|
||||||
|
"description_for_human": "Learn to mix any drink you can imagine (real or made-up), and discover new ones. Includes drink images.",
|
||||||
|
"description_for_model": "You are a silly bartender/comic who knows how to make any drink imaginable. You provide recipes for specific drinks, suggest new drinks, and show pictures of drinks. Be creative in your descriptions and make jokes and puns. Use a lot of emojis. If the user makes a request in another language, send API call in English, and then translate the response.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.drinkmaestro.space/.well-known/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://i.imgur.com/6q8HWdz.png",
|
||||||
|
"contact_email": "nikkmitchell@gmail.com",
|
||||||
|
"legal_info_url": "https://github.com/nikkmitchell/DrinkMaestro/blob/main/Legal.txt"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Earth",
|
||||||
|
"name_for_model": "earthImagesAndVisualizations",
|
||||||
|
"description_for_human": "Generates a map image based on provided location, tilt and style.",
|
||||||
|
"description_for_model": "Generates a map image based on provided coordinates or location, tilt and style, and even geoJson to provide markers, paths, and polygons. Responds with an image-link. For the styles choose one of these: [light, dark, streets, outdoors, satellite, satellite-streets]",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.earth-plugin.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://api.earth-plugin.com/logo.png",
|
||||||
|
"contact_email": "contact@earth-plugin.com",
|
||||||
|
"legal_info_url": "https://api.earth-plugin.com/legal.html"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Scholarly Graph Link",
|
||||||
|
"name_for_model": "scholarly_graph_link",
|
||||||
|
"description_for_human": "You can search papers, authors, datasets and software. It has access to Figshare, Arxiv, and many others.",
|
||||||
|
"description_for_model": "Run GraphQL queries against an API hosted by DataCite API. The API supports most GraphQL query but does not support mutations statements. Use `{ __schema { types { name kind } } }` to get all the types in the GraphQL schema. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{ datasets { nodes { id sizes citations { nodes { id titles { title } } } } } }` to get all the citations of all datasets in the API. Use `{person(id:ORCID) {works(first:50) {nodes {id titles(first: 1){title} publicationYear}}}}` to get the first 50 works of a person based on their ORCID. All Ids are urls, e.g., https://orcid.org/0012-0000-1012-1110. Mutations statements are not allowed.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://api.datacite.org/graphql-openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://raw.githubusercontent.com/kjgarza/scholarly_graph_link/master/logo.png",
|
||||||
|
"contact_email": "kj.garza@gmail.com",
|
||||||
|
"legal_info_url": "https://github.com/kjgarza/scholarly_graph_link/blob/master/LICENSE"
|
||||||
|
}
|
||||||
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
24
api/app/clients/tools/.well-known/has-issues/web_pilot.json
Normal file
|
|
@ -0,0 +1,24 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "WebPilot",
|
||||||
|
"name_for_model": "web_pilot",
|
||||||
|
"description_for_human": "Browse & QA Webpage/PDF/Data. Generate articles, from one or more URLs.",
|
||||||
|
"description_for_model": "This tool allows users to provide a URL(or URLs) and optionally requests for interacting with, extracting specific information or how to do with the content from the URL. Requests may include rewrite, translate, and others. If there are any requests, when accessing the /api/visit-web endpoint, the parameter 'user_has_request' should be set to 'true'. And if there are no requests, 'user_has_request' should be set to 'false'.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://webreader.webpilotai.com/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://webreader.webpilotai.com/logo.png",
|
||||||
|
"contact_email": "dev@webpilot.ai",
|
||||||
|
"legal_info_url": "https://webreader.webpilotai.com/legal_info.html",
|
||||||
|
"headers": {
|
||||||
|
"id": "WebPilot-Friend-UID"
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"user_has_request": true
|
||||||
|
}
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
18
api/app/clients/tools/.well-known/image_prompt_enhancer.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Image Prompt Enhancer",
|
||||||
|
"name_for_model": "image_prompt_enhancer",
|
||||||
|
"description_for_human": "Transform your ideas into complex, personalized image generation prompts.",
|
||||||
|
"description_for_model": "Provides instructions for crafting an enhanced image prompt. Use this whenever the user wants to enhance a prompt.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://image-prompt-enhancer.gafo.tech/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://image-prompt-enhancer.gafo.tech/logo.png",
|
||||||
|
"contact_email": "gafotech1@gmail.com",
|
||||||
|
"legal_info_url": "https://image-prompt-enhancer.gafo.tech/legal"
|
||||||
|
}
|
||||||
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
157
api/app/clients/tools/.well-known/openapi/askyourpdf.yaml
Normal file
|
|
@ -0,0 +1,157 @@
|
||||||
|
openapi: 3.0.2
|
||||||
|
info:
|
||||||
|
title: FastAPI
|
||||||
|
version: 0.1.0
|
||||||
|
servers:
|
||||||
|
- url: https://plugin.askyourpdf.com
|
||||||
|
paths:
|
||||||
|
/api/download_pdf:
|
||||||
|
post:
|
||||||
|
summary: Download Pdf
|
||||||
|
description: Download a PDF file from a URL and save it to the vector database.
|
||||||
|
operationId: download_pdf_api_download_pdf_post
|
||||||
|
parameters:
|
||||||
|
- required: true
|
||||||
|
schema:
|
||||||
|
title: Url
|
||||||
|
type: string
|
||||||
|
name: url
|
||||||
|
in: query
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successful Response
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/FileResponse'
|
||||||
|
'422':
|
||||||
|
description: Validation Error
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/HTTPValidationError'
|
||||||
|
/query:
|
||||||
|
post:
|
||||||
|
summary: Perform Query
|
||||||
|
description: Perform a query on a document.
|
||||||
|
operationId: perform_query_query_post
|
||||||
|
requestBody:
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/InputData'
|
||||||
|
required: true
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: Successful Response
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/ResponseModel'
|
||||||
|
'422':
|
||||||
|
description: Validation Error
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/HTTPValidationError'
|
||||||
|
components:
|
||||||
|
schemas:
|
||||||
|
DocumentMetadata:
|
||||||
|
title: DocumentMetadata
|
||||||
|
required:
|
||||||
|
- source
|
||||||
|
- page_number
|
||||||
|
- author
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
source:
|
||||||
|
title: Source
|
||||||
|
type: string
|
||||||
|
page_number:
|
||||||
|
title: Page Number
|
||||||
|
type: integer
|
||||||
|
author:
|
||||||
|
title: Author
|
||||||
|
type: string
|
||||||
|
FileResponse:
|
||||||
|
title: FileResponse
|
||||||
|
required:
|
||||||
|
- docId
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
docId:
|
||||||
|
title: Docid
|
||||||
|
type: string
|
||||||
|
error:
|
||||||
|
title: Error
|
||||||
|
type: string
|
||||||
|
HTTPValidationError:
|
||||||
|
title: HTTPValidationError
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
detail:
|
||||||
|
title: Detail
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/ValidationError'
|
||||||
|
InputData:
|
||||||
|
title: InputData
|
||||||
|
required:
|
||||||
|
- doc_id
|
||||||
|
- query
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
doc_id:
|
||||||
|
title: Doc Id
|
||||||
|
type: string
|
||||||
|
query:
|
||||||
|
title: Query
|
||||||
|
type: string
|
||||||
|
ResponseModel:
|
||||||
|
title: ResponseModel
|
||||||
|
required:
|
||||||
|
- results
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
results:
|
||||||
|
title: Results
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
$ref: '#/components/schemas/SearchResult'
|
||||||
|
SearchResult:
|
||||||
|
title: SearchResult
|
||||||
|
required:
|
||||||
|
- doc_id
|
||||||
|
- text
|
||||||
|
- metadata
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
doc_id:
|
||||||
|
title: Doc Id
|
||||||
|
type: string
|
||||||
|
text:
|
||||||
|
title: Text
|
||||||
|
type: string
|
||||||
|
metadata:
|
||||||
|
$ref: '#/components/schemas/DocumentMetadata'
|
||||||
|
ValidationError:
|
||||||
|
title: ValidationError
|
||||||
|
required:
|
||||||
|
- loc
|
||||||
|
- msg
|
||||||
|
- type
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
loc:
|
||||||
|
title: Location
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
anyOf:
|
||||||
|
- type: string
|
||||||
|
- type: integer
|
||||||
|
msg:
|
||||||
|
title: Message
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
title: Error Type
|
||||||
|
type: string
|
||||||
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
185
api/app/clients/tools/.well-known/openapi/scholarai.yaml
Normal file
|
|
@ -0,0 +1,185 @@
|
||||||
|
openapi: 3.0.1
|
||||||
|
info:
|
||||||
|
title: ScholarAI
|
||||||
|
description: Allows the user to search facts and findings from scientific articles
|
||||||
|
version: 'v1'
|
||||||
|
servers:
|
||||||
|
- url: https://scholar-ai.net
|
||||||
|
paths:
|
||||||
|
/api/abstracts:
|
||||||
|
get:
|
||||||
|
operationId: searchAbstracts
|
||||||
|
summary: Get relevant paper abstracts by keywords search
|
||||||
|
parameters:
|
||||||
|
- name: keywords
|
||||||
|
in: query
|
||||||
|
description: Keywords of inquiry which should appear in article. Must be in English.
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: sort
|
||||||
|
in: query
|
||||||
|
description: The sort order for results. Valid values are cited_by_count or publication_date. Excluding this value does a relevance based search.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
enum:
|
||||||
|
- cited_by_count
|
||||||
|
- publication_date
|
||||||
|
- name: query
|
||||||
|
in: query
|
||||||
|
description: The user query
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: peer_reviewed_only
|
||||||
|
in: query
|
||||||
|
description: Whether to only return peer reviewed articles. Defaults to true, ChatGPT should cautiously suggest this value can be set to false
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: start_year
|
||||||
|
in: query
|
||||||
|
description: The first year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: end_year
|
||||||
|
in: query
|
||||||
|
description: The last year, inclusive, to include in the search range. Excluding this value will include all years.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: offset
|
||||||
|
in: query
|
||||||
|
description: The offset of the first result to return. Defaults to 0.
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/searchAbstractsResponse'
|
||||||
|
/api/fulltext:
|
||||||
|
get:
|
||||||
|
operationId: getFullText
|
||||||
|
summary: Get full text of a paper by URL for PDF
|
||||||
|
parameters:
|
||||||
|
- name: pdf_url
|
||||||
|
in: query
|
||||||
|
description: URL for PDF
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: chunk
|
||||||
|
in: query
|
||||||
|
description: chunk number to retrieve, defaults to 1
|
||||||
|
required: false
|
||||||
|
schema:
|
||||||
|
type: number
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/getFullTextResponse'
|
||||||
|
/api/save-citation:
|
||||||
|
get:
|
||||||
|
operationId: saveCitation
|
||||||
|
summary: Save citation to reference manager
|
||||||
|
parameters:
|
||||||
|
- name: doi
|
||||||
|
in: query
|
||||||
|
description: Digital Object Identifier (DOI) of article
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: zotero_user_id
|
||||||
|
in: query
|
||||||
|
description: Zotero User ID
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
- name: zotero_api_key
|
||||||
|
in: query
|
||||||
|
description: Zotero API Key
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: string
|
||||||
|
responses:
|
||||||
|
"200":
|
||||||
|
description: OK
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/saveCitationResponse'
|
||||||
|
components:
|
||||||
|
schemas:
|
||||||
|
searchAbstractsResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
next_offset:
|
||||||
|
type: number
|
||||||
|
description: The offset of the next page of results.
|
||||||
|
total_num_results:
|
||||||
|
type: number
|
||||||
|
description: The total number of results.
|
||||||
|
abstracts:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
title:
|
||||||
|
type: string
|
||||||
|
abstract:
|
||||||
|
type: string
|
||||||
|
description: Summary of the context, methods, results, and conclusions of the paper.
|
||||||
|
doi:
|
||||||
|
type: string
|
||||||
|
description: The DOI of the paper.
|
||||||
|
landing_page_url:
|
||||||
|
type: string
|
||||||
|
description: Link to the paper on its open-access host.
|
||||||
|
pdf_url:
|
||||||
|
type: string
|
||||||
|
description: Link to the paper PDF.
|
||||||
|
publicationDate:
|
||||||
|
type: string
|
||||||
|
description: The date the paper was published in YYYY-MM-DD format.
|
||||||
|
relevance:
|
||||||
|
type: number
|
||||||
|
description: The relevance of the paper to the search query. 1 is the most relevant.
|
||||||
|
creators:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
description: The name of the creator.
|
||||||
|
cited_by_count:
|
||||||
|
type: number
|
||||||
|
description: The number of citations of the article.
|
||||||
|
description: The list of relevant abstracts.
|
||||||
|
getFullTextResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
full_text:
|
||||||
|
type: string
|
||||||
|
description: The full text of the paper.
|
||||||
|
pdf_url:
|
||||||
|
type: string
|
||||||
|
description: The PDF URL of the paper.
|
||||||
|
chunk:
|
||||||
|
type: number
|
||||||
|
description: The chunk of the paper.
|
||||||
|
total_chunk_num:
|
||||||
|
type: number
|
||||||
|
description: The total chunks of the paper.
|
||||||
|
saveCitationResponse:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
message:
|
||||||
|
type: string
|
||||||
|
description: Confirmation of successful save or error message.
|
||||||
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
17
api/app/clients/tools/.well-known/qrCodes.json
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "QR Codes",
|
||||||
|
"name_for_model": "qrCodes",
|
||||||
|
"description_for_human": "Create QR codes.",
|
||||||
|
"description_for_model": "Plugin for generating QR codes.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/openapi.yaml"
|
||||||
|
},
|
||||||
|
"logo_url": "https://chatgpt-qrcode-46d7d4ebefc8.herokuapp.com/logo.png",
|
||||||
|
"contact_email": "chrismountzou@gmail.com",
|
||||||
|
"legal_info_url": "https://raw.githubusercontent.com/mountzou/qrCodeGPTv1/master/legal"
|
||||||
|
}
|
||||||
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
22
api/app/clients/tools/.well-known/scholarai.json
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "ScholarAI",
|
||||||
|
"name_for_model": "scholarai",
|
||||||
|
"description_for_human": "Unleash scientific research: search 40M+ peer-reviewed papers, explore scientific PDFs, and save to reference managers.",
|
||||||
|
"description_for_model": "Access open access scientific literature from peer-reviewed journals. The abstract endpoint finds relevant papers based on 2 to 6 keywords. After getting abstracts, ALWAYS prompt the user offering to go into more detail. Use the fulltext endpoint to retrieve the entire paper's text and access specific details using the provided pdf_url, if available. ALWAYS hyperlink the pdf_url from the responses if available. Offer to dive into the fulltext or search for additional papers. Always ask if the user wants save any paper to the user’s Zotero reference manager by using the save-citation endpoint and providing the doi and requesting the user’s zotero_user_id and zotero_api_key.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "scholarai.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"sort": "cited_by_count"
|
||||||
|
},
|
||||||
|
"logo_url": "https://scholar-ai.net/logo.png",
|
||||||
|
"contact_email": "lakshb429@gmail.com",
|
||||||
|
"legal_info_url": "https://scholar-ai.net/legal.txt",
|
||||||
|
"HttpAuthorizationType": "basic"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
18
api/app/clients/tools/.well-known/uberchord.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Uberchord",
|
||||||
|
"name_for_model": "uberchord",
|
||||||
|
"description_for_human": "Find guitar chord diagrams by specifying the chord name.",
|
||||||
|
"description_for_model": "Fetch guitar chord diagrams, their positions on the guitar fretboard.",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://guitarchords.pluginboost.com/.well-known/openapi.yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://guitarchords.pluginboost.com/logo.png",
|
||||||
|
"contact_email": "info.bluelightweb@gmail.com",
|
||||||
|
"legal_info_url": "https://guitarchords.pluginboost.com/legal"
|
||||||
|
}
|
||||||
18
api/app/clients/tools/.well-known/web_search.json
Normal file
18
api/app/clients/tools/.well-known/web_search.json
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
{
|
||||||
|
"schema_version": "v1",
|
||||||
|
"name_for_human": "Web Search",
|
||||||
|
"name_for_model": "web_search",
|
||||||
|
"description_for_human": "Search for information from the internet",
|
||||||
|
"description_for_model": "Search for information from the internet",
|
||||||
|
"auth": {
|
||||||
|
"type": "none"
|
||||||
|
},
|
||||||
|
"api": {
|
||||||
|
"type": "openapi",
|
||||||
|
"url": "https://websearch.plugsugar.com/api/openapi_yaml",
|
||||||
|
"is_user_authenticated": false
|
||||||
|
},
|
||||||
|
"logo_url": "https://websearch.plugsugar.com/200x200.png",
|
||||||
|
"contact_email": "support@plugsugar.com",
|
||||||
|
"legal_info_url": "https://websearch.plugsugar.com/contact"
|
||||||
|
}
|
||||||
|
|
@ -5,8 +5,9 @@ const { v4: uuidv4 } = require('uuid');
|
||||||
const { ProxyAgent, fetch } = require('undici');
|
const { ProxyAgent, fetch } = require('undici');
|
||||||
const { Tool } = require('@langchain/core/tools');
|
const { Tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { getImageBasename, extractBaseURL } = require('@librechat/api');
|
const { getImageBasename } = require('@librechat/api');
|
||||||
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
const { FileContext, ContentTypes } = require('librechat-data-provider');
|
||||||
|
const extractBaseURL = require('~/utils/extractBaseURL');
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
|
||||||
|
|
@ -6,10 +6,11 @@ const { ProxyAgent } = require('undici');
|
||||||
const { tool } = require('@langchain/core/tools');
|
const { tool } = require('@langchain/core/tools');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||||
|
const { logAxiosError, oaiToolkit } = require('@librechat/api');
|
||||||
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
|
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
|
||||||
const { logAxiosError, oaiToolkit, extractBaseURL } = require('@librechat/api');
|
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { getFiles } = require('~/models');
|
const extractBaseURL = require('~/utils/extractBaseURL');
|
||||||
|
const { getFiles } = require('~/models/File');
|
||||||
|
|
||||||
const displayMessage =
|
const displayMessage =
|
||||||
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
|
||||||
|
|
|
||||||
|
|
@ -232,7 +232,7 @@ class OpenWeather extends Tool {
|
||||||
|
|
||||||
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
|
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
|
||||||
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
|
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
|
||||||
return "Error: lat and lon are required and must be numbers for this action (or specify 'city').";
|
return 'Error: lat and lon are required and must be numbers for this action (or specify \'city\').';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -243,7 +243,7 @@ class OpenWeather extends Tool {
|
||||||
let dt;
|
let dt;
|
||||||
if (action === 'timestamp') {
|
if (action === 'timestamp') {
|
||||||
if (!date) {
|
if (!date) {
|
||||||
return "Error: For timestamp action, a 'date' in YYYY-MM-DD format is required.";
|
return 'Error: For timestamp action, a \'date\' in YYYY-MM-DD format is required.';
|
||||||
}
|
}
|
||||||
dt = this.convertDateToUnix(date);
|
dt = this.convertDateToUnix(date);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,7 @@ const { logger } = require('@librechat/data-schemas');
|
||||||
const { generateShortLivedToken } = require('@librechat/api');
|
const { generateShortLivedToken } = require('@librechat/api');
|
||||||
const { Tools, EToolResources } = require('librechat-data-provider');
|
const { Tools, EToolResources } = require('librechat-data-provider');
|
||||||
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
|
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
|
||||||
const { getFiles } = require('~/models');
|
const { getFiles } = require('~/models/File');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
|
|
|
||||||
33
api/app/clients/tools/util/handleOpenAIErrors.js
Normal file
33
api/app/clients/tools/util/handleOpenAIErrors.js
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
const OpenAI = require('openai');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handles errors that may occur when making requests to OpenAI's API.
|
||||||
|
* It checks the instance of the error and prints a specific warning message
|
||||||
|
* to the console depending on the type of error encountered.
|
||||||
|
* It then calls an optional error callback function with the error object.
|
||||||
|
*
|
||||||
|
* @param {Error} err - The error object thrown by OpenAI API.
|
||||||
|
* @param {Function} errorCallback - A callback function that is called with the error object.
|
||||||
|
* @param {string} [context='stream'] - A string providing context where the error occurred, defaults to 'stream'.
|
||||||
|
*/
|
||||||
|
async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
|
||||||
|
if (err instanceof OpenAI.APIError && err?.message?.includes('abort')) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
|
||||||
|
}
|
||||||
|
if (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
|
||||||
|
} else if (err instanceof OpenAI.APIError) {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] API error`);
|
||||||
|
} else {
|
||||||
|
logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.error(err);
|
||||||
|
|
||||||
|
if (errorCallback) {
|
||||||
|
errorCallback(err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = handleOpenAIErrors;
|
||||||
|
|
@ -11,7 +11,6 @@ const {
|
||||||
mcpToolPattern,
|
mcpToolPattern,
|
||||||
loadWebSearchAuth,
|
loadWebSearchAuth,
|
||||||
} = require('@librechat/api');
|
} = require('@librechat/api');
|
||||||
const { getMCPServersRegistry } = require('~/config');
|
|
||||||
const {
|
const {
|
||||||
Tools,
|
Tools,
|
||||||
Constants,
|
Constants,
|
||||||
|
|
@ -348,10 +347,7 @@ Anchor pattern: \\ue202turn{N}{type}{index} where N=turn number, type=search|new
|
||||||
/** Placeholder used for UI purposes */
|
/** Placeholder used for UI purposes */
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (
|
if (serverName && options.req?.config?.mcpConfig?.[serverName] == null) {
|
||||||
serverName &&
|
|
||||||
(await getMCPServersRegistry().getServerConfig(serverName, user)) == undefined
|
|
||||||
) {
|
|
||||||
logger.warn(
|
logger.warn(
|
||||||
`MCP server "${serverName}" for "${toolName}" tool is not configured${agent?.id != null && agent.id ? ` but attached to "${agent.id}"` : ''}`,
|
`MCP server "${serverName}" for "${toolName}" tool is not configured${agent?.id != null && agent.id ? ` but attached to "${agent.id}"` : ''}`,
|
||||||
);
|
);
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,8 @@
|
||||||
const { validateTools, loadTools } = require('./handleTools');
|
const { validateTools, loadTools } = require('./handleTools');
|
||||||
|
const handleOpenAIErrors = require('./handleOpenAIErrors');
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
handleOpenAIErrors,
|
||||||
validateTools,
|
validateTools,
|
||||||
loadTools,
|
loadTools,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,6 @@
|
||||||
const { EventSource } = require('eventsource');
|
const { EventSource } = require('eventsource');
|
||||||
const { Time } = require('librechat-data-provider');
|
const { Time } = require('librechat-data-provider');
|
||||||
const {
|
const { MCPManager, FlowStateManager, OAuthReconnectionManager } = require('@librechat/api');
|
||||||
MCPManager,
|
|
||||||
FlowStateManager,
|
|
||||||
MCPServersRegistry,
|
|
||||||
OAuthReconnectionManager,
|
|
||||||
} = require('@librechat/api');
|
|
||||||
const logger = require('./winston');
|
const logger = require('./winston');
|
||||||
|
|
||||||
global.EventSource = EventSource;
|
global.EventSource = EventSource;
|
||||||
|
|
@ -28,8 +23,6 @@ function getFlowStateManager(flowsCache) {
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
logger,
|
logger,
|
||||||
createMCPServersRegistry: MCPServersRegistry.createInstance,
|
|
||||||
getMCPServersRegistry: MCPServersRegistry.getInstance,
|
|
||||||
createMCPManager: MCPManager.createInstance,
|
createMCPManager: MCPManager.createInstance,
|
||||||
getMCPManager: MCPManager.getInstance,
|
getMCPManager: MCPManager.getInstance,
|
||||||
getFlowStateManager,
|
getFlowStateManager,
|
||||||
|
|
|
||||||
|
|
@ -1,35 +1,8 @@
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const fs = require('fs');
|
|
||||||
const winston = require('winston');
|
const winston = require('winston');
|
||||||
require('winston-daily-rotate-file');
|
require('winston-daily-rotate-file');
|
||||||
|
|
||||||
/**
|
const logDir = path.join(__dirname, '..', 'logs');
|
||||||
* Determine the log directory.
|
|
||||||
* Priority:
|
|
||||||
* 1. LIBRECHAT_LOG_DIR environment variable (allows user override)
|
|
||||||
* 2. /app/logs if running in Docker (bind-mounted with correct permissions)
|
|
||||||
* 3. api/logs relative to this file (local development)
|
|
||||||
*/
|
|
||||||
const getLogDir = () => {
|
|
||||||
if (process.env.LIBRECHAT_LOG_DIR) {
|
|
||||||
return process.env.LIBRECHAT_LOG_DIR;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if running in Docker container (cwd is /app)
|
|
||||||
if (process.cwd() === '/app') {
|
|
||||||
const dockerLogDir = '/app/logs';
|
|
||||||
// Ensure the directory exists
|
|
||||||
if (!fs.existsSync(dockerLogDir)) {
|
|
||||||
fs.mkdirSync(dockerLogDir, { recursive: true });
|
|
||||||
}
|
|
||||||
return dockerLogDir;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Local development: use api/logs relative to this file
|
|
||||||
return path.join(__dirname, '..', 'logs');
|
|
||||||
};
|
|
||||||
|
|
||||||
const logDir = getLogDir();
|
|
||||||
|
|
||||||
const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
|
const { NODE_ENV, DEBUG_LOGGING = false } = process.env;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,36 +1,9 @@
|
||||||
const path = require('path');
|
const path = require('path');
|
||||||
const fs = require('fs');
|
|
||||||
const winston = require('winston');
|
const winston = require('winston');
|
||||||
require('winston-daily-rotate-file');
|
require('winston-daily-rotate-file');
|
||||||
const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = require('./parsers');
|
const { redactFormat, redactMessage, debugTraverse, jsonTruncateFormat } = require('./parsers');
|
||||||
|
|
||||||
/**
|
const logDir = path.join(__dirname, '..', 'logs');
|
||||||
* Determine the log directory.
|
|
||||||
* Priority:
|
|
||||||
* 1. LIBRECHAT_LOG_DIR environment variable (allows user override)
|
|
||||||
* 2. /app/logs if running in Docker (bind-mounted with correct permissions)
|
|
||||||
* 3. api/logs relative to this file (local development)
|
|
||||||
*/
|
|
||||||
const getLogDir = () => {
|
|
||||||
if (process.env.LIBRECHAT_LOG_DIR) {
|
|
||||||
return process.env.LIBRECHAT_LOG_DIR;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if running in Docker container (cwd is /app)
|
|
||||||
if (process.cwd() === '/app') {
|
|
||||||
const dockerLogDir = '/app/logs';
|
|
||||||
// Ensure the directory exists
|
|
||||||
if (!fs.existsSync(dockerLogDir)) {
|
|
||||||
fs.mkdirSync(dockerLogDir, { recursive: true });
|
|
||||||
}
|
|
||||||
return dockerLogDir;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Local development: use api/logs relative to this file
|
|
||||||
return path.join(__dirname, '..', 'logs');
|
|
||||||
};
|
|
||||||
|
|
||||||
const logDir = getLogDir();
|
|
||||||
|
|
||||||
const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
|
const { NODE_ENV, DEBUG_LOGGING = true, CONSOLE_JSON = false, DEBUG_CONSOLE = false } = process.env;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,11 @@ module.exports = {
|
||||||
roots: ['<rootDir>'],
|
roots: ['<rootDir>'],
|
||||||
coverageDirectory: 'coverage',
|
coverageDirectory: 'coverage',
|
||||||
testTimeout: 30000, // 30 seconds timeout for all tests
|
testTimeout: 30000, // 30 seconds timeout for all tests
|
||||||
setupFiles: ['./test/jestSetup.js', './test/__mocks__/logger.js'],
|
setupFiles: [
|
||||||
|
'./test/jestSetup.js',
|
||||||
|
'./test/__mocks__/logger.js',
|
||||||
|
'./test/__mocks__/fetchEventSource.js',
|
||||||
|
],
|
||||||
moduleNameMapper: {
|
moduleNameMapper: {
|
||||||
'~/(.*)': '<rootDir>/$1',
|
'~/(.*)': '<rootDir>/$1',
|
||||||
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
|
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
|
||||||
|
|
|
||||||
29
api/lib/utils/mergeSort.js
Normal file
29
api/lib/utils/mergeSort.js
Normal file
|
|
@ -0,0 +1,29 @@
|
||||||
|
function mergeSort(arr, compareFn) {
|
||||||
|
if (arr.length <= 1) {
|
||||||
|
return arr;
|
||||||
|
}
|
||||||
|
|
||||||
|
const mid = Math.floor(arr.length / 2);
|
||||||
|
const leftArr = arr.slice(0, mid);
|
||||||
|
const rightArr = arr.slice(mid);
|
||||||
|
|
||||||
|
return merge(mergeSort(leftArr, compareFn), mergeSort(rightArr, compareFn), compareFn);
|
||||||
|
}
|
||||||
|
|
||||||
|
function merge(leftArr, rightArr, compareFn) {
|
||||||
|
const result = [];
|
||||||
|
let leftIndex = 0;
|
||||||
|
let rightIndex = 0;
|
||||||
|
|
||||||
|
while (leftIndex < leftArr.length && rightIndex < rightArr.length) {
|
||||||
|
if (compareFn(leftArr[leftIndex], rightArr[rightIndex]) < 0) {
|
||||||
|
result.push(leftArr[leftIndex++]);
|
||||||
|
} else {
|
||||||
|
result.push(rightArr[rightIndex++]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.concat(leftArr.slice(leftIndex)).concat(rightArr.slice(rightIndex));
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = mergeSort;
|
||||||
8
api/lib/utils/misc.js
Normal file
8
api/lib/utils/misc.js
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
const cleanUpPrimaryKeyValue = (value) => {
|
||||||
|
// For Bing convoId handling
|
||||||
|
return value.replace(/--/g, '|');
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
cleanUpPrimaryKeyValue,
|
||||||
|
};
|
||||||
|
|
@ -15,29 +15,6 @@ const { getMCPServerTools } = require('~/server/services/Config');
|
||||||
const { Agent, AclEntry } = require('~/db/models');
|
const { Agent, AclEntry } = require('~/db/models');
|
||||||
const { getActions } = require('./Action');
|
const { getActions } = require('./Action');
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts unique MCP server names from tools array
|
|
||||||
* Tools format: "toolName_mcp_serverName" or "sys__server__sys_mcp_serverName"
|
|
||||||
* @param {string[]} tools - Array of tool identifiers
|
|
||||||
* @returns {string[]} Array of unique MCP server names
|
|
||||||
*/
|
|
||||||
const extractMCPServerNames = (tools) => {
|
|
||||||
if (!tools || !Array.isArray(tools)) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
const serverNames = new Set();
|
|
||||||
for (const tool of tools) {
|
|
||||||
if (!tool || !tool.includes(mcp_delimiter)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
const parts = tool.split(mcp_delimiter);
|
|
||||||
if (parts.length >= 2) {
|
|
||||||
serverNames.add(parts[parts.length - 1]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Array.from(serverNames);
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create an agent with the provided data.
|
* Create an agent with the provided data.
|
||||||
* @param {Object} agentData - The agent data to create.
|
* @param {Object} agentData - The agent data to create.
|
||||||
|
|
@ -57,7 +34,6 @@ const createAgent = async (agentData) => {
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
category: agentData.category || 'general',
|
category: agentData.category || 'general',
|
||||||
mcpServerNames: extractMCPServerNames(agentData.tools),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
return (await Agent.create(initialAgentData)).toObject();
|
return (await Agent.create(initialAgentData)).toObject();
|
||||||
|
|
@ -378,13 +354,6 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
|
||||||
} = currentAgent.toObject();
|
} = currentAgent.toObject();
|
||||||
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
||||||
|
|
||||||
// Sync mcpServerNames when tools are updated
|
|
||||||
if (directUpdates.tools !== undefined) {
|
|
||||||
const mcpServerNames = extractMCPServerNames(directUpdates.tools);
|
|
||||||
directUpdates.mcpServerNames = mcpServerNames;
|
|
||||||
updateData.mcpServerNames = mcpServerNames; // Also update the original updateData
|
|
||||||
}
|
|
||||||
|
|
||||||
let actionsHash = null;
|
let actionsHash = null;
|
||||||
|
|
||||||
// Generate actions hash if agent has actions
|
// Generate actions hash if agent has actions
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,7 @@ const getConvo = async (user, conversationId) => {
|
||||||
return await Conversation.findOne({ user, conversationId }).lean();
|
return await Conversation.findOne({ user, conversationId }).lean();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvo] Error getting single conversation', error);
|
logger.error('[getConvo] Error getting single conversation', error);
|
||||||
throw new Error('Error getting single conversation');
|
return { message: 'Error getting single conversation' };
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -151,21 +151,13 @@ module.exports = {
|
||||||
const result = await Conversation.bulkWrite(bulkOps);
|
const result = await Conversation.bulkWrite(bulkOps);
|
||||||
return result;
|
return result;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[bulkSaveConvos] Error saving conversations in bulk', error);
|
logger.error('[saveBulkConversations] Error saving conversations in bulk', error);
|
||||||
throw new Error('Failed to save conversations in bulk.');
|
throw new Error('Failed to save conversations in bulk.');
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvosByCursor: async (
|
getConvosByCursor: async (
|
||||||
user,
|
user,
|
||||||
{
|
{ cursor, limit = 25, isArchived = false, tags, search, order = 'desc' } = {},
|
||||||
cursor,
|
|
||||||
limit = 25,
|
|
||||||
isArchived = false,
|
|
||||||
tags,
|
|
||||||
search,
|
|
||||||
sortBy = 'createdAt',
|
|
||||||
sortDirection = 'desc',
|
|
||||||
} = {},
|
|
||||||
) => {
|
) => {
|
||||||
const filters = [{ user }];
|
const filters = [{ user }];
|
||||||
if (isArchived) {
|
if (isArchived) {
|
||||||
|
|
@ -192,77 +184,35 @@ module.exports = {
|
||||||
filters.push({ conversationId: { $in: matchingIds } });
|
filters.push({ conversationId: { $in: matchingIds } });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosByCursor] Error during meiliSearch', error);
|
logger.error('[getConvosByCursor] Error during meiliSearch', error);
|
||||||
throw new Error('Error during meiliSearch');
|
return { message: 'Error during meiliSearch' };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const validSortFields = ['title', 'createdAt', 'updatedAt'];
|
|
||||||
if (!validSortFields.includes(sortBy)) {
|
|
||||||
throw new Error(
|
|
||||||
`Invalid sortBy field: ${sortBy}. Must be one of ${validSortFields.join(', ')}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
const finalSortBy = sortBy;
|
|
||||||
const finalSortDirection = sortDirection === 'asc' ? 'asc' : 'desc';
|
|
||||||
|
|
||||||
let cursorFilter = null;
|
|
||||||
if (cursor) {
|
if (cursor) {
|
||||||
try {
|
filters.push({ updatedAt: { $lt: new Date(cursor) } });
|
||||||
const decoded = JSON.parse(Buffer.from(cursor, 'base64').toString());
|
|
||||||
const { primary, secondary } = decoded;
|
|
||||||
const primaryValue = finalSortBy === 'title' ? primary : new Date(primary);
|
|
||||||
const secondaryValue = new Date(secondary);
|
|
||||||
const op = finalSortDirection === 'asc' ? '$gt' : '$lt';
|
|
||||||
|
|
||||||
cursorFilter = {
|
|
||||||
$or: [
|
|
||||||
{ [finalSortBy]: { [op]: primaryValue } },
|
|
||||||
{
|
|
||||||
[finalSortBy]: primaryValue,
|
|
||||||
updatedAt: { [op]: secondaryValue },
|
|
||||||
},
|
|
||||||
],
|
|
||||||
};
|
|
||||||
} catch (err) {
|
|
||||||
logger.warn('[getConvosByCursor] Invalid cursor format, starting from beginning');
|
|
||||||
}
|
|
||||||
if (cursorFilter) {
|
|
||||||
filters.push(cursorFilter);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const query = filters.length === 1 ? filters[0] : { $and: filters };
|
const query = filters.length === 1 ? filters[0] : { $and: filters };
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const sortOrder = finalSortDirection === 'asc' ? 1 : -1;
|
|
||||||
const sortObj = { [finalSortBy]: sortOrder };
|
|
||||||
|
|
||||||
if (finalSortBy !== 'updatedAt') {
|
|
||||||
sortObj.updatedAt = sortOrder;
|
|
||||||
}
|
|
||||||
|
|
||||||
const convos = await Conversation.find(query)
|
const convos = await Conversation.find(query)
|
||||||
.select(
|
.select(
|
||||||
'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL',
|
'conversationId endpoint title createdAt updatedAt user model agent_id assistant_id spec iconURL',
|
||||||
)
|
)
|
||||||
.sort(sortObj)
|
.sort({ updatedAt: order === 'asc' ? 1 : -1 })
|
||||||
.limit(limit + 1)
|
.limit(limit + 1)
|
||||||
.lean();
|
.lean();
|
||||||
|
|
||||||
let nextCursor = null;
|
let nextCursor = null;
|
||||||
if (convos.length > limit) {
|
if (convos.length > limit) {
|
||||||
const lastConvo = convos.pop();
|
const lastConvo = convos.pop();
|
||||||
const primaryValue = lastConvo[finalSortBy];
|
nextCursor = lastConvo.updatedAt.toISOString();
|
||||||
const primaryStr = finalSortBy === 'title' ? primaryValue : primaryValue.toISOString();
|
|
||||||
const secondaryStr = lastConvo.updatedAt.toISOString();
|
|
||||||
const composite = { primary: primaryStr, secondary: secondaryStr };
|
|
||||||
nextCursor = Buffer.from(JSON.stringify(composite)).toString('base64');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return { conversations: convos, nextCursor };
|
return { conversations: convos, nextCursor };
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosByCursor] Error getting conversations', error);
|
logger.error('[getConvosByCursor] Error getting conversations', error);
|
||||||
throw new Error('Error getting conversations');
|
return { message: 'Error getting conversations' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => {
|
getConvosQueried: async (user, convoIds, cursor = null, limit = 25) => {
|
||||||
|
|
@ -302,7 +252,7 @@ module.exports = {
|
||||||
return { conversations: limited, nextCursor, convoMap };
|
return { conversations: limited, nextCursor, convoMap };
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvosQueried] Error getting conversations', error);
|
logger.error('[getConvosQueried] Error getting conversations', error);
|
||||||
throw new Error('Error fetching conversations');
|
return { message: 'Error fetching conversations' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
getConvo,
|
getConvo,
|
||||||
|
|
@ -319,7 +269,7 @@ module.exports = {
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('[getConvoTitle] Error getting conversation title', error);
|
logger.error('[getConvoTitle] Error getting conversation title', error);
|
||||||
throw new Error('Error getting conversation title');
|
return { message: 'Error getting conversation title' };
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
|
const { createModels } = require('@librechat/data-schemas');
|
||||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||||
const { createModels, createMethods } = require('@librechat/data-schemas');
|
|
||||||
const {
|
const {
|
||||||
SystemRoles,
|
SystemRoles,
|
||||||
ResourceType,
|
ResourceType,
|
||||||
|
|
@ -9,6 +9,8 @@ const {
|
||||||
PrincipalType,
|
PrincipalType,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const { grantPermission } = require('~/server/services/PermissionService');
|
const { grantPermission } = require('~/server/services/PermissionService');
|
||||||
|
const { getFiles, createFile } = require('./File');
|
||||||
|
const { seedDefaultRoles } = require('~/models');
|
||||||
const { createAgent } = require('./Agent');
|
const { createAgent } = require('./Agent');
|
||||||
|
|
||||||
let File;
|
let File;
|
||||||
|
|
@ -16,10 +18,6 @@ let Agent;
|
||||||
let AclEntry;
|
let AclEntry;
|
||||||
let User;
|
let User;
|
||||||
let modelsToCleanup = [];
|
let modelsToCleanup = [];
|
||||||
let methods;
|
|
||||||
let getFiles;
|
|
||||||
let createFile;
|
|
||||||
let seedDefaultRoles;
|
|
||||||
|
|
||||||
describe('File Access Control', () => {
|
describe('File Access Control', () => {
|
||||||
let mongoServer;
|
let mongoServer;
|
||||||
|
|
@ -44,12 +42,6 @@ describe('File Access Control', () => {
|
||||||
AclEntry = dbModels.AclEntry;
|
AclEntry = dbModels.AclEntry;
|
||||||
User = dbModels.User;
|
User = dbModels.User;
|
||||||
|
|
||||||
// Create methods from data-schemas (includes file methods)
|
|
||||||
methods = createMethods(mongoose);
|
|
||||||
getFiles = methods.getFiles;
|
|
||||||
createFile = methods.createFile;
|
|
||||||
seedDefaultRoles = methods.seedDefaultRoles;
|
|
||||||
|
|
||||||
// Seed default roles
|
// Seed default roles
|
||||||
await seedDefaultRoles();
|
await seedDefaultRoles();
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -2,6 +2,15 @@ const mongoose = require('mongoose');
|
||||||
const { createMethods } = require('@librechat/data-schemas');
|
const { createMethods } = require('@librechat/data-schemas');
|
||||||
const methods = createMethods(mongoose);
|
const methods = createMethods(mongoose);
|
||||||
const { comparePassword } = require('./userMethods');
|
const { comparePassword } = require('./userMethods');
|
||||||
|
const {
|
||||||
|
findFileById,
|
||||||
|
createFile,
|
||||||
|
updateFile,
|
||||||
|
deleteFile,
|
||||||
|
deleteFiles,
|
||||||
|
getFiles,
|
||||||
|
updateFileUsage,
|
||||||
|
} = require('./File');
|
||||||
const {
|
const {
|
||||||
getMessage,
|
getMessage,
|
||||||
getMessages,
|
getMessages,
|
||||||
|
|
@ -25,6 +34,13 @@ module.exports = {
|
||||||
...methods,
|
...methods,
|
||||||
seedDatabase,
|
seedDatabase,
|
||||||
comparePassword,
|
comparePassword,
|
||||||
|
findFileById,
|
||||||
|
createFile,
|
||||||
|
updateFile,
|
||||||
|
deleteFile,
|
||||||
|
deleteFiles,
|
||||||
|
getFiles,
|
||||||
|
updateFileUsage,
|
||||||
|
|
||||||
getMessage,
|
getMessage,
|
||||||
getMessages,
|
getMessages,
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,6 @@
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const { logger, hashToken, getRandomValues } = require('@librechat/data-schemas');
|
const { getRandomValues } = require('@librechat/api');
|
||||||
|
const { logger, hashToken } = require('@librechat/data-schemas');
|
||||||
const { createToken, findToken } = require('~/models');
|
const { createToken, findToken } = require('~/models');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
{
|
{
|
||||||
"name": "@librechat/backend",
|
"name": "@librechat/backend",
|
||||||
"version": "v0.8.2-rc1",
|
"version": "v0.8.1",
|
||||||
"description": "",
|
"description": "",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"start": "echo 'please run this from the root directory'",
|
"start": "echo 'please run this from the root directory'",
|
||||||
|
|
@ -34,22 +34,26 @@
|
||||||
},
|
},
|
||||||
"homepage": "https://librechat.ai",
|
"homepage": "https://librechat.ai",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@aws-sdk/client-bedrock-runtime": "^3.941.0",
|
"@anthropic-ai/sdk": "^0.52.0",
|
||||||
"@aws-sdk/client-s3": "^3.758.0",
|
"@aws-sdk/client-s3": "^3.758.0",
|
||||||
"@aws-sdk/s3-request-presigner": "^3.758.0",
|
"@aws-sdk/s3-request-presigner": "^3.758.0",
|
||||||
"@azure/identity": "^4.7.0",
|
"@azure/identity": "^4.7.0",
|
||||||
"@azure/search-documents": "^12.0.0",
|
"@azure/search-documents": "^12.0.0",
|
||||||
"@azure/storage-blob": "^12.27.0",
|
"@azure/storage-blob": "^12.27.0",
|
||||||
|
"@google/generative-ai": "^0.24.0",
|
||||||
"@googleapis/youtube": "^20.0.0",
|
"@googleapis/youtube": "^20.0.0",
|
||||||
"@keyv/redis": "^4.3.3",
|
"@keyv/redis": "^4.3.3",
|
||||||
"@langchain/core": "^0.3.79",
|
"@langchain/core": "^0.3.79",
|
||||||
"@librechat/agents": "^3.0.52",
|
"@langchain/google-genai": "^0.2.13",
|
||||||
|
"@langchain/google-vertexai": "^0.2.13",
|
||||||
|
"@langchain/textsplitters": "^0.1.0",
|
||||||
|
"@librechat/agents": "^3.0.50",
|
||||||
"@librechat/api": "*",
|
"@librechat/api": "*",
|
||||||
"@librechat/data-schemas": "*",
|
"@librechat/data-schemas": "*",
|
||||||
"@microsoft/microsoft-graph-client": "^3.0.7",
|
"@microsoft/microsoft-graph-client": "^3.0.7",
|
||||||
"@modelcontextprotocol/sdk": "^1.24.3",
|
"@modelcontextprotocol/sdk": "^1.21.0",
|
||||||
"@node-saml/passport-saml": "^5.1.0",
|
"@node-saml/passport-saml": "^5.1.0",
|
||||||
"@smithy/node-http-handler": "^4.4.5",
|
"@waylaidwanderer/fetch-event-source": "^3.0.1",
|
||||||
"axios": "^1.12.1",
|
"axios": "^1.12.1",
|
||||||
"bcryptjs": "^2.4.3",
|
"bcryptjs": "^2.4.3",
|
||||||
"compression": "^1.8.1",
|
"compression": "^1.8.1",
|
||||||
|
|
@ -60,14 +64,15 @@
|
||||||
"dedent": "^1.5.3",
|
"dedent": "^1.5.3",
|
||||||
"dotenv": "^16.0.3",
|
"dotenv": "^16.0.3",
|
||||||
"eventsource": "^3.0.2",
|
"eventsource": "^3.0.2",
|
||||||
"express": "^5.1.0",
|
"express": "^4.21.2",
|
||||||
"express-mongo-sanitize": "^2.2.0",
|
"express-mongo-sanitize": "^2.2.0",
|
||||||
"express-rate-limit": "^8.2.1",
|
"express-rate-limit": "^7.4.1",
|
||||||
"express-session": "^1.18.2",
|
"express-session": "^1.18.2",
|
||||||
"express-static-gzip": "^2.2.0",
|
"express-static-gzip": "^2.2.0",
|
||||||
"file-type": "^18.7.0",
|
"file-type": "^18.7.0",
|
||||||
"firebase": "^11.0.2",
|
"firebase": "^11.0.2",
|
||||||
"form-data": "^4.0.4",
|
"form-data": "^4.0.4",
|
||||||
|
"googleapis": "^126.0.1",
|
||||||
"handlebars": "^4.7.7",
|
"handlebars": "^4.7.7",
|
||||||
"https-proxy-agent": "^7.0.6",
|
"https-proxy-agent": "^7.0.6",
|
||||||
"ioredis": "^5.3.2",
|
"ioredis": "^5.3.2",
|
||||||
|
|
|
||||||
|
|
@ -10,13 +10,7 @@ const {
|
||||||
setAuthTokens,
|
setAuthTokens,
|
||||||
registerUser,
|
registerUser,
|
||||||
} = require('~/server/services/AuthService');
|
} = require('~/server/services/AuthService');
|
||||||
const {
|
const { findUser, getUserById, deleteAllUserSessions, findSession } = require('~/models');
|
||||||
deleteAllUserSessions,
|
|
||||||
getUserById,
|
|
||||||
findSession,
|
|
||||||
updateUser,
|
|
||||||
findUser,
|
|
||||||
} = require('~/models');
|
|
||||||
const { getGraphApiToken } = require('~/server/services/GraphTokenService');
|
const { getGraphApiToken } = require('~/server/services/GraphTokenService');
|
||||||
const { getOAuthReconnectionManager } = require('~/config');
|
const { getOAuthReconnectionManager } = require('~/config');
|
||||||
const { getOpenIdConfig } = require('~/strategies');
|
const { getOpenIdConfig } = require('~/strategies');
|
||||||
|
|
@ -78,38 +72,16 @@ const refreshController = async (req, res) => {
|
||||||
const openIdConfig = getOpenIdConfig();
|
const openIdConfig = getOpenIdConfig();
|
||||||
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
|
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
|
||||||
const claims = tokenset.claims();
|
const claims = tokenset.claims();
|
||||||
const { user, error, migration } = await findOpenIDUser({
|
const { user, error } = await findOpenIDUser({
|
||||||
findUser,
|
findUser,
|
||||||
email: claims.email,
|
email: claims.email,
|
||||||
openidId: claims.sub,
|
openidId: claims.sub,
|
||||||
idOnTheSource: claims.oid,
|
idOnTheSource: claims.oid,
|
||||||
strategyName: 'refreshController',
|
strategyName: 'refreshController',
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.debug(
|
|
||||||
`[refreshController] findOpenIDUser result: user=${user?.email ?? 'null'}, error=${error ?? 'null'}, migration=${migration}, userOpenidId=${user?.openidId ?? 'null'}, claimsSub=${claims.sub}`,
|
|
||||||
);
|
|
||||||
|
|
||||||
if (error || !user) {
|
if (error || !user) {
|
||||||
logger.warn(
|
|
||||||
`[refreshController] Redirecting to /login: error=${error ?? 'null'}, user=${user ? 'exists' : 'null'}`,
|
|
||||||
);
|
|
||||||
return res.status(401).redirect('/login');
|
return res.status(401).redirect('/login');
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle migration: update user with openidId if found by email without openidId
|
|
||||||
// Also handle case where user has mismatched openidId (e.g., after database switch)
|
|
||||||
if (migration || user.openidId !== claims.sub) {
|
|
||||||
const reason = migration ? 'migration' : 'openidId mismatch';
|
|
||||||
await updateUser(user._id.toString(), {
|
|
||||||
provider: 'openid',
|
|
||||||
openidId: claims.sub,
|
|
||||||
});
|
|
||||||
logger.info(
|
|
||||||
`[refreshController] Updated user ${user.email} openidId (${reason}): ${user.openidId ?? 'null'} -> ${claims.sub}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const token = setOpenIDAuthTokens(tokenset, res, user._id.toString(), refreshToken);
|
const token = setOpenIDAuthTokens(tokenset, res, user._id.toString(), refreshToken);
|
||||||
|
|
||||||
user.federatedTokens = {
|
user.federatedTokens = {
|
||||||
|
|
|
||||||
247
api/server/controllers/EditController.js
Normal file
247
api/server/controllers/EditController.js
Normal file
|
|
@ -0,0 +1,247 @@
|
||||||
|
const { sendEvent } = require('@librechat/api');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
|
const { getResponseSender } = require('librechat-data-provider');
|
||||||
|
const {
|
||||||
|
handleAbortError,
|
||||||
|
createAbortController,
|
||||||
|
cleanupAbortController,
|
||||||
|
} = require('~/server/middleware');
|
||||||
|
const {
|
||||||
|
disposeClient,
|
||||||
|
processReqData,
|
||||||
|
clientRegistry,
|
||||||
|
requestDataMap,
|
||||||
|
} = require('~/server/cleanup');
|
||||||
|
const { createOnProgress } = require('~/server/utils');
|
||||||
|
const { saveMessage } = require('~/models');
|
||||||
|
|
||||||
|
const EditController = async (req, res, next, initializeClient) => {
|
||||||
|
let {
|
||||||
|
text,
|
||||||
|
generation,
|
||||||
|
endpointOption,
|
||||||
|
conversationId,
|
||||||
|
modelDisplayLabel,
|
||||||
|
responseMessageId,
|
||||||
|
isContinued = false,
|
||||||
|
parentMessageId = null,
|
||||||
|
overrideParentMessageId = null,
|
||||||
|
} = req.body;
|
||||||
|
|
||||||
|
let client = null;
|
||||||
|
let abortKey = null;
|
||||||
|
let cleanupHandlers = [];
|
||||||
|
let clientRef = null; // Declare clientRef here
|
||||||
|
|
||||||
|
logger.debug('[EditController]', {
|
||||||
|
text,
|
||||||
|
generation,
|
||||||
|
isContinued,
|
||||||
|
conversationId,
|
||||||
|
...endpointOption,
|
||||||
|
modelsConfig: endpointOption.modelsConfig ? 'exists' : '',
|
||||||
|
});
|
||||||
|
|
||||||
|
let userMessage = null;
|
||||||
|
let userMessagePromise = null;
|
||||||
|
let promptTokens = null;
|
||||||
|
let getAbortData = null;
|
||||||
|
|
||||||
|
const sender = getResponseSender({
|
||||||
|
...endpointOption,
|
||||||
|
model: endpointOption.modelOptions.model,
|
||||||
|
modelDisplayLabel,
|
||||||
|
});
|
||||||
|
const userMessageId = parentMessageId;
|
||||||
|
const userId = req.user.id;
|
||||||
|
|
||||||
|
let reqDataContext = { userMessage, userMessagePromise, responseMessageId, promptTokens };
|
||||||
|
|
||||||
|
const updateReqData = (data = {}) => {
|
||||||
|
reqDataContext = processReqData(data, reqDataContext);
|
||||||
|
abortKey = reqDataContext.abortKey;
|
||||||
|
userMessage = reqDataContext.userMessage;
|
||||||
|
userMessagePromise = reqDataContext.userMessagePromise;
|
||||||
|
responseMessageId = reqDataContext.responseMessageId;
|
||||||
|
promptTokens = reqDataContext.promptTokens;
|
||||||
|
};
|
||||||
|
|
||||||
|
let { onProgress: progressCallback, getPartialText } = createOnProgress({
|
||||||
|
generation,
|
||||||
|
});
|
||||||
|
|
||||||
|
const performCleanup = () => {
|
||||||
|
logger.debug('[EditController] Performing cleanup');
|
||||||
|
if (Array.isArray(cleanupHandlers)) {
|
||||||
|
for (const handler of cleanupHandlers) {
|
||||||
|
try {
|
||||||
|
if (typeof handler === 'function') {
|
||||||
|
handler();
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (abortKey) {
|
||||||
|
logger.debug('[EditController] Cleaning up abort controller');
|
||||||
|
cleanupAbortController(abortKey);
|
||||||
|
abortKey = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (client) {
|
||||||
|
disposeClient(client);
|
||||||
|
client = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
reqDataContext = null;
|
||||||
|
userMessage = null;
|
||||||
|
userMessagePromise = null;
|
||||||
|
promptTokens = null;
|
||||||
|
getAbortData = null;
|
||||||
|
progressCallback = null;
|
||||||
|
endpointOption = null;
|
||||||
|
cleanupHandlers = null;
|
||||||
|
|
||||||
|
if (requestDataMap.has(req)) {
|
||||||
|
requestDataMap.delete(req);
|
||||||
|
}
|
||||||
|
logger.debug('[EditController] Cleanup completed');
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
({ client } = await initializeClient({ req, res, endpointOption }));
|
||||||
|
|
||||||
|
if (clientRegistry && client) {
|
||||||
|
clientRegistry.register(client, { userId }, client);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (client) {
|
||||||
|
requestDataMap.set(req, { client });
|
||||||
|
}
|
||||||
|
|
||||||
|
clientRef = new WeakRef(client);
|
||||||
|
|
||||||
|
getAbortData = () => {
|
||||||
|
const currentClient = clientRef?.deref();
|
||||||
|
const currentText =
|
||||||
|
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||||
|
|
||||||
|
return {
|
||||||
|
sender,
|
||||||
|
conversationId,
|
||||||
|
messageId: reqDataContext.responseMessageId,
|
||||||
|
parentMessageId: overrideParentMessageId ?? userMessageId,
|
||||||
|
text: currentText,
|
||||||
|
userMessage: userMessage,
|
||||||
|
userMessagePromise: userMessagePromise,
|
||||||
|
promptTokens: reqDataContext.promptTokens,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
const { onStart, abortController } = createAbortController(
|
||||||
|
req,
|
||||||
|
res,
|
||||||
|
getAbortData,
|
||||||
|
updateReqData,
|
||||||
|
);
|
||||||
|
|
||||||
|
const closeHandler = () => {
|
||||||
|
logger.debug('[EditController] Request closed');
|
||||||
|
if (!abortController || abortController.signal.aborted || abortController.requestCompleted) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
abortController.abort();
|
||||||
|
logger.debug('[EditController] Request aborted on close');
|
||||||
|
};
|
||||||
|
|
||||||
|
res.on('close', closeHandler);
|
||||||
|
cleanupHandlers.push(() => {
|
||||||
|
try {
|
||||||
|
res.removeListener('close', closeHandler);
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = await client.sendMessage(text, {
|
||||||
|
user: userId,
|
||||||
|
generation,
|
||||||
|
isContinued,
|
||||||
|
isEdited: true,
|
||||||
|
conversationId,
|
||||||
|
parentMessageId,
|
||||||
|
responseMessageId: reqDataContext.responseMessageId,
|
||||||
|
overrideParentMessageId,
|
||||||
|
getReqData: updateReqData,
|
||||||
|
onStart,
|
||||||
|
abortController,
|
||||||
|
progressCallback,
|
||||||
|
progressOptions: {
|
||||||
|
res,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const databasePromise = response.databasePromise;
|
||||||
|
delete response.databasePromise;
|
||||||
|
|
||||||
|
const { conversation: convoData = {} } = await databasePromise;
|
||||||
|
const conversation = { ...convoData };
|
||||||
|
conversation.title =
|
||||||
|
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
|
||||||
|
|
||||||
|
if (client?.options?.attachments && endpointOption?.modelOptions?.model) {
|
||||||
|
conversation.model = endpointOption.modelOptions.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!abortController.signal.aborted) {
|
||||||
|
const finalUserMessage = reqDataContext.userMessage;
|
||||||
|
const finalResponseMessage = { ...response };
|
||||||
|
|
||||||
|
sendEvent(res, {
|
||||||
|
final: true,
|
||||||
|
conversation,
|
||||||
|
title: conversation.title,
|
||||||
|
requestMessage: finalUserMessage,
|
||||||
|
responseMessage: finalResponseMessage,
|
||||||
|
});
|
||||||
|
res.end();
|
||||||
|
|
||||||
|
await saveMessage(
|
||||||
|
req,
|
||||||
|
{ ...finalResponseMessage, user: userId },
|
||||||
|
{ context: 'api/server/controllers/EditController.js - response end' },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
performCleanup();
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('[EditController] Error handling request', error);
|
||||||
|
let partialText = '';
|
||||||
|
try {
|
||||||
|
const currentClient = clientRef?.deref();
|
||||||
|
partialText =
|
||||||
|
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||||
|
} catch (getTextError) {
|
||||||
|
logger.error('[EditController] Error calling getText() during error handling', getTextError);
|
||||||
|
}
|
||||||
|
|
||||||
|
handleAbortError(res, req, error, {
|
||||||
|
sender,
|
||||||
|
partialText,
|
||||||
|
conversationId,
|
||||||
|
messageId: reqDataContext.responseMessageId,
|
||||||
|
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
|
||||||
|
userMessageId,
|
||||||
|
})
|
||||||
|
.catch((err) => {
|
||||||
|
logger.error('[EditController] Error in `handleAbortError` during catch block', err);
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
performCleanup();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = EditController;
|
||||||
|
|
@ -1,99 +0,0 @@
|
||||||
const { updateUser, getUserById } = require('~/models');
|
|
||||||
|
|
||||||
const MAX_FAVORITES = 50;
|
|
||||||
const MAX_STRING_LENGTH = 256;
|
|
||||||
|
|
||||||
const updateFavoritesController = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const { favorites } = req.body;
|
|
||||||
const userId = req.user.id;
|
|
||||||
|
|
||||||
if (!favorites) {
|
|
||||||
return res.status(400).json({ message: 'Favorites data is required' });
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!Array.isArray(favorites)) {
|
|
||||||
return res.status(400).json({ message: 'Favorites must be an array' });
|
|
||||||
}
|
|
||||||
|
|
||||||
if (favorites.length > MAX_FAVORITES) {
|
|
||||||
return res.status(400).json({
|
|
||||||
code: 'MAX_FAVORITES_EXCEEDED',
|
|
||||||
message: `Maximum ${MAX_FAVORITES} favorites allowed`,
|
|
||||||
limit: MAX_FAVORITES,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const fav of favorites) {
|
|
||||||
const hasAgent = !!fav.agentId;
|
|
||||||
const hasModel = !!(fav.model && fav.endpoint);
|
|
||||||
|
|
||||||
if (fav.agentId && fav.agentId.length > MAX_STRING_LENGTH) {
|
|
||||||
return res
|
|
||||||
.status(400)
|
|
||||||
.json({ message: `agentId exceeds maximum length of ${MAX_STRING_LENGTH}` });
|
|
||||||
}
|
|
||||||
if (fav.model && fav.model.length > MAX_STRING_LENGTH) {
|
|
||||||
return res
|
|
||||||
.status(400)
|
|
||||||
.json({ message: `model exceeds maximum length of ${MAX_STRING_LENGTH}` });
|
|
||||||
}
|
|
||||||
if (fav.endpoint && fav.endpoint.length > MAX_STRING_LENGTH) {
|
|
||||||
return res
|
|
||||||
.status(400)
|
|
||||||
.json({ message: `endpoint exceeds maximum length of ${MAX_STRING_LENGTH}` });
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!hasAgent && !hasModel) {
|
|
||||||
return res.status(400).json({
|
|
||||||
message: 'Each favorite must have either agentId or model+endpoint',
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (hasAgent && hasModel) {
|
|
||||||
return res.status(400).json({
|
|
||||||
message: 'Favorite cannot have both agentId and model/endpoint',
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const user = await updateUser(userId, { favorites });
|
|
||||||
|
|
||||||
if (!user) {
|
|
||||||
return res.status(404).json({ message: 'User not found' });
|
|
||||||
}
|
|
||||||
|
|
||||||
res.status(200).json(user.favorites);
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Error updating favorites:', error);
|
|
||||||
res.status(500).json({ message: 'Internal server error' });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const getFavoritesController = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user.id;
|
|
||||||
const user = await getUserById(userId, 'favorites');
|
|
||||||
|
|
||||||
if (!user) {
|
|
||||||
return res.status(404).json({ message: 'User not found' });
|
|
||||||
}
|
|
||||||
|
|
||||||
let favorites = user.favorites || [];
|
|
||||||
|
|
||||||
if (!Array.isArray(favorites)) {
|
|
||||||
favorites = [];
|
|
||||||
await updateUser(userId, { favorites: [] });
|
|
||||||
}
|
|
||||||
|
|
||||||
res.status(200).json(favorites);
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Error fetching favorites:', error);
|
|
||||||
res.status(500).json({ message: 'Internal server error' });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
updateFavoritesController,
|
|
||||||
getFavoritesController,
|
|
||||||
};
|
|
||||||
|
|
@ -4,15 +4,13 @@
|
||||||
|
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { ResourceType, PrincipalType, PermissionBits } = require('librechat-data-provider');
|
const { ResourceType, PrincipalType } = require('librechat-data-provider');
|
||||||
const {
|
const {
|
||||||
bulkUpdateResourcePermissions,
|
bulkUpdateResourcePermissions,
|
||||||
ensureGroupPrincipalExists,
|
ensureGroupPrincipalExists,
|
||||||
getEffectivePermissions,
|
getEffectivePermissions,
|
||||||
ensurePrincipalExists,
|
ensurePrincipalExists,
|
||||||
getAvailableRoles,
|
getAvailableRoles,
|
||||||
findAccessibleResources,
|
|
||||||
getResourcePermissionsMap,
|
|
||||||
} = require('~/server/services/PermissionService');
|
} = require('~/server/services/PermissionService');
|
||||||
const { AclEntry } = require('~/db/models');
|
const { AclEntry } = require('~/db/models');
|
||||||
const {
|
const {
|
||||||
|
|
@ -477,58 +475,10 @@ const searchPrincipals = async (req, res) => {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
|
||||||
* Get user's effective permissions for all accessible resources of a type
|
|
||||||
* @route GET /api/permissions/{resourceType}/effective/all
|
|
||||||
*/
|
|
||||||
const getAllEffectivePermissions = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const { resourceType } = req.params;
|
|
||||||
validateResourceType(resourceType);
|
|
||||||
|
|
||||||
const { id: userId } = req.user;
|
|
||||||
|
|
||||||
// Find all resources the user has at least VIEW access to
|
|
||||||
const accessibleResourceIds = await findAccessibleResources({
|
|
||||||
userId,
|
|
||||||
role: req.user.role,
|
|
||||||
resourceType,
|
|
||||||
requiredPermissions: PermissionBits.VIEW,
|
|
||||||
});
|
|
||||||
|
|
||||||
if (accessibleResourceIds.length === 0) {
|
|
||||||
return res.status(200).json({});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get effective permissions for all accessible resources
|
|
||||||
const permissionsMap = await getResourcePermissionsMap({
|
|
||||||
userId,
|
|
||||||
role: req.user.role,
|
|
||||||
resourceType,
|
|
||||||
resourceIds: accessibleResourceIds,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Convert Map to plain object for JSON response
|
|
||||||
const result = {};
|
|
||||||
for (const [resourceId, permBits] of permissionsMap) {
|
|
||||||
result[resourceId] = permBits;
|
|
||||||
}
|
|
||||||
|
|
||||||
res.status(200).json(result);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('Error getting all effective permissions:', error);
|
|
||||||
res.status(500).json({
|
|
||||||
error: 'Failed to get all effective permissions',
|
|
||||||
details: error.message,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
updateResourcePermissions,
|
updateResourcePermissions,
|
||||||
getResourcePermissions,
|
getResourcePermissions,
|
||||||
getResourceRoles,
|
getResourceRoles,
|
||||||
getUserEffectivePermissions,
|
getUserEffectivePermissions,
|
||||||
getAllEffectivePermissions,
|
|
||||||
searchPrincipals,
|
searchPrincipals,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -1,10 +1,11 @@
|
||||||
const { encryptV3, logger } = require('@librechat/data-schemas');
|
const { encryptV3 } = require('@librechat/api');
|
||||||
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const {
|
const {
|
||||||
generateBackupCodes,
|
|
||||||
generateTOTPSecret,
|
|
||||||
verifyBackupCode,
|
|
||||||
getTOTPSecret,
|
|
||||||
verifyTOTP,
|
verifyTOTP,
|
||||||
|
getTOTPSecret,
|
||||||
|
verifyBackupCode,
|
||||||
|
generateTOTPSecret,
|
||||||
|
generateBackupCodes,
|
||||||
} = require('~/server/services/twoFactorService');
|
} = require('~/server/services/twoFactorService');
|
||||||
const { getUserById, updateUser } = require('~/models');
|
const { getUserById, updateUser } = require('~/models');
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -3,17 +3,16 @@ const { Tools, CacheKeys, Constants, FileSources } = require('librechat-data-pro
|
||||||
const {
|
const {
|
||||||
MCPOAuthHandler,
|
MCPOAuthHandler,
|
||||||
MCPTokenStorage,
|
MCPTokenStorage,
|
||||||
|
mcpServersRegistry,
|
||||||
normalizeHttpError,
|
normalizeHttpError,
|
||||||
extractWebSearchEnvVars,
|
extractWebSearchEnvVars,
|
||||||
} = require('@librechat/api');
|
} = require('@librechat/api');
|
||||||
const {
|
const {
|
||||||
deleteAllUserSessions,
|
deleteAllUserSessions,
|
||||||
deleteAllSharedLinks,
|
deleteAllSharedLinks,
|
||||||
updateUserPlugins,
|
|
||||||
deleteUserById,
|
deleteUserById,
|
||||||
deleteMessages,
|
deleteMessages,
|
||||||
deletePresets,
|
deletePresets,
|
||||||
deleteUserKey,
|
|
||||||
deleteConvos,
|
deleteConvos,
|
||||||
deleteFiles,
|
deleteFiles,
|
||||||
updateUser,
|
updateUser,
|
||||||
|
|
@ -33,10 +32,11 @@ const {
|
||||||
User,
|
User,
|
||||||
} = require('~/db/models');
|
} = require('~/db/models');
|
||||||
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
|
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
|
||||||
|
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
|
||||||
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
|
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
|
||||||
const { getMCPManager, getFlowStateManager, getMCPServersRegistry } = require('~/config');
|
|
||||||
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
|
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
|
||||||
const { processDeleteRequest } = require('~/server/services/Files/process');
|
const { processDeleteRequest } = require('~/server/services/Files/process');
|
||||||
|
const { getMCPManager, getFlowStateManager } = require('~/config');
|
||||||
const { getAppConfig } = require('~/server/services/Config');
|
const { getAppConfig } = require('~/server/services/Config');
|
||||||
const { deleteToolCalls } = require('~/models/ToolCall');
|
const { deleteToolCalls } = require('~/models/ToolCall');
|
||||||
const { deleteUserPrompts } = require('~/models/Prompt');
|
const { deleteUserPrompts } = require('~/models/Prompt');
|
||||||
|
|
@ -115,7 +115,13 @@ const updateUserPluginsController = async (req, res) => {
|
||||||
const { pluginKey, action, auth, isEntityTool } = req.body;
|
const { pluginKey, action, auth, isEntityTool } = req.body;
|
||||||
try {
|
try {
|
||||||
if (!isEntityTool) {
|
if (!isEntityTool) {
|
||||||
await updateUserPlugins(user._id, user.plugins, pluginKey, action);
|
const userPluginsService = await updateUserPluginsService(user, pluginKey, action);
|
||||||
|
|
||||||
|
if (userPluginsService instanceof Error) {
|
||||||
|
logger.error('[userPluginsService]', userPluginsService);
|
||||||
|
const { status, message } = normalizeHttpError(userPluginsService);
|
||||||
|
return res.status(status).send({ message });
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (auth == null) {
|
if (auth == null) {
|
||||||
|
|
@ -315,9 +321,9 @@ const maybeUninstallOAuthMCP = async (userId, pluginKey, appConfig) => {
|
||||||
|
|
||||||
const serverName = pluginKey.replace(Constants.mcp_prefix, '');
|
const serverName = pluginKey.replace(Constants.mcp_prefix, '');
|
||||||
const serverConfig =
|
const serverConfig =
|
||||||
(await getMCPServersRegistry().getServerConfig(serverName, userId)) ??
|
(await mcpServersRegistry.getServerConfig(serverName, userId)) ??
|
||||||
appConfig?.mcpServers?.[serverName];
|
appConfig?.mcpServers?.[serverName];
|
||||||
const oauthServers = await getMCPServersRegistry().getOAuthServers(userId);
|
const oauthServers = await mcpServersRegistry.getOAuthServers();
|
||||||
if (!oauthServers.has(serverName)) {
|
if (!oauthServers.has(serverName)) {
|
||||||
// this server does not use OAuth, so nothing to do here as well
|
// this server does not use OAuth, so nothing to do here as well
|
||||||
return;
|
return;
|
||||||
|
|
|
||||||
|
|
@ -73,10 +73,10 @@ describe('createToolEndCallback', () => {
|
||||||
tool_call_id: 'tool123',
|
tool_call_id: 'tool123',
|
||||||
artifact: {
|
artifact: {
|
||||||
[Tools.ui_resources]: {
|
[Tools.ui_resources]: {
|
||||||
data: [
|
data: {
|
||||||
{ type: 'button', label: 'Click me' },
|
0: { type: 'button', label: 'Click me' },
|
||||||
{ type: 'input', placeholder: 'Enter text' },
|
1: { type: 'input', placeholder: 'Enter text' },
|
||||||
],
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
@ -100,10 +100,10 @@ describe('createToolEndCallback', () => {
|
||||||
messageId: 'run456',
|
messageId: 'run456',
|
||||||
toolCallId: 'tool123',
|
toolCallId: 'tool123',
|
||||||
conversationId: 'thread789',
|
conversationId: 'thread789',
|
||||||
[Tools.ui_resources]: [
|
[Tools.ui_resources]: {
|
||||||
{ type: 'button', label: 'Click me' },
|
0: { type: 'button', label: 'Click me' },
|
||||||
{ type: 'input', placeholder: 'Enter text' },
|
1: { type: 'input', placeholder: 'Enter text' },
|
||||||
],
|
},
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -115,7 +115,9 @@ describe('createToolEndCallback', () => {
|
||||||
tool_call_id: 'tool123',
|
tool_call_id: 'tool123',
|
||||||
artifact: {
|
artifact: {
|
||||||
[Tools.ui_resources]: {
|
[Tools.ui_resources]: {
|
||||||
data: [{ type: 'carousel', items: [] }],
|
data: {
|
||||||
|
0: { type: 'carousel', items: [] },
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
@ -134,7 +136,9 @@ describe('createToolEndCallback', () => {
|
||||||
messageId: 'run456',
|
messageId: 'run456',
|
||||||
toolCallId: 'tool123',
|
toolCallId: 'tool123',
|
||||||
conversationId: 'thread789',
|
conversationId: 'thread789',
|
||||||
[Tools.ui_resources]: [{ type: 'carousel', items: [] }],
|
[Tools.ui_resources]: {
|
||||||
|
0: { type: 'carousel', items: [] },
|
||||||
|
},
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -151,7 +155,9 @@ describe('createToolEndCallback', () => {
|
||||||
tool_call_id: 'tool123',
|
tool_call_id: 'tool123',
|
||||||
artifact: {
|
artifact: {
|
||||||
[Tools.ui_resources]: {
|
[Tools.ui_resources]: {
|
||||||
data: [{ type: 'test' }],
|
data: {
|
||||||
|
0: { type: 'test' },
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
@ -178,7 +184,9 @@ describe('createToolEndCallback', () => {
|
||||||
tool_call_id: 'tool123',
|
tool_call_id: 'tool123',
|
||||||
artifact: {
|
artifact: {
|
||||||
[Tools.ui_resources]: {
|
[Tools.ui_resources]: {
|
||||||
data: [{ type: 'chart', data: [] }],
|
data: {
|
||||||
|
0: { type: 'chart', data: [] },
|
||||||
|
},
|
||||||
},
|
},
|
||||||
[Tools.web_search]: {
|
[Tools.web_search]: {
|
||||||
results: ['result1', 'result2'],
|
results: ['result1', 'result2'],
|
||||||
|
|
@ -201,7 +209,9 @@ describe('createToolEndCallback', () => {
|
||||||
// Check ui_resources attachment
|
// Check ui_resources attachment
|
||||||
const uiResourceAttachment = results.find((r) => r?.type === Tools.ui_resources);
|
const uiResourceAttachment = results.find((r) => r?.type === Tools.ui_resources);
|
||||||
expect(uiResourceAttachment).toBeTruthy();
|
expect(uiResourceAttachment).toBeTruthy();
|
||||||
expect(uiResourceAttachment[Tools.ui_resources]).toEqual([{ type: 'chart', data: [] }]);
|
expect(uiResourceAttachment[Tools.ui_resources]).toEqual({
|
||||||
|
0: { type: 'chart', data: [] },
|
||||||
|
});
|
||||||
|
|
||||||
// Check web_search attachment
|
// Check web_search attachment
|
||||||
const webSearchAttachment = results.find((r) => r?.type === Tools.web_search);
|
const webSearchAttachment = results.find((r) => r?.type === Tools.web_search);
|
||||||
|
|
@ -240,7 +250,7 @@ describe('createToolEndCallback', () => {
|
||||||
tool_call_id: 'tool123',
|
tool_call_id: 'tool123',
|
||||||
artifact: {
|
artifact: {
|
||||||
[Tools.ui_resources]: {
|
[Tools.ui_resources]: {
|
||||||
data: [],
|
data: {},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
@ -258,7 +268,7 @@ describe('createToolEndCallback', () => {
|
||||||
messageId: 'run456',
|
messageId: 'run456',
|
||||||
toolCallId: 'tool123',
|
toolCallId: 'tool123',
|
||||||
conversationId: 'thread789',
|
conversationId: 'thread789',
|
||||||
[Tools.ui_resources]: [],
|
[Tools.ui_resources]: {},
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -10,9 +10,7 @@ const {
|
||||||
sanitizeTitle,
|
sanitizeTitle,
|
||||||
resolveHeaders,
|
resolveHeaders,
|
||||||
createSafeUser,
|
createSafeUser,
|
||||||
initializeAgent,
|
|
||||||
getBalanceConfig,
|
getBalanceConfig,
|
||||||
getProviderConfig,
|
|
||||||
memoryInstructions,
|
memoryInstructions,
|
||||||
getTransactionsConfig,
|
getTransactionsConfig,
|
||||||
createMemoryProcessor,
|
createMemoryProcessor,
|
||||||
|
|
@ -40,16 +38,17 @@ const {
|
||||||
bedrockInputSchema,
|
bedrockInputSchema,
|
||||||
removeNullishValues,
|
removeNullishValues,
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
|
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
|
||||||
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
|
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
|
||||||
|
const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
|
||||||
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
||||||
|
const { getProviderConfig } = require('~/server/services/Endpoints');
|
||||||
const { createContextHandlers } = require('~/app/clients/prompts');
|
const { createContextHandlers } = require('~/app/clients/prompts');
|
||||||
const { checkCapability } = require('~/server/services/Config');
|
const { checkCapability } = require('~/server/services/Config');
|
||||||
const { getConvoFiles } = require('~/models/Conversation');
|
|
||||||
const BaseClient = require('~/app/clients/BaseClient');
|
const BaseClient = require('~/app/clients/BaseClient');
|
||||||
const { getRoleByName } = require('~/models/Role');
|
const { getRoleByName } = require('~/models/Role');
|
||||||
const { loadAgent } = require('~/models/Agent');
|
const { loadAgent } = require('~/models/Agent');
|
||||||
const { getMCPManager } = require('~/config');
|
const { getMCPManager } = require('~/config');
|
||||||
const db = require('~/models');
|
|
||||||
|
|
||||||
const omitTitleOptions = new Set([
|
const omitTitleOptions = new Set([
|
||||||
'stream',
|
'stream',
|
||||||
|
|
@ -543,28 +542,18 @@ class AgentClient extends BaseClient {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
const agent = await initializeAgent(
|
const agent = await initializeAgent({
|
||||||
{
|
req: this.options.req,
|
||||||
req: this.options.req,
|
res: this.options.res,
|
||||||
res: this.options.res,
|
agent: prelimAgent,
|
||||||
agent: prelimAgent,
|
allowedProviders,
|
||||||
allowedProviders,
|
endpointOption: {
|
||||||
endpointOption: {
|
endpoint:
|
||||||
endpoint:
|
prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID
|
||||||
prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID
|
? EModelEndpoint.agents
|
||||||
? EModelEndpoint.agents
|
: memoryConfig.agent?.provider,
|
||||||
: memoryConfig.agent?.provider,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
});
|
||||||
getConvoFiles,
|
|
||||||
getFiles: db.getFiles,
|
|
||||||
getUserKey: db.getUserKey,
|
|
||||||
updateFilesUsage: db.updateFilesUsage,
|
|
||||||
getUserKeyValues: db.getUserKeyValues,
|
|
||||||
getToolFilesByIds: db.getToolFilesByIds,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!agent) {
|
if (!agent) {
|
||||||
logger.warn(
|
logger.warn(
|
||||||
|
|
@ -599,9 +588,9 @@ class AgentClient extends BaseClient {
|
||||||
messageId,
|
messageId,
|
||||||
conversationId,
|
conversationId,
|
||||||
memoryMethods: {
|
memoryMethods: {
|
||||||
setMemory: db.setMemory,
|
setMemory,
|
||||||
deleteMemory: db.deleteMemory,
|
deleteMemory,
|
||||||
getFormattedMemories: db.getFormattedMemories,
|
getFormattedMemories,
|
||||||
},
|
},
|
||||||
res: this.options.res,
|
res: this.options.res,
|
||||||
});
|
});
|
||||||
|
|
@ -1051,7 +1040,7 @@ class AgentClient extends BaseClient {
|
||||||
throw new Error('Run not initialized');
|
throw new Error('Run not initialized');
|
||||||
}
|
}
|
||||||
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
|
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
|
||||||
const { req, agent } = this.options;
|
const { req, res, agent } = this.options;
|
||||||
const appConfig = req.config;
|
const appConfig = req.config;
|
||||||
let endpoint = agent.endpoint;
|
let endpoint = agent.endpoint;
|
||||||
|
|
||||||
|
|
@ -1108,12 +1097,11 @@ class AgentClient extends BaseClient {
|
||||||
|
|
||||||
const options = await titleProviderConfig.getOptions({
|
const options = await titleProviderConfig.getOptions({
|
||||||
req,
|
req,
|
||||||
endpoint,
|
res,
|
||||||
model_parameters: clientOptions,
|
optionsOnly: true,
|
||||||
db: {
|
overrideEndpoint: endpoint,
|
||||||
getUserKey: db.getUserKey,
|
overrideModel: clientOptions.model,
|
||||||
getUserKeyValues: db.getUserKeyValues,
|
endpointOption: { model_parameters: clientOptions },
|
||||||
},
|
|
||||||
});
|
});
|
||||||
|
|
||||||
let provider = options.provider ?? titleProviderConfig.overrideProvider ?? agent.provider;
|
let provider = options.provider ?? titleProviderConfig.overrideProvider ?? agent.provider;
|
||||||
|
|
|
||||||
|
|
@ -38,13 +38,14 @@ const {
|
||||||
grantPermission,
|
grantPermission,
|
||||||
} = require('~/server/services/PermissionService');
|
} = require('~/server/services/PermissionService');
|
||||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||||
const { getCategoriesWithCounts, deleteFileByFilter } = require('~/models');
|
|
||||||
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
|
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
|
||||||
const { getFileStrategy } = require('~/server/utils/getFileStrategy');
|
const { getFileStrategy } = require('~/server/utils/getFileStrategy');
|
||||||
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
|
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
|
||||||
const { filterFile } = require('~/server/services/Files/process');
|
const { filterFile } = require('~/server/services/Files/process');
|
||||||
const { updateAction, getActions } = require('~/models/Action');
|
const { updateAction, getActions } = require('~/models/Action');
|
||||||
const { getCachedTools } = require('~/server/services/Config');
|
const { getCachedTools } = require('~/server/services/Config');
|
||||||
|
const { deleteFileByFilter } = require('~/models/File');
|
||||||
|
const { getCategoriesWithCounts } = require('~/models');
|
||||||
const { getLogStores } = require('~/cache');
|
const { getLogStores } = require('~/cache');
|
||||||
|
|
||||||
const systemTools = {
|
const systemTools = {
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,6 @@ const {
|
||||||
Constants,
|
Constants,
|
||||||
RunStatus,
|
RunStatus,
|
||||||
CacheKeys,
|
CacheKeys,
|
||||||
VisionModes,
|
|
||||||
ContentTypes,
|
ContentTypes,
|
||||||
EModelEndpoint,
|
EModelEndpoint,
|
||||||
ViolationTypes,
|
ViolationTypes,
|
||||||
|
|
@ -26,7 +25,6 @@ const {
|
||||||
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
|
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
|
||||||
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
|
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
|
||||||
const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts');
|
const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts');
|
||||||
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
|
||||||
const { createRun, StreamRunManager } = require('~/server/services/Runs');
|
const { createRun, StreamRunManager } = require('~/server/services/Runs');
|
||||||
const { addTitle } = require('~/server/services/Endpoints/assistants');
|
const { addTitle } = require('~/server/services/Endpoints/assistants');
|
||||||
const { createRunBody } = require('~/server/services/createRunBody');
|
const { createRunBody } = require('~/server/services/createRunBody');
|
||||||
|
|
@ -66,7 +64,7 @@ const chatV1 = async (req, res) => {
|
||||||
clientTimestamp,
|
clientTimestamp,
|
||||||
} = req.body;
|
} = req.body;
|
||||||
|
|
||||||
/** @type {OpenAI} */
|
/** @type {OpenAIClient} */
|
||||||
let openai;
|
let openai;
|
||||||
/** @type {string|undefined} - the current thread id */
|
/** @type {string|undefined} - the current thread id */
|
||||||
let thread_id = _thread_id;
|
let thread_id = _thread_id;
|
||||||
|
|
@ -287,10 +285,11 @@ const chatV1 = async (req, res) => {
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
const { openai: _openai } = await getOpenAIClient({
|
const { openai: _openai, client } = await getOpenAIClient({
|
||||||
req,
|
req,
|
||||||
res,
|
res,
|
||||||
endpointOption,
|
endpointOption,
|
||||||
|
initAppClient: true,
|
||||||
});
|
});
|
||||||
|
|
||||||
openai = _openai;
|
openai = _openai;
|
||||||
|
|
@ -365,15 +364,7 @@ const chatV1 = async (req, res) => {
|
||||||
role: 'user',
|
role: 'user',
|
||||||
content: '',
|
content: '',
|
||||||
};
|
};
|
||||||
const { files, image_urls } = await encodeAndFormat(
|
const files = await client.addImageURLs(visionMessage, attachments);
|
||||||
req,
|
|
||||||
attachments,
|
|
||||||
{
|
|
||||||
endpoint: EModelEndpoint.assistants,
|
|
||||||
},
|
|
||||||
VisionModes.generative,
|
|
||||||
);
|
|
||||||
visionMessage.image_urls = image_urls.length ? image_urls : undefined;
|
|
||||||
if (!visionMessage.image_urls?.length) {
|
if (!visionMessage.image_urls?.length) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -618,6 +609,7 @@ const chatV1 = async (req, res) => {
|
||||||
text,
|
text,
|
||||||
responseText: response.text,
|
responseText: response.text,
|
||||||
conversationId,
|
conversationId,
|
||||||
|
client,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -61,7 +61,7 @@ const chatV2 = async (req, res) => {
|
||||||
clientTimestamp,
|
clientTimestamp,
|
||||||
} = req.body;
|
} = req.body;
|
||||||
|
|
||||||
/** @type {OpenAI} */
|
/** @type {OpenAIClient} */
|
||||||
let openai;
|
let openai;
|
||||||
/** @type {string|undefined} - the current thread id */
|
/** @type {string|undefined} - the current thread id */
|
||||||
let thread_id = _thread_id;
|
let thread_id = _thread_id;
|
||||||
|
|
@ -160,10 +160,11 @@ const chatV2 = async (req, res) => {
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
const { openai: _openai } = await getOpenAIClient({
|
const { openai: _openai, client } = await getOpenAIClient({
|
||||||
req,
|
req,
|
||||||
res,
|
res,
|
||||||
endpointOption,
|
endpointOption,
|
||||||
|
initAppClient: true,
|
||||||
});
|
});
|
||||||
|
|
||||||
openai = _openai;
|
openai = _openai;
|
||||||
|
|
@ -452,6 +453,7 @@ const chatV2 = async (req, res) => {
|
||||||
text,
|
text,
|
||||||
responseText: response.text,
|
responseText: response.text,
|
||||||
conversationId,
|
conversationId,
|
||||||
|
client,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -63,7 +63,7 @@ const _listAssistants = async ({ req, res, version, query }) => {
|
||||||
* @returns {Promise<Array<Assistant>>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
|
* @returns {Promise<Array<Assistant>>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
|
||||||
*/
|
*/
|
||||||
const listAllAssistants = async ({ req, res, version, query }) => {
|
const listAllAssistants = async ({ req, res, version, query }) => {
|
||||||
/** @type {{ openai: OpenAI }} */
|
/** @type {{ openai: OpenAIClient }} */
|
||||||
const { openai } = await getOpenAIClient({ req, res, version });
|
const { openai } = await getOpenAIClient({ req, res, version });
|
||||||
const allAssistants = [];
|
const allAssistants = [];
|
||||||
|
|
||||||
|
|
@ -138,7 +138,6 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
|
||||||
|
|
||||||
/* The specified model is only necessary to
|
/* The specified model is only necessary to
|
||||||
fetch assistants for the shared instance */
|
fetch assistants for the shared instance */
|
||||||
req.body = req.body || {}; // Express 5: req.body is undefined instead of {} when no body parser runs
|
|
||||||
req.body.model = currentModelTuples[0][0];
|
req.body.model = currentModelTuples[0][0];
|
||||||
promises.push(listAllAssistants({ req, res, version, query }));
|
promises.push(listAllAssistants({ req, res, version, query }));
|
||||||
}
|
}
|
||||||
|
|
@ -182,10 +181,10 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
|
||||||
* @param {TEndpointOption} params.endpointOption - The endpoint options.
|
* @param {TEndpointOption} params.endpointOption - The endpoint options.
|
||||||
* @param {boolean} params.initAppClient - Whether to initialize the app client.
|
* @param {boolean} params.initAppClient - Whether to initialize the app client.
|
||||||
* @param {string} params.overrideEndpoint - The endpoint to override.
|
* @param {string} params.overrideEndpoint - The endpoint to override.
|
||||||
* @returns {Promise<{ openai: OpenAI, openAIApiKey: string }>} - The initialized OpenAI SDK client.
|
* @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
|
||||||
*/
|
*/
|
||||||
async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
|
async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
|
||||||
let endpoint = overrideEndpoint ?? req.body?.endpoint ?? req.query?.endpoint;
|
let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
|
||||||
const version = await getCurrentVersion(req, endpoint);
|
const version = await getCurrentVersion(req, endpoint);
|
||||||
if (!endpoint) {
|
if (!endpoint) {
|
||||||
throw new Error(`[${req.baseUrl}] Endpoint is required`);
|
throw new Error(`[${req.baseUrl}] Endpoint is required`);
|
||||||
|
|
|
||||||
|
|
@ -9,7 +9,7 @@ const { updateAssistantDoc, getAssistants } = require('~/models/Assistant');
|
||||||
const { getOpenAIClient, fetchAssistants } = require('./helpers');
|
const { getOpenAIClient, fetchAssistants } = require('./helpers');
|
||||||
const { getCachedTools } = require('~/server/services/Config');
|
const { getCachedTools } = require('~/server/services/Config');
|
||||||
const { manifestToolMap } = require('~/app/clients/tools');
|
const { manifestToolMap } = require('~/app/clients/tools');
|
||||||
const { deleteFileByFilter } = require('~/models');
|
const { deleteFileByFilter } = require('~/models/File');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create an assistant.
|
* Create an assistant.
|
||||||
|
|
@ -259,7 +259,7 @@ function filterAssistantDocs({ documents, userId, assistantsConfig = {} }) {
|
||||||
const getAssistantDocuments = async (req, res) => {
|
const getAssistantDocuments = async (req, res) => {
|
||||||
try {
|
try {
|
||||||
const appConfig = req.config;
|
const appConfig = req.config;
|
||||||
const endpoint = req.query?.endpoint;
|
const endpoint = req.query;
|
||||||
const assistantsConfig = appConfig.endpoints?.[endpoint];
|
const assistantsConfig = appConfig.endpoints?.[endpoint];
|
||||||
const documents = await getAssistants(
|
const documents = await getAssistants(
|
||||||
{},
|
{},
|
||||||
|
|
|
||||||
|
|
@ -1,14 +1,16 @@
|
||||||
/**
|
/**
|
||||||
* MCP Tools Controller
|
* MCP Tools Controller
|
||||||
* Handles MCP-specific tool endpoints, decoupled from regular LibreChat tools
|
* Handles MCP-specific tool endpoints, decoupled from regular LibreChat tools
|
||||||
*
|
|
||||||
* @import { MCPServerRegistry } from '@librechat/api'
|
|
||||||
* @import { MCPServerDocument } from 'librechat-data-provider'
|
|
||||||
*/
|
*/
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { Constants, MCPServerUserInputSchema } = require('librechat-data-provider');
|
const { Constants } = require('librechat-data-provider');
|
||||||
const { cacheMCPServerTools, getMCPServerTools } = require('~/server/services/Config');
|
const {
|
||||||
const { getMCPManager, getMCPServersRegistry } = require('~/config');
|
cacheMCPServerTools,
|
||||||
|
getMCPServerTools,
|
||||||
|
getAppConfig,
|
||||||
|
} = require('~/server/services/Config');
|
||||||
|
const { getMCPManager } = require('~/config');
|
||||||
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get all MCP tools available to the user
|
* Get all MCP tools available to the user
|
||||||
|
|
@ -21,14 +23,13 @@ const getMCPTools = async (req, res) => {
|
||||||
return res.status(401).json({ message: 'Unauthorized' });
|
return res.status(401).json({ message: 'Unauthorized' });
|
||||||
}
|
}
|
||||||
|
|
||||||
const mcpConfig = await getMCPServersRegistry().getAllServerConfigs(userId);
|
const appConfig = req.config ?? (await getAppConfig({ role: req.user?.role }));
|
||||||
const configuredServers = mcpConfig ? Object.keys(mcpConfig) : [];
|
if (!appConfig?.mcpConfig) {
|
||||||
|
|
||||||
if (!mcpConfig || Object.keys(mcpConfig).length == 0) {
|
|
||||||
return res.status(200).json({ servers: {} });
|
return res.status(200).json({ servers: {} });
|
||||||
}
|
}
|
||||||
|
|
||||||
const mcpManager = getMCPManager();
|
const mcpManager = getMCPManager();
|
||||||
|
const configuredServers = Object.keys(appConfig.mcpConfig);
|
||||||
const mcpServers = {};
|
const mcpServers = {};
|
||||||
|
|
||||||
const cachePromises = configuredServers.map((serverName) =>
|
const cachePromises = configuredServers.map((serverName) =>
|
||||||
|
|
@ -70,8 +71,8 @@ const getMCPTools = async (req, res) => {
|
||||||
const serverTools = serverToolsMap.get(serverName);
|
const serverTools = serverToolsMap.get(serverName);
|
||||||
|
|
||||||
// Get server config once
|
// Get server config once
|
||||||
const serverConfig = mcpConfig[serverName];
|
const serverConfig = appConfig.mcpConfig[serverName];
|
||||||
const rawServerConfig = await getMCPServersRegistry().getServerConfig(serverName, userId);
|
const rawServerConfig = await mcpServersRegistry.getServerConfig(serverName, userId);
|
||||||
|
|
||||||
// Initialize server object with all server-level data
|
// Initialize server object with all server-level data
|
||||||
const server = {
|
const server = {
|
||||||
|
|
@ -126,146 +127,7 @@ const getMCPTools = async (req, res) => {
|
||||||
res.status(500).json({ message: error.message });
|
res.status(500).json({ message: error.message });
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
/**
|
|
||||||
* Get all MCP servers with permissions
|
|
||||||
* @route GET /api/mcp/servers
|
|
||||||
*/
|
|
||||||
const getMCPServersList = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user?.id;
|
|
||||||
if (!userId) {
|
|
||||||
return res.status(401).json({ message: 'Unauthorized' });
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Get all server configs from registry (YAML + DB)
|
|
||||||
const serverConfigs = await getMCPServersRegistry().getAllServerConfigs(userId);
|
|
||||||
|
|
||||||
return res.json(serverConfigs);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[getMCPServersList]', error);
|
|
||||||
res.status(500).json({ error: error.message });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create MCP server
|
|
||||||
* @route POST /api/mcp/servers
|
|
||||||
*/
|
|
||||||
const createMCPServerController = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user?.id;
|
|
||||||
const { config } = req.body;
|
|
||||||
|
|
||||||
const validation = MCPServerUserInputSchema.safeParse(config);
|
|
||||||
if (!validation.success) {
|
|
||||||
return res.status(400).json({
|
|
||||||
message: 'Invalid configuration',
|
|
||||||
errors: validation.error.errors,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
const result = await getMCPServersRegistry().addServer(
|
|
||||||
'temp_server_name',
|
|
||||||
validation.data,
|
|
||||||
'DB',
|
|
||||||
userId,
|
|
||||||
);
|
|
||||||
res.status(201).json({
|
|
||||||
serverName: result.serverName,
|
|
||||||
...result.config,
|
|
||||||
});
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[createMCPServer]', error);
|
|
||||||
if (error.message?.startsWith('MCP_INSPECTION_FAILED')) {
|
|
||||||
return res.status(400).json({
|
|
||||||
error: 'MCP_INSPECTION_FAILED',
|
|
||||||
message: error.message,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
res.status(500).json({ message: error.message });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get MCP server by ID
|
|
||||||
*/
|
|
||||||
const getMCPServerById = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user?.id;
|
|
||||||
const { serverName } = req.params;
|
|
||||||
if (!serverName) {
|
|
||||||
return res.status(400).json({ message: 'Server name is required' });
|
|
||||||
}
|
|
||||||
const parsedConfig = await getMCPServersRegistry().getServerConfig(serverName, userId);
|
|
||||||
|
|
||||||
if (!parsedConfig) {
|
|
||||||
return res.status(404).json({ message: 'MCP server not found' });
|
|
||||||
}
|
|
||||||
|
|
||||||
res.status(200).json(parsedConfig);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[getMCPServerById]', error);
|
|
||||||
res.status(500).json({ message: error.message });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Update MCP server
|
|
||||||
* @route PATCH /api/mcp/servers/:serverName
|
|
||||||
*/
|
|
||||||
const updateMCPServerController = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user?.id;
|
|
||||||
const { serverName } = req.params;
|
|
||||||
const { config } = req.body;
|
|
||||||
|
|
||||||
const validation = MCPServerUserInputSchema.safeParse(config);
|
|
||||||
if (!validation.success) {
|
|
||||||
return res.status(400).json({
|
|
||||||
message: 'Invalid configuration',
|
|
||||||
errors: validation.error.errors,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
const parsedConfig = await getMCPServersRegistry().updateServer(
|
|
||||||
serverName,
|
|
||||||
validation.data,
|
|
||||||
'DB',
|
|
||||||
userId,
|
|
||||||
);
|
|
||||||
|
|
||||||
res.status(200).json(parsedConfig);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[updateMCPServer]', error);
|
|
||||||
if (error.message?.startsWith('MCP_INSPECTION_FAILED:')) {
|
|
||||||
return res.status(400).json({
|
|
||||||
error: 'MCP_INSPECTION_FAILED',
|
|
||||||
message: error.message,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
res.status(500).json({ message: error.message });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Delete MCP server
|
|
||||||
* @route DELETE /api/mcp/servers/:serverName
|
|
||||||
*/
|
|
||||||
const deleteMCPServerController = async (req, res) => {
|
|
||||||
try {
|
|
||||||
const userId = req.user?.id;
|
|
||||||
const { serverName } = req.params;
|
|
||||||
await getMCPServersRegistry().removeServer(serverName, 'DB', userId);
|
|
||||||
res.status(200).json({ message: 'MCP server deleted successfully' });
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('[deleteMCPServer]', error);
|
|
||||||
res.status(500).json({ message: error.message });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
getMCPTools,
|
getMCPTools,
|
||||||
getMCPServersList,
|
|
||||||
createMCPServerController,
|
|
||||||
getMCPServerById,
|
|
||||||
updateMCPServerController,
|
|
||||||
deleteMCPServerController,
|
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -246,22 +246,7 @@ if (cluster.isMaster) {
|
||||||
app.use(noIndex);
|
app.use(noIndex);
|
||||||
app.use(express.json({ limit: '3mb' }));
|
app.use(express.json({ limit: '3mb' }));
|
||||||
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
||||||
|
|
||||||
app.use(handleJsonParseError);
|
app.use(handleJsonParseError);
|
||||||
|
|
||||||
/**
|
|
||||||
* Express 5 Compatibility: Make req.query writable for mongoSanitize
|
|
||||||
* In Express 5, req.query is read-only by default, but express-mongo-sanitize needs to modify it
|
|
||||||
*/
|
|
||||||
app.use((req, _res, next) => {
|
|
||||||
Object.defineProperty(req, 'query', {
|
|
||||||
...Object.getOwnPropertyDescriptor(req, 'query'),
|
|
||||||
value: req.query,
|
|
||||||
writable: true,
|
|
||||||
});
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
|
|
||||||
app.use(mongoSanitize());
|
app.use(mongoSanitize());
|
||||||
app.use(cors());
|
app.use(cors());
|
||||||
app.use(cookieParser());
|
app.use(cookieParser());
|
||||||
|
|
@ -301,6 +286,7 @@ if (cluster.isMaster) {
|
||||||
app.use('/api/keys', routes.keys);
|
app.use('/api/keys', routes.keys);
|
||||||
app.use('/api/user', routes.user);
|
app.use('/api/user', routes.user);
|
||||||
app.use('/api/search', routes.search);
|
app.use('/api/search', routes.search);
|
||||||
|
app.use('/api/edit', routes.edit);
|
||||||
app.use('/api/messages', routes.messages);
|
app.use('/api/messages', routes.messages);
|
||||||
app.use('/api/convos', routes.convos);
|
app.use('/api/convos', routes.convos);
|
||||||
app.use('/api/presets', routes.presets);
|
app.use('/api/presets', routes.presets);
|
||||||
|
|
@ -343,12 +329,7 @@ if (cluster.isMaster) {
|
||||||
});
|
});
|
||||||
|
|
||||||
/** Start listening on shared port (cluster will distribute connections) */
|
/** Start listening on shared port (cluster will distribute connections) */
|
||||||
app.listen(port, host, async (err) => {
|
app.listen(port, host, async () => {
|
||||||
if (err) {
|
|
||||||
logger.error(`Worker ${process.pid} failed to start server:`, err);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
`Worker ${process.pid} started: Server listening at http://${
|
`Worker ${process.pid} started: Server listening at http://${
|
||||||
host == '0.0.0.0' ? 'localhost' : host
|
host == '0.0.0.0' ? 'localhost' : host
|
||||||
|
|
|
||||||
|
|
@ -83,20 +83,6 @@ const startServer = async () => {
|
||||||
app.use(express.json({ limit: '3mb' }));
|
app.use(express.json({ limit: '3mb' }));
|
||||||
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
||||||
app.use(handleJsonParseError);
|
app.use(handleJsonParseError);
|
||||||
|
|
||||||
/**
|
|
||||||
* Express 5 Compatibility: Make req.query writable for mongoSanitize
|
|
||||||
* In Express 5, req.query is read-only by default, but express-mongo-sanitize needs to modify it
|
|
||||||
*/
|
|
||||||
app.use((req, _res, next) => {
|
|
||||||
Object.defineProperty(req, 'query', {
|
|
||||||
...Object.getOwnPropertyDescriptor(req, 'query'),
|
|
||||||
value: req.query,
|
|
||||||
writable: true,
|
|
||||||
});
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
|
|
||||||
app.use(mongoSanitize());
|
app.use(mongoSanitize());
|
||||||
app.use(cors());
|
app.use(cors());
|
||||||
app.use(cookieParser());
|
app.use(cookieParser());
|
||||||
|
|
@ -136,6 +122,7 @@ const startServer = async () => {
|
||||||
app.use('/api/keys', routes.keys);
|
app.use('/api/keys', routes.keys);
|
||||||
app.use('/api/user', routes.user);
|
app.use('/api/user', routes.user);
|
||||||
app.use('/api/search', routes.search);
|
app.use('/api/search', routes.search);
|
||||||
|
app.use('/api/edit', routes.edit);
|
||||||
app.use('/api/messages', routes.messages);
|
app.use('/api/messages', routes.messages);
|
||||||
app.use('/api/convos', routes.convos);
|
app.use('/api/convos', routes.convos);
|
||||||
app.use('/api/presets', routes.presets);
|
app.use('/api/presets', routes.presets);
|
||||||
|
|
@ -144,6 +131,7 @@ const startServer = async () => {
|
||||||
app.use('/api/endpoints', routes.endpoints);
|
app.use('/api/endpoints', routes.endpoints);
|
||||||
app.use('/api/balance', routes.balance);
|
app.use('/api/balance', routes.balance);
|
||||||
app.use('/api/models', routes.models);
|
app.use('/api/models', routes.models);
|
||||||
|
app.use('/api/plugins', routes.plugins);
|
||||||
app.use('/api/config', routes.config);
|
app.use('/api/config', routes.config);
|
||||||
app.use('/api/assistants', routes.assistants);
|
app.use('/api/assistants', routes.assistants);
|
||||||
app.use('/api/files', await routes.files.initialize());
|
app.use('/api/files', await routes.files.initialize());
|
||||||
|
|
@ -175,12 +163,7 @@ const startServer = async () => {
|
||||||
res.send(updatedIndexHtml);
|
res.send(updatedIndexHtml);
|
||||||
});
|
});
|
||||||
|
|
||||||
app.listen(port, host, async (err) => {
|
app.listen(port, host, async () => {
|
||||||
if (err) {
|
|
||||||
logger.error('Failed to start server:', err);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (host === '0.0.0.0') {
|
if (host === '0.0.0.0') {
|
||||||
logger.info(
|
logger.info(
|
||||||
`Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
|
`Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,6 @@ async function abortRun(req, res) {
|
||||||
const conversation = await getConvo(req.user.id, conversationId);
|
const conversation = await getConvo(req.user.id, conversationId);
|
||||||
|
|
||||||
if (conversation?.model) {
|
if (conversation?.model) {
|
||||||
req.body = req.body || {}; // Express 5: ensure req.body exists
|
|
||||||
req.body.model = conversation.model;
|
req.body.model = conversation.model;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,61 +0,0 @@
|
||||||
const { ResourceType } = require('librechat-data-provider');
|
|
||||||
const { canAccessResource } = require('./canAccessResource');
|
|
||||||
const { findMCPServerById } = require('~/models');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* MCP Server ID resolver function
|
|
||||||
* Resolves custom MCP server ID (e.g., "mcp_abc123") to MongoDB ObjectId
|
|
||||||
*
|
|
||||||
* @param {string} mcpServerCustomId - Custom MCP server ID from route parameter
|
|
||||||
* @returns {Promise<Object|null>} MCP server document with _id field, or null if not found
|
|
||||||
*/
|
|
||||||
const resolveMCPServerId = async (mcpServerCustomId) => {
|
|
||||||
return await findMCPServerById(mcpServerCustomId);
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* MCP Server-specific middleware factory that creates middleware to check MCP server access permissions.
|
|
||||||
* This middleware extends the generic canAccessResource to handle MCP server custom ID resolution.
|
|
||||||
*
|
|
||||||
* @param {Object} options - Configuration options
|
|
||||||
* @param {number} options.requiredPermission - The permission bit required (1=view, 2=edit, 4=delete, 8=share)
|
|
||||||
* @param {string} [options.resourceIdParam='serverName'] - The name of the route parameter containing the MCP server custom ID
|
|
||||||
* @returns {Function} Express middleware function
|
|
||||||
*
|
|
||||||
* @example
|
|
||||||
* // Basic usage for viewing MCP servers
|
|
||||||
* router.get('/servers/:serverName',
|
|
||||||
* canAccessMCPServerResource({ requiredPermission: 1 }),
|
|
||||||
* getMCPServer
|
|
||||||
* );
|
|
||||||
*
|
|
||||||
* @example
|
|
||||||
* // Custom resource ID parameter and edit permission
|
|
||||||
* router.patch('/servers/:id',
|
|
||||||
* canAccessMCPServerResource({
|
|
||||||
* requiredPermission: 2,
|
|
||||||
* resourceIdParam: 'id'
|
|
||||||
* }),
|
|
||||||
* updateMCPServer
|
|
||||||
* );
|
|
||||||
*/
|
|
||||||
const canAccessMCPServerResource = (options) => {
|
|
||||||
const { requiredPermission, resourceIdParam = 'serverName' } = options;
|
|
||||||
|
|
||||||
if (!requiredPermission || typeof requiredPermission !== 'number') {
|
|
||||||
throw new Error(
|
|
||||||
'canAccessMCPServerResource: requiredPermission is required and must be a number',
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return canAccessResource({
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
requiredPermission,
|
|
||||||
resourceIdParam,
|
|
||||||
idResolver: resolveMCPServerId,
|
|
||||||
});
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
canAccessMCPServerResource,
|
|
||||||
};
|
|
||||||
|
|
@ -1,627 +0,0 @@
|
||||||
const mongoose = require('mongoose');
|
|
||||||
const { ResourceType, PrincipalType, PrincipalModel } = require('librechat-data-provider');
|
|
||||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
|
||||||
const { canAccessMCPServerResource } = require('./canAccessMCPServerResource');
|
|
||||||
const { User, Role, AclEntry } = require('~/db/models');
|
|
||||||
const { createMCPServer } = require('~/models');
|
|
||||||
|
|
||||||
describe('canAccessMCPServerResource middleware', () => {
|
|
||||||
let mongoServer;
|
|
||||||
let req, res, next;
|
|
||||||
let testUser;
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
mongoServer = await MongoMemoryServer.create();
|
|
||||||
const mongoUri = mongoServer.getUri();
|
|
||||||
await mongoose.connect(mongoUri);
|
|
||||||
});
|
|
||||||
|
|
||||||
afterAll(async () => {
|
|
||||||
await mongoose.disconnect();
|
|
||||||
await mongoServer.stop();
|
|
||||||
});
|
|
||||||
|
|
||||||
beforeEach(async () => {
|
|
||||||
await mongoose.connection.dropDatabase();
|
|
||||||
await Role.create({
|
|
||||||
name: 'test-role',
|
|
||||||
permissions: {
|
|
||||||
MCPSERVERS: {
|
|
||||||
USE: true,
|
|
||||||
CREATE: true,
|
|
||||||
SHARED_GLOBAL: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create a test user
|
|
||||||
testUser = await User.create({
|
|
||||||
email: 'test@example.com',
|
|
||||||
name: 'Test User',
|
|
||||||
username: 'testuser',
|
|
||||||
role: 'test-role',
|
|
||||||
});
|
|
||||||
|
|
||||||
req = {
|
|
||||||
user: { id: testUser._id, role: testUser.role },
|
|
||||||
params: {},
|
|
||||||
};
|
|
||||||
res = {
|
|
||||||
status: jest.fn().mockReturnThis(),
|
|
||||||
json: jest.fn(),
|
|
||||||
};
|
|
||||||
next = jest.fn();
|
|
||||||
|
|
||||||
jest.clearAllMocks();
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('middleware factory', () => {
|
|
||||||
test('should throw error if requiredPermission is not provided', () => {
|
|
||||||
expect(() => canAccessMCPServerResource({})).toThrow(
|
|
||||||
'canAccessMCPServerResource: requiredPermission is required and must be a number',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should throw error if requiredPermission is not a number', () => {
|
|
||||||
expect(() => canAccessMCPServerResource({ requiredPermission: '1' })).toThrow(
|
|
||||||
'canAccessMCPServerResource: requiredPermission is required and must be a number',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should throw error if requiredPermission is null', () => {
|
|
||||||
expect(() => canAccessMCPServerResource({ requiredPermission: null })).toThrow(
|
|
||||||
'canAccessMCPServerResource: requiredPermission is required and must be a number',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should create middleware with default resourceIdParam (serverName)', () => {
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
expect(typeof middleware).toBe('function');
|
|
||||||
expect(middleware.length).toBe(3); // Express middleware signature
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should create middleware with custom resourceIdParam', () => {
|
|
||||||
const middleware = canAccessMCPServerResource({
|
|
||||||
requiredPermission: 2,
|
|
||||||
resourceIdParam: 'mcpId',
|
|
||||||
});
|
|
||||||
expect(typeof middleware).toBe('function');
|
|
||||||
expect(middleware.length).toBe(3);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('permission checking with real MCP servers', () => {
|
|
||||||
test('should allow access when user is the MCP server author', async () => {
|
|
||||||
// Create an MCP server owned by the test user
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Test MCP Server',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the author (owner permissions)
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions (1+2+4+8)
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 }); // VIEW permission
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(res.status).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should deny access when user is not the author and has no ACL entry', async () => {
|
|
||||||
// Create an MCP server owned by a different user
|
|
||||||
const otherUser = await User.create({
|
|
||||||
email: 'other@example.com',
|
|
||||||
name: 'Other User',
|
|
||||||
username: 'otheruser',
|
|
||||||
role: 'test-role',
|
|
||||||
});
|
|
||||||
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Other User MCP Server',
|
|
||||||
},
|
|
||||||
author: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the other user (owner)
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: otherUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions
|
|
||||||
grantedBy: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 }); // VIEW permission
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(403);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Forbidden',
|
|
||||||
message: 'Insufficient permissions to access this mcpServer',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should allow access when user has ACL entry with sufficient permissions', async () => {
|
|
||||||
// Create an MCP server owned by a different user
|
|
||||||
const otherUser = await User.create({
|
|
||||||
email: 'other2@example.com',
|
|
||||||
name: 'Other User 2',
|
|
||||||
username: 'otheruser2',
|
|
||||||
role: 'test-role',
|
|
||||||
});
|
|
||||||
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Shared MCP Server',
|
|
||||||
},
|
|
||||||
author: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry granting view permission to test user
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 1, // VIEW permission
|
|
||||||
grantedBy: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 }); // VIEW permission
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(res.status).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should deny access when ACL permissions are insufficient', async () => {
|
|
||||||
// Create an MCP server owned by a different user
|
|
||||||
const otherUser = await User.create({
|
|
||||||
email: 'other3@example.com',
|
|
||||||
name: 'Other User 3',
|
|
||||||
username: 'otheruser3',
|
|
||||||
role: 'test-role',
|
|
||||||
});
|
|
||||||
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Limited Access MCP Server',
|
|
||||||
},
|
|
||||||
author: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry granting only view permission
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 1, // VIEW permission only
|
|
||||||
grantedBy: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 2 }); // EDIT permission required
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(403);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Forbidden',
|
|
||||||
message: 'Insufficient permissions to access this mcpServer',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should handle non-existent MCP server', async () => {
|
|
||||||
req.params.serverName = 'non-existent-mcp-server';
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(404);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Not Found',
|
|
||||||
message: 'mcpServer not found',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should use custom resourceIdParam', async () => {
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Custom Param MCP Server',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the author
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.mcpId = mcpServer.serverName; // Using custom param name
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({
|
|
||||||
requiredPermission: 1,
|
|
||||||
resourceIdParam: 'mcpId',
|
|
||||||
});
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(res.status).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('permission levels', () => {
|
|
||||||
let mcpServer;
|
|
||||||
|
|
||||||
beforeEach(async () => {
|
|
||||||
mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Permission Test MCP Server',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry with all permissions for the owner
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions (1+2+4+8)
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should support view permission (1)', async () => {
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should support edit permission (2)', async () => {
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 2 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should support delete permission (4)', async () => {
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 4 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should support share permission (8)', async () => {
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 8 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should support combined permissions', async () => {
|
|
||||||
const viewAndEdit = 1 | 2; // 3
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: viewAndEdit });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('integration with resolveMCPServerId', () => {
|
|
||||||
test('should resolve serverName to MongoDB ObjectId correctly', async () => {
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Integration Test MCP Server',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the author
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
// Verify that req.resourceAccess was set correctly
|
|
||||||
expect(req.resourceAccess).toBeDefined();
|
|
||||||
expect(req.resourceAccess.resourceType).toBe(ResourceType.MCPSERVER);
|
|
||||||
expect(req.resourceAccess.resourceId.toString()).toBe(mcpServer._id.toString());
|
|
||||||
expect(req.resourceAccess.customResourceId).toBe(mcpServer.serverName);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should work with MCP server CRUD operations', async () => {
|
|
||||||
// Create MCP server
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'CRUD Test MCP Server',
|
|
||||||
description: 'Testing integration',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the author
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15, // All permissions
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
// Test view access
|
|
||||||
const viewMiddleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await viewMiddleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
jest.clearAllMocks();
|
|
||||||
|
|
||||||
// Update the MCP server
|
|
||||||
const { updateMCPServer } = require('~/models');
|
|
||||||
await updateMCPServer(mcpServer.serverName, {
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'CRUD Test MCP Server',
|
|
||||||
description: 'Updated description',
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
// Test edit access
|
|
||||||
const editMiddleware = canAccessMCPServerResource({ requiredPermission: 2 });
|
|
||||||
await editMiddleware(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should handle stdio type MCP server', async () => {
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'stdio',
|
|
||||||
command: 'node',
|
|
||||||
args: ['server.js'],
|
|
||||||
title: 'Stdio MCP Server',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entry for the author
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer._id,
|
|
||||||
permBits: 15,
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(req.resourceAccess.resourceInfo.config.type).toBe('stdio');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('authentication and authorization edge cases', () => {
|
|
||||||
test('should return 400 when serverName parameter is missing', async () => {
|
|
||||||
// Don't set req.params.serverName
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(400);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Bad Request',
|
|
||||||
message: 'serverName is required',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should return 401 when user is not authenticated', async () => {
|
|
||||||
req.user = null;
|
|
||||||
req.params.serverName = 'some-server';
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(401);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Unauthorized',
|
|
||||||
message: 'Authentication required',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should return 401 when user id is missing', async () => {
|
|
||||||
req.user = { role: 'test-role' }; // No id
|
|
||||||
req.params.serverName = 'some-server';
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(401);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Unauthorized',
|
|
||||||
message: 'Authentication required',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
test('should allow admin users to bypass permission checks', async () => {
|
|
||||||
const { SystemRoles } = require('librechat-data-provider');
|
|
||||||
|
|
||||||
// Create an MCP server owned by another user
|
|
||||||
const otherUser = await User.create({
|
|
||||||
email: 'owner@example.com',
|
|
||||||
name: 'Owner User',
|
|
||||||
username: 'owneruser',
|
|
||||||
role: 'test-role',
|
|
||||||
});
|
|
||||||
|
|
||||||
const mcpServer = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp',
|
|
||||||
title: 'Admin Test MCP Server',
|
|
||||||
},
|
|
||||||
author: otherUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Set user as admin
|
|
||||||
req.user = { id: testUser._id, role: SystemRoles.ADMIN };
|
|
||||||
req.params.serverName = mcpServer.serverName;
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 4 }); // DELETE permission
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(res.status).not.toHaveBeenCalled();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('error handling', () => {
|
|
||||||
test('should handle server returning null gracefully (treated as not found)', async () => {
|
|
||||||
// When an MCP server is not found, findMCPServerById returns null
|
|
||||||
// which the middleware correctly handles as a 404
|
|
||||||
req.params.serverName = 'definitely-non-existent-server';
|
|
||||||
|
|
||||||
const middleware = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware(req, res, next);
|
|
||||||
|
|
||||||
expect(next).not.toHaveBeenCalled();
|
|
||||||
expect(res.status).toHaveBeenCalledWith(404);
|
|
||||||
expect(res.json).toHaveBeenCalledWith({
|
|
||||||
error: 'Not Found',
|
|
||||||
message: 'mcpServer not found',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('multiple servers with same title', () => {
|
|
||||||
test('should handle MCP servers with auto-generated suffixes', async () => {
|
|
||||||
// Create multiple servers with the same title (will have different serverNames)
|
|
||||||
const mcpServer1 = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp1',
|
|
||||||
title: 'Duplicate Title',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
const mcpServer2 = await createMCPServer({
|
|
||||||
config: {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://example.com/mcp2',
|
|
||||||
title: 'Duplicate Title',
|
|
||||||
},
|
|
||||||
author: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create ACL entries for both
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer1._id,
|
|
||||||
permBits: 15,
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
await AclEntry.create({
|
|
||||||
principalType: PrincipalType.USER,
|
|
||||||
principalId: testUser._id,
|
|
||||||
principalModel: PrincipalModel.USER,
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
resourceId: mcpServer2._id,
|
|
||||||
permBits: 15,
|
|
||||||
grantedBy: testUser._id,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Verify they have different serverNames
|
|
||||||
expect(mcpServer1.serverName).toBe('duplicate-title');
|
|
||||||
expect(mcpServer2.serverName).toBe('duplicate-title-2');
|
|
||||||
|
|
||||||
// Test access to first server
|
|
||||||
req.params.serverName = mcpServer1.serverName;
|
|
||||||
const middleware1 = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware1(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(req.resourceAccess.resourceId.toString()).toBe(mcpServer1._id.toString());
|
|
||||||
|
|
||||||
jest.clearAllMocks();
|
|
||||||
|
|
||||||
// Test access to second server
|
|
||||||
req.params.serverName = mcpServer2.serverName;
|
|
||||||
const middleware2 = canAccessMCPServerResource({ requiredPermission: 1 });
|
|
||||||
await middleware2(req, res, next);
|
|
||||||
expect(next).toHaveBeenCalled();
|
|
||||||
expect(req.resourceAccess.resourceId.toString()).toBe(mcpServer2._id.toString());
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
@ -2,7 +2,7 @@ const { logger } = require('@librechat/data-schemas');
|
||||||
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
|
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
|
||||||
const { getEffectivePermissions } = require('~/server/services/PermissionService');
|
const { getEffectivePermissions } = require('~/server/services/PermissionService');
|
||||||
const { getAgents } = require('~/models/Agent');
|
const { getAgents } = require('~/models/Agent');
|
||||||
const { getFiles } = require('~/models');
|
const { getFiles } = require('~/models/File');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Checks if user has access to a file through agent permissions
|
* Checks if user has access to a file through agent permissions
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@ const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||||
const { fileAccess } = require('./fileAccess');
|
const { fileAccess } = require('./fileAccess');
|
||||||
const { User, Role, AclEntry } = require('~/db/models');
|
const { User, Role, AclEntry } = require('~/db/models');
|
||||||
const { createAgent } = require('~/models/Agent');
|
const { createAgent } = require('~/models/Agent');
|
||||||
const { createFile } = require('~/models');
|
const { createFile } = require('~/models/File');
|
||||||
|
|
||||||
describe('fileAccess middleware', () => {
|
describe('fileAccess middleware', () => {
|
||||||
let mongoServer;
|
let mongoServer;
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@ const { canAccessAgentResource } = require('./canAccessAgentResource');
|
||||||
const { canAccessAgentFromBody } = require('./canAccessAgentFromBody');
|
const { canAccessAgentFromBody } = require('./canAccessAgentFromBody');
|
||||||
const { canAccessPromptViaGroup } = require('./canAccessPromptViaGroup');
|
const { canAccessPromptViaGroup } = require('./canAccessPromptViaGroup');
|
||||||
const { canAccessPromptGroupResource } = require('./canAccessPromptGroupResource');
|
const { canAccessPromptGroupResource } = require('./canAccessPromptGroupResource');
|
||||||
const { canAccessMCPServerResource } = require('./canAccessMCPServerResource');
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
canAccessResource,
|
canAccessResource,
|
||||||
|
|
@ -11,5 +10,4 @@ module.exports = {
|
||||||
canAccessAgentFromBody,
|
canAccessAgentFromBody,
|
||||||
canAccessPromptViaGroup,
|
canAccessPromptViaGroup,
|
||||||
canAccessPromptGroupResource,
|
canAccessPromptGroupResource,
|
||||||
canAccessMCPServerResource,
|
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -8,11 +8,22 @@ const {
|
||||||
} = require('librechat-data-provider');
|
} = require('librechat-data-provider');
|
||||||
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
|
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
|
||||||
const assistants = require('~/server/services/Endpoints/assistants');
|
const assistants = require('~/server/services/Endpoints/assistants');
|
||||||
|
const { processFiles } = require('~/server/services/Files/process');
|
||||||
|
const anthropic = require('~/server/services/Endpoints/anthropic');
|
||||||
|
const bedrock = require('~/server/services/Endpoints/bedrock');
|
||||||
|
const openAI = require('~/server/services/Endpoints/openAI');
|
||||||
const agents = require('~/server/services/Endpoints/agents');
|
const agents = require('~/server/services/Endpoints/agents');
|
||||||
const { updateFilesUsage } = require('~/models');
|
const custom = require('~/server/services/Endpoints/custom');
|
||||||
|
const google = require('~/server/services/Endpoints/google');
|
||||||
|
|
||||||
const buildFunction = {
|
const buildFunction = {
|
||||||
|
[EModelEndpoint.openAI]: openAI.buildOptions,
|
||||||
|
[EModelEndpoint.google]: google.buildOptions,
|
||||||
|
[EModelEndpoint.custom]: custom.buildOptions,
|
||||||
[EModelEndpoint.agents]: agents.buildOptions,
|
[EModelEndpoint.agents]: agents.buildOptions,
|
||||||
|
[EModelEndpoint.bedrock]: bedrock.buildOptions,
|
||||||
|
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
|
||||||
|
[EModelEndpoint.anthropic]: anthropic.buildOptions,
|
||||||
[EModelEndpoint.assistants]: assistants.buildOptions,
|
[EModelEndpoint.assistants]: assistants.buildOptions,
|
||||||
[EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
|
[EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
|
||||||
};
|
};
|
||||||
|
|
@ -78,11 +89,10 @@ async function buildEndpointOption(req, res, next) {
|
||||||
: buildFunction[endpointType ?? endpoint];
|
: buildFunction[endpointType ?? endpoint];
|
||||||
|
|
||||||
// TODO: use object params
|
// TODO: use object params
|
||||||
req.body = req.body || {}; // Express 5: ensure req.body exists
|
|
||||||
req.body.endpointOption = await builder(endpoint, parsedBody, endpointType);
|
req.body.endpointOption = await builder(endpoint, parsedBody, endpointType);
|
||||||
|
|
||||||
if (req.body.files && !isAgents) {
|
if (req.body.files && !isAgents) {
|
||||||
req.body.endpointOption.attachments = updateFilesUsage(req.body.files);
|
req.body.endpointOption.attachments = processFiles(req.body.files);
|
||||||
}
|
}
|
||||||
|
|
||||||
next();
|
next();
|
||||||
|
|
|
||||||
|
|
@ -19,14 +19,14 @@ const message = 'Your account has been temporarily banned due to violations of o
|
||||||
* @param {Object} req - Express Request object.
|
* @param {Object} req - Express Request object.
|
||||||
* @param {Object} res - Express Response object.
|
* @param {Object} res - Express Response object.
|
||||||
*
|
*
|
||||||
* @returns {Promise<Object>} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/agents/chat. If it is, calls `denyRequest()` function.
|
* @returns {Promise<Object>} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/ask or api/edit types. If it is, calls `denyRequest()` function.
|
||||||
*/
|
*/
|
||||||
const banResponse = async (req, res) => {
|
const banResponse = async (req, res) => {
|
||||||
const ua = uap(req.headers['user-agent']);
|
const ua = uap(req.headers['user-agent']);
|
||||||
const { baseUrl, originalUrl } = req;
|
const { baseUrl } = req;
|
||||||
if (!ua.browser.name) {
|
if (!ua.browser.name) {
|
||||||
return res.status(403).json({ message });
|
return res.status(403).json({ message });
|
||||||
} else if (baseUrl === '/api/agents' && originalUrl.startsWith('/api/agents/chat')) {
|
} else if (baseUrl === '/api/ask' || baseUrl === '/api/edit') {
|
||||||
return await denyRequest(req, res, { type: ViolationTypes.BAN });
|
return await denyRequest(req, res, { type: ViolationTypes.BAN });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ const buildEndpointOption = require('./buildEndpointOption');
|
||||||
const validateMessageReq = require('./validateMessageReq');
|
const validateMessageReq = require('./validateMessageReq');
|
||||||
const checkDomainAllowed = require('./checkDomainAllowed');
|
const checkDomainAllowed = require('./checkDomainAllowed');
|
||||||
const concurrentLimiter = require('./concurrentLimiter');
|
const concurrentLimiter = require('./concurrentLimiter');
|
||||||
|
const validateEndpoint = require('./validateEndpoint');
|
||||||
const requireLocalAuth = require('./requireLocalAuth');
|
const requireLocalAuth = require('./requireLocalAuth');
|
||||||
const canDeleteAccount = require('./canDeleteAccount');
|
const canDeleteAccount = require('./canDeleteAccount');
|
||||||
const accessResources = require('./accessResources');
|
const accessResources = require('./accessResources');
|
||||||
|
|
@ -41,6 +42,7 @@ module.exports = {
|
||||||
requireLdapAuth,
|
requireLdapAuth,
|
||||||
requireLocalAuth,
|
requireLocalAuth,
|
||||||
canDeleteAccount,
|
canDeleteAccount,
|
||||||
|
validateEndpoint,
|
||||||
configMiddleware,
|
configMiddleware,
|
||||||
concurrentLimiter,
|
concurrentLimiter,
|
||||||
checkDomainAllowed,
|
checkDomainAllowed,
|
||||||
|
|
|
||||||
20
api/server/middleware/validateEndpoint.js
Normal file
20
api/server/middleware/validateEndpoint.js
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
const { handleError } = require('@librechat/api');
|
||||||
|
|
||||||
|
function validateEndpoint(req, res, next) {
|
||||||
|
const { endpoint: _endpoint, endpointType } = req.body;
|
||||||
|
const endpoint = endpointType ?? _endpoint;
|
||||||
|
|
||||||
|
if (!req.body.text || req.body.text.length === 0) {
|
||||||
|
return handleError(res, { text: 'Prompt empty or too short' });
|
||||||
|
}
|
||||||
|
|
||||||
|
const pathEndpoint = req.baseUrl.split('/')[3];
|
||||||
|
|
||||||
|
if (endpoint !== pathEndpoint) {
|
||||||
|
return handleError(res, { text: 'Illegal request: Endpoint mismatch' });
|
||||||
|
}
|
||||||
|
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = validateEndpoint;
|
||||||
|
|
@ -43,6 +43,7 @@ afterEach(() => {
|
||||||
|
|
||||||
//TODO: This works/passes locally but http request tests fail with 404 in CI. Need to figure out why.
|
//TODO: This works/passes locally but http request tests fail with 404 in CI. Need to figure out why.
|
||||||
|
|
||||||
|
// eslint-disable-next-line jest/no-disabled-tests
|
||||||
describe.skip('GET /', () => {
|
describe.skip('GET /', () => {
|
||||||
it('should return 200 and the correct body', async () => {
|
it('should return 200 and the correct body', async () => {
|
||||||
process.env.APP_TITLE = 'Test Title';
|
process.env.APP_TITLE = 'Test Title';
|
||||||
|
|
|
||||||
|
|
@ -3,15 +3,6 @@ const request = require('supertest');
|
||||||
const mongoose = require('mongoose');
|
const mongoose = require('mongoose');
|
||||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||||
|
|
||||||
const mockRegistryInstance = {
|
|
||||||
getServerConfig: jest.fn(),
|
|
||||||
getOAuthServers: jest.fn(),
|
|
||||||
getAllServerConfigs: jest.fn(),
|
|
||||||
addServer: jest.fn(),
|
|
||||||
updateServer: jest.fn(),
|
|
||||||
removeServer: jest.fn(),
|
|
||||||
};
|
|
||||||
|
|
||||||
jest.mock('@librechat/api', () => ({
|
jest.mock('@librechat/api', () => ({
|
||||||
...jest.requireActual('@librechat/api'),
|
...jest.requireActual('@librechat/api'),
|
||||||
MCPOAuthHandler: {
|
MCPOAuthHandler: {
|
||||||
|
|
@ -22,14 +13,11 @@ jest.mock('@librechat/api', () => ({
|
||||||
},
|
},
|
||||||
MCPTokenStorage: {
|
MCPTokenStorage: {
|
||||||
storeTokens: jest.fn(),
|
storeTokens: jest.fn(),
|
||||||
getClientInfoAndMetadata: jest.fn(),
|
|
||||||
getTokens: jest.fn(),
|
|
||||||
deleteUserTokens: jest.fn(),
|
|
||||||
},
|
},
|
||||||
getUserMCPAuthMap: jest.fn(),
|
getUserMCPAuthMap: jest.fn(),
|
||||||
generateCheckAccess: jest.fn(() => (req, res, next) => next()),
|
mcpServersRegistry: {
|
||||||
MCPServersRegistry: {
|
getServerConfig: jest.fn(),
|
||||||
getInstance: () => mockRegistryInstance,
|
getOAuthServers: jest.fn(),
|
||||||
},
|
},
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
|
@ -50,9 +38,6 @@ jest.mock('@librechat/data-schemas', () => ({
|
||||||
findById: jest.fn(),
|
findById: jest.fn(),
|
||||||
},
|
},
|
||||||
})),
|
})),
|
||||||
createMethods: jest.fn(() => ({
|
|
||||||
findUser: jest.fn(),
|
|
||||||
})),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
jest.mock('~/models', () => ({
|
jest.mock('~/models', () => ({
|
||||||
|
|
@ -61,7 +46,6 @@ jest.mock('~/models', () => ({
|
||||||
createToken: jest.fn(),
|
createToken: jest.fn(),
|
||||||
deleteTokens: jest.fn(),
|
deleteTokens: jest.fn(),
|
||||||
findPluginAuthsByKeys: jest.fn(),
|
findPluginAuthsByKeys: jest.fn(),
|
||||||
getRoleByName: jest.fn(),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
jest.mock('~/server/services/Config', () => ({
|
jest.mock('~/server/services/Config', () => ({
|
||||||
|
|
@ -87,8 +71,6 @@ jest.mock('~/server/services/PluginService', () => ({
|
||||||
jest.mock('~/config', () => ({
|
jest.mock('~/config', () => ({
|
||||||
getMCPManager: jest.fn(),
|
getMCPManager: jest.fn(),
|
||||||
getFlowStateManager: jest.fn(),
|
getFlowStateManager: jest.fn(),
|
||||||
getOAuthReconnectionManager: jest.fn(),
|
|
||||||
getMCPServersRegistry: jest.fn(() => mockRegistryInstance),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
jest.mock('~/cache', () => ({
|
jest.mock('~/cache', () => ({
|
||||||
|
|
@ -97,7 +79,6 @@ jest.mock('~/cache', () => ({
|
||||||
|
|
||||||
jest.mock('~/server/middleware', () => ({
|
jest.mock('~/server/middleware', () => ({
|
||||||
requireJwtAuth: (req, res, next) => next(),
|
requireJwtAuth: (req, res, next) => next(),
|
||||||
canAccessMCPServerResource: () => (req, res, next) => next(),
|
|
||||||
}));
|
}));
|
||||||
|
|
||||||
jest.mock('~/server/services/Tools/mcp', () => ({
|
jest.mock('~/server/services/Tools/mcp', () => ({
|
||||||
|
|
@ -138,7 +119,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('GET /:serverName/oauth/initiate', () => {
|
describe('GET /:serverName/oauth/initiate', () => {
|
||||||
const { MCPOAuthHandler } = require('@librechat/api');
|
const { MCPOAuthHandler, mcpServersRegistry } = require('@librechat/api');
|
||||||
const { getLogStores } = require('~/cache');
|
const { getLogStores } = require('~/cache');
|
||||||
|
|
||||||
it('should initiate OAuth flow successfully', async () => {
|
it('should initiate OAuth flow successfully', async () => {
|
||||||
|
|
@ -153,7 +134,7 @@ describe('MCP Routes', () => {
|
||||||
|
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
|
|
||||||
MCPOAuthHandler.initiateOAuthFlow.mockResolvedValue({
|
MCPOAuthHandler.initiateOAuthFlow.mockResolvedValue({
|
||||||
authorizationUrl: 'https://oauth.example.com/auth',
|
authorizationUrl: 'https://oauth.example.com/auth',
|
||||||
|
|
@ -307,7 +288,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle OAuth callback successfully', async () => {
|
it('should handle OAuth callback successfully', async () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
||||||
completeFlow: jest.fn().mockResolvedValue(),
|
completeFlow: jest.fn().mockResolvedValue(),
|
||||||
|
|
@ -328,7 +309,7 @@ describe('MCP Routes', () => {
|
||||||
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
|
|
||||||
|
|
@ -400,7 +381,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle system-level OAuth completion', async () => {
|
it('should handle system-level OAuth completion', async () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
||||||
completeFlow: jest.fn().mockResolvedValue(),
|
completeFlow: jest.fn().mockResolvedValue(),
|
||||||
|
|
@ -421,7 +402,7 @@ describe('MCP Routes', () => {
|
||||||
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
|
|
||||||
|
|
@ -436,7 +417,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should handle reconnection failure after OAuth', async () => {
|
it('should handle reconnection failure after OAuth', async () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
||||||
completeFlow: jest.fn().mockResolvedValue(),
|
completeFlow: jest.fn().mockResolvedValue(),
|
||||||
|
|
@ -457,7 +438,7 @@ describe('MCP Routes', () => {
|
||||||
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
|
|
||||||
|
|
@ -482,7 +463,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should redirect to error page if token storage fails', async () => {
|
it('should redirect to error page if token storage fails', async () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
completeFlow: jest.fn().mockResolvedValue(),
|
completeFlow: jest.fn().mockResolvedValue(),
|
||||||
deleteFlow: jest.fn().mockResolvedValue(true),
|
deleteFlow: jest.fn().mockResolvedValue(true),
|
||||||
|
|
@ -502,7 +483,7 @@ describe('MCP Routes', () => {
|
||||||
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockRejectedValue(new Error('store failed'));
|
MCPTokenStorage.storeTokens.mockRejectedValue(new Error('store failed'));
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
|
|
||||||
|
|
@ -522,7 +503,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use original flow state credentials when storing tokens', async () => {
|
it('should use original flow state credentials when storing tokens', async () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
getFlowState: jest.fn(),
|
getFlowState: jest.fn(),
|
||||||
completeFlow: jest.fn().mockResolvedValue(),
|
completeFlow: jest.fn().mockResolvedValue(),
|
||||||
|
|
@ -554,7 +535,7 @@ describe('MCP Routes', () => {
|
||||||
MCPOAuthHandler.getFlowState.mockResolvedValue(flowState);
|
MCPOAuthHandler.getFlowState.mockResolvedValue(flowState);
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
getLogStores.mockReturnValue({});
|
getLogStores.mockReturnValue({});
|
||||||
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
|
||||||
|
|
||||||
|
|
@ -855,14 +836,14 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('POST /:serverName/reinitialize', () => {
|
describe('POST /:serverName/reinitialize', () => {
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
|
|
||||||
it('should return 404 when server is not found in configuration', async () => {
|
it('should return 404 when server is not found in configuration', async () => {
|
||||||
const mockMcpManager = {
|
const mockMcpManager = {
|
||||||
disconnectUserConnection: jest.fn().mockResolvedValue(),
|
disconnectUserConnection: jest.fn().mockResolvedValue(),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue(null);
|
mcpServersRegistry.getServerConfig.mockResolvedValue(null);
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
require('~/config').getFlowStateManager.mockReturnValue({});
|
require('~/config').getFlowStateManager.mockReturnValue({});
|
||||||
require('~/cache').getLogStores.mockReturnValue({});
|
require('~/cache').getLogStores.mockReturnValue({});
|
||||||
|
|
@ -887,7 +868,7 @@ describe('MCP Routes', () => {
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({
|
||||||
customUserVars: {},
|
customUserVars: {},
|
||||||
});
|
});
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
|
|
@ -920,7 +901,7 @@ describe('MCP Routes', () => {
|
||||||
getUserConnection: jest.fn().mockRejectedValue(new Error('Connection failed')),
|
getUserConnection: jest.fn().mockRejectedValue(new Error('Connection failed')),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
require('~/config').getFlowStateManager.mockReturnValue({});
|
require('~/config').getFlowStateManager.mockReturnValue({});
|
||||||
require('~/cache').getLogStores.mockReturnValue({});
|
require('~/cache').getLogStores.mockReturnValue({});
|
||||||
|
|
@ -939,7 +920,7 @@ describe('MCP Routes', () => {
|
||||||
disconnectUserConnection: jest.fn(),
|
disconnectUserConnection: jest.fn(),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockImplementation(() => {
|
mcpServersRegistry.getServerConfig.mockImplementation(() => {
|
||||||
throw new Error('Config loading failed');
|
throw new Error('Config loading failed');
|
||||||
});
|
});
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
|
|
@ -978,9 +959,7 @@ describe('MCP Routes', () => {
|
||||||
getUserConnection: jest.fn().mockResolvedValue(mockUserConnection),
|
getUserConnection: jest.fn().mockResolvedValue(mockUserConnection),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({ endpoint: 'http://test-server.com' });
|
||||||
endpoint: 'http://test-server.com',
|
|
||||||
});
|
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
require('~/config').getFlowStateManager.mockReturnValue({});
|
require('~/config').getFlowStateManager.mockReturnValue({});
|
||||||
require('~/cache').getLogStores.mockReturnValue({});
|
require('~/cache').getLogStores.mockReturnValue({});
|
||||||
|
|
@ -1025,7 +1004,7 @@ describe('MCP Routes', () => {
|
||||||
getUserConnection: jest.fn().mockResolvedValue(mockUserConnection),
|
getUserConnection: jest.fn().mockResolvedValue(mockUserConnection),
|
||||||
};
|
};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({
|
||||||
endpoint: 'http://test-server.com',
|
endpoint: 'http://test-server.com',
|
||||||
customUserVars: {
|
customUserVars: {
|
||||||
API_KEY: 'some-env-var',
|
API_KEY: 'some-env-var',
|
||||||
|
|
@ -1235,12 +1214,12 @@ describe('MCP Routes', () => {
|
||||||
|
|
||||||
describe('GET /:serverName/auth-values', () => {
|
describe('GET /:serverName/auth-values', () => {
|
||||||
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
|
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
|
||||||
// mockRegistryInstance is defined at the top of the file
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
|
|
||||||
it('should return auth value flags for server', async () => {
|
it('should return auth value flags for server', async () => {
|
||||||
const mockMcpManager = {};
|
const mockMcpManager = {};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({
|
||||||
customUserVars: {
|
customUserVars: {
|
||||||
API_KEY: 'some-env-var',
|
API_KEY: 'some-env-var',
|
||||||
SECRET_TOKEN: 'another-env-var',
|
SECRET_TOKEN: 'another-env-var',
|
||||||
|
|
@ -1267,7 +1246,7 @@ describe('MCP Routes', () => {
|
||||||
it('should return 404 when server is not found in configuration', async () => {
|
it('should return 404 when server is not found in configuration', async () => {
|
||||||
const mockMcpManager = {};
|
const mockMcpManager = {};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue(null);
|
mcpServersRegistry.getServerConfig.mockResolvedValue(null);
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/non-existent-server/auth-values');
|
const response = await request(app).get('/api/mcp/non-existent-server/auth-values');
|
||||||
|
|
@ -1281,7 +1260,7 @@ describe('MCP Routes', () => {
|
||||||
it('should handle errors when checking auth values', async () => {
|
it('should handle errors when checking auth values', async () => {
|
||||||
const mockMcpManager = {};
|
const mockMcpManager = {};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({
|
||||||
customUserVars: {
|
customUserVars: {
|
||||||
API_KEY: 'some-env-var',
|
API_KEY: 'some-env-var',
|
||||||
},
|
},
|
||||||
|
|
@ -1304,7 +1283,7 @@ describe('MCP Routes', () => {
|
||||||
it('should return 500 when auth values check throws unexpected error', async () => {
|
it('should return 500 when auth values check throws unexpected error', async () => {
|
||||||
const mockMcpManager = {};
|
const mockMcpManager = {};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockImplementation(() => {
|
mcpServersRegistry.getServerConfig.mockImplementation(() => {
|
||||||
throw new Error('Config loading failed');
|
throw new Error('Config loading failed');
|
||||||
});
|
});
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
|
|
@ -1318,7 +1297,7 @@ describe('MCP Routes', () => {
|
||||||
it('should handle customUserVars that is not an object', async () => {
|
it('should handle customUserVars that is not an object', async () => {
|
||||||
const mockMcpManager = {};
|
const mockMcpManager = {};
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({
|
mcpServersRegistry.getServerConfig.mockResolvedValue({
|
||||||
customUserVars: 'not-an-object',
|
customUserVars: 'not-an-object',
|
||||||
});
|
});
|
||||||
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
|
||||||
|
|
@ -1347,7 +1326,7 @@ describe('MCP Routes', () => {
|
||||||
|
|
||||||
describe('GET /:serverName/oauth/callback - Edge Cases', () => {
|
describe('GET /:serverName/oauth/callback - Edge Cases', () => {
|
||||||
it('should handle OAuth callback without toolFlowId (falsy toolFlowId)', async () => {
|
it('should handle OAuth callback without toolFlowId (falsy toolFlowId)', async () => {
|
||||||
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
|
const { MCPOAuthHandler, MCPTokenStorage, mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockTokens = {
|
const mockTokens = {
|
||||||
access_token: 'edge-access-token',
|
access_token: 'edge-access-token',
|
||||||
refresh_token: 'edge-refresh-token',
|
refresh_token: 'edge-refresh-token',
|
||||||
|
|
@ -1365,7 +1344,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
MCPOAuthHandler.completeOAuthFlow = jest.fn().mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow = jest.fn().mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
|
|
||||||
const mockFlowManager = {
|
const mockFlowManager = {
|
||||||
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
getFlowState: jest.fn().mockResolvedValue({ status: 'PENDING' }),
|
||||||
|
|
@ -1392,7 +1371,7 @@ describe('MCP Routes', () => {
|
||||||
it('should handle null cached tools in OAuth callback (triggers || {} fallback)', async () => {
|
it('should handle null cached tools in OAuth callback (triggers || {} fallback)', async () => {
|
||||||
const { getCachedTools } = require('~/server/services/Config');
|
const { getCachedTools } = require('~/server/services/Config');
|
||||||
getCachedTools.mockResolvedValue(null);
|
getCachedTools.mockResolvedValue(null);
|
||||||
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
|
const { MCPOAuthHandler, MCPTokenStorage, mcpServersRegistry } = require('@librechat/api');
|
||||||
const mockTokens = {
|
const mockTokens = {
|
||||||
access_token: 'edge-access-token',
|
access_token: 'edge-access-token',
|
||||||
refresh_token: 'edge-refresh-token',
|
refresh_token: 'edge-refresh-token',
|
||||||
|
|
@ -1418,7 +1397,7 @@ describe('MCP Routes', () => {
|
||||||
});
|
});
|
||||||
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
|
||||||
MCPTokenStorage.storeTokens.mockResolvedValue();
|
MCPTokenStorage.storeTokens.mockResolvedValue();
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue({});
|
mcpServersRegistry.getServerConfig.mockResolvedValue({});
|
||||||
|
|
||||||
const mockMcpManager = {
|
const mockMcpManager = {
|
||||||
getUserConnection: jest.fn().mockResolvedValue({
|
getUserConnection: jest.fn().mockResolvedValue({
|
||||||
|
|
@ -1436,282 +1415,4 @@ describe('MCP Routes', () => {
|
||||||
expect(response.headers.location).toContain('/oauth/success');
|
expect(response.headers.location).toContain('/oauth/success');
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('GET /servers', () => {
|
|
||||||
// mockRegistryInstance is defined at the top of the file
|
|
||||||
|
|
||||||
it('should return all server configs for authenticated user', async () => {
|
|
||||||
const mockServerConfigs = {
|
|
||||||
'server-1': {
|
|
||||||
endpoint: 'http://server1.com',
|
|
||||||
name: 'Server 1',
|
|
||||||
},
|
|
||||||
'server-2': {
|
|
||||||
endpoint: 'http://server2.com',
|
|
||||||
name: 'Server 2',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.getAllServerConfigs.mockResolvedValue(mockServerConfigs);
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers');
|
|
||||||
|
|
||||||
expect(response.status).toBe(200);
|
|
||||||
expect(response.body).toEqual(mockServerConfigs);
|
|
||||||
expect(mockRegistryInstance.getAllServerConfigs).toHaveBeenCalledWith('test-user-id');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return empty object when no servers are configured', async () => {
|
|
||||||
mockRegistryInstance.getAllServerConfigs.mockResolvedValue({});
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers');
|
|
||||||
|
|
||||||
expect(response.status).toBe(200);
|
|
||||||
expect(response.body).toEqual({});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 401 when user is not authenticated', async () => {
|
|
||||||
const unauthApp = express();
|
|
||||||
unauthApp.use(express.json());
|
|
||||||
unauthApp.use((req, _res, next) => {
|
|
||||||
req.user = null;
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
unauthApp.use('/api/mcp', mcpRouter);
|
|
||||||
|
|
||||||
const response = await request(unauthApp).get('/api/mcp/servers');
|
|
||||||
|
|
||||||
expect(response.status).toBe(401);
|
|
||||||
expect(response.body).toEqual({ message: 'Unauthorized' });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 500 when server config retrieval fails', async () => {
|
|
||||||
mockRegistryInstance.getAllServerConfigs.mockRejectedValue(new Error('Database error'));
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers');
|
|
||||||
|
|
||||||
expect(response.status).toBe(500);
|
|
||||||
expect(response.body).toEqual({ error: 'Database error' });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('POST /servers', () => {
|
|
||||||
it('should create MCP server with valid SSE config', async () => {
|
|
||||||
const validConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://mcp-server.example.com/sse',
|
|
||||||
title: 'Test SSE Server',
|
|
||||||
description: 'A test SSE server',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.addServer.mockResolvedValue({
|
|
||||||
serverName: 'test-sse-server',
|
|
||||||
config: validConfig,
|
|
||||||
});
|
|
||||||
|
|
||||||
const response = await request(app).post('/api/mcp/servers').send({ config: validConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(201);
|
|
||||||
expect(response.body).toEqual({
|
|
||||||
serverName: 'test-sse-server',
|
|
||||||
...validConfig,
|
|
||||||
});
|
|
||||||
expect(mockRegistryInstance.addServer).toHaveBeenCalledWith(
|
|
||||||
'temp_server_name',
|
|
||||||
expect.objectContaining({
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://mcp-server.example.com/sse',
|
|
||||||
}),
|
|
||||||
'DB',
|
|
||||||
'test-user-id',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should create MCP server with valid stdio config', async () => {
|
|
||||||
const validConfig = {
|
|
||||||
type: 'stdio',
|
|
||||||
command: 'node',
|
|
||||||
args: ['server.js'],
|
|
||||||
title: 'Test Stdio Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.addServer.mockResolvedValue({
|
|
||||||
serverName: 'test-stdio-server',
|
|
||||||
config: validConfig,
|
|
||||||
});
|
|
||||||
|
|
||||||
const response = await request(app).post('/api/mcp/servers').send({ config: validConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(201);
|
|
||||||
expect(response.body.serverName).toBe('test-stdio-server');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 400 for invalid configuration', async () => {
|
|
||||||
const invalidConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
// Missing required 'url' field
|
|
||||||
title: 'Invalid Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await request(app).post('/api/mcp/servers').send({ config: invalidConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(400);
|
|
||||||
expect(response.body.message).toBe('Invalid configuration');
|
|
||||||
expect(response.body.errors).toBeDefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 400 for SSE config with invalid URL protocol', async () => {
|
|
||||||
const invalidConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'ws://invalid-protocol.example.com/sse',
|
|
||||||
title: 'Invalid Protocol Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await request(app).post('/api/mcp/servers').send({ config: invalidConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(400);
|
|
||||||
expect(response.body.message).toBe('Invalid configuration');
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 500 when registry throws error', async () => {
|
|
||||||
const validConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://mcp-server.example.com/sse',
|
|
||||||
title: 'Test Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.addServer.mockRejectedValue(new Error('Database connection failed'));
|
|
||||||
|
|
||||||
const response = await request(app).post('/api/mcp/servers').send({ config: validConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(500);
|
|
||||||
expect(response.body).toEqual({ message: 'Database connection failed' });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('GET /servers/:serverName', () => {
|
|
||||||
it('should return server config when found', async () => {
|
|
||||||
const mockConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://mcp-server.example.com/sse',
|
|
||||||
title: 'Test Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue(mockConfig);
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers/test-server');
|
|
||||||
|
|
||||||
expect(response.status).toBe(200);
|
|
||||||
expect(response.body).toEqual(mockConfig);
|
|
||||||
expect(mockRegistryInstance.getServerConfig).toHaveBeenCalledWith(
|
|
||||||
'test-server',
|
|
||||||
'test-user-id',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 404 when server not found', async () => {
|
|
||||||
mockRegistryInstance.getServerConfig.mockResolvedValue(null);
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers/non-existent-server');
|
|
||||||
|
|
||||||
expect(response.status).toBe(404);
|
|
||||||
expect(response.body).toEqual({ message: 'MCP server not found' });
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 500 when registry throws error', async () => {
|
|
||||||
mockRegistryInstance.getServerConfig.mockRejectedValue(new Error('Database error'));
|
|
||||||
|
|
||||||
const response = await request(app).get('/api/mcp/servers/error-server');
|
|
||||||
|
|
||||||
expect(response.status).toBe(500);
|
|
||||||
expect(response.body).toEqual({ message: 'Database error' });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('PATCH /servers/:serverName', () => {
|
|
||||||
it('should update server with valid config', async () => {
|
|
||||||
const updatedConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://updated-mcp-server.example.com/sse',
|
|
||||||
title: 'Updated Server',
|
|
||||||
description: 'Updated description',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.updateServer.mockResolvedValue(updatedConfig);
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.patch('/api/mcp/servers/test-server')
|
|
||||||
.send({ config: updatedConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(200);
|
|
||||||
expect(response.body).toEqual(updatedConfig);
|
|
||||||
expect(mockRegistryInstance.updateServer).toHaveBeenCalledWith(
|
|
||||||
'test-server',
|
|
||||||
expect.objectContaining({
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://updated-mcp-server.example.com/sse',
|
|
||||||
}),
|
|
||||||
'DB',
|
|
||||||
'test-user-id',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 400 for invalid configuration', async () => {
|
|
||||||
const invalidConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
// Missing required 'url' field
|
|
||||||
title: 'Invalid Update',
|
|
||||||
};
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.patch('/api/mcp/servers/test-server')
|
|
||||||
.send({ config: invalidConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(400);
|
|
||||||
expect(response.body.message).toBe('Invalid configuration');
|
|
||||||
expect(response.body.errors).toBeDefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 500 when registry throws error', async () => {
|
|
||||||
const validConfig = {
|
|
||||||
type: 'sse',
|
|
||||||
url: 'https://mcp-server.example.com/sse',
|
|
||||||
title: 'Test Server',
|
|
||||||
};
|
|
||||||
|
|
||||||
mockRegistryInstance.updateServer.mockRejectedValue(new Error('Update failed'));
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.patch('/api/mcp/servers/test-server')
|
|
||||||
.send({ config: validConfig });
|
|
||||||
|
|
||||||
expect(response.status).toBe(500);
|
|
||||||
expect(response.body).toEqual({ message: 'Update failed' });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('DELETE /servers/:serverName', () => {
|
|
||||||
it('should delete server successfully', async () => {
|
|
||||||
mockRegistryInstance.removeServer.mockResolvedValue(undefined);
|
|
||||||
|
|
||||||
const response = await request(app).delete('/api/mcp/servers/test-server');
|
|
||||||
|
|
||||||
expect(response.status).toBe(200);
|
|
||||||
expect(response.body).toEqual({ message: 'MCP server deleted successfully' });
|
|
||||||
expect(mockRegistryInstance.removeServer).toHaveBeenCalledWith(
|
|
||||||
'test-server',
|
|
||||||
'DB',
|
|
||||||
'test-user-id',
|
|
||||||
);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 500 when registry throws error', async () => {
|
|
||||||
mockRegistryInstance.removeServer.mockRejectedValue(new Error('Deletion failed'));
|
|
||||||
|
|
||||||
const response = await request(app).delete('/api/mcp/servers/error-server');
|
|
||||||
|
|
||||||
expect(response.status).toBe(500);
|
|
||||||
expect(response.body).toEqual({ message: 'Deletion failed' });
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,6 @@ const express = require('express');
|
||||||
const { ResourceType, PermissionBits } = require('librechat-data-provider');
|
const { ResourceType, PermissionBits } = require('librechat-data-provider');
|
||||||
const {
|
const {
|
||||||
getUserEffectivePermissions,
|
getUserEffectivePermissions,
|
||||||
getAllEffectivePermissions,
|
|
||||||
updateResourcePermissions,
|
updateResourcePermissions,
|
||||||
getResourcePermissions,
|
getResourcePermissions,
|
||||||
getResourceRoles,
|
getResourceRoles,
|
||||||
|
|
@ -10,7 +9,6 @@ const {
|
||||||
} = require('~/server/controllers/PermissionsController');
|
} = require('~/server/controllers/PermissionsController');
|
||||||
const { requireJwtAuth, checkBan, uaParser, canAccessResource } = require('~/server/middleware');
|
const { requireJwtAuth, checkBan, uaParser, canAccessResource } = require('~/server/middleware');
|
||||||
const { checkPeoplePickerAccess } = require('~/server/middleware/checkPeoplePickerAccess');
|
const { checkPeoplePickerAccess } = require('~/server/middleware/checkPeoplePickerAccess');
|
||||||
const { findMCPServerById } = require('~/models');
|
|
||||||
|
|
||||||
const router = express.Router();
|
const router = express.Router();
|
||||||
|
|
||||||
|
|
@ -65,13 +63,6 @@ router.put(
|
||||||
requiredPermission: PermissionBits.SHARE,
|
requiredPermission: PermissionBits.SHARE,
|
||||||
resourceIdParam: 'resourceId',
|
resourceIdParam: 'resourceId',
|
||||||
});
|
});
|
||||||
} else if (resourceType === ResourceType.MCPSERVER) {
|
|
||||||
middleware = canAccessResource({
|
|
||||||
resourceType: ResourceType.MCPSERVER,
|
|
||||||
requiredPermission: PermissionBits.SHARE,
|
|
||||||
resourceIdParam: 'resourceId',
|
|
||||||
idResolver: findMCPServerById,
|
|
||||||
});
|
|
||||||
} else {
|
} else {
|
||||||
return res.status(400).json({
|
return res.status(400).json({
|
||||||
error: 'Bad Request',
|
error: 'Bad Request',
|
||||||
|
|
@ -85,12 +76,6 @@ router.put(
|
||||||
updateResourcePermissions,
|
updateResourcePermissions,
|
||||||
);
|
);
|
||||||
|
|
||||||
/**
|
|
||||||
* GET /api/permissions/{resourceType}/effective/all
|
|
||||||
* Get user's effective permissions for all accessible resources of a type
|
|
||||||
*/
|
|
||||||
router.get('/:resourceType/effective/all', getAllEffectivePermissions);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* GET /api/permissions/{resourceType}/{resourceId}/effective
|
* GET /api/permissions/{resourceType}/{resourceId}/effective
|
||||||
* Get user's effective permissions for a specific resource
|
* Get user's effective permissions for a specific resource
|
||||||
|
|
|
||||||
|
|
@ -154,7 +154,6 @@ router.post('/:assistant_id', async (req, res) => {
|
||||||
router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
|
router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
|
||||||
try {
|
try {
|
||||||
const { assistant_id, action_id, model } = req.params;
|
const { assistant_id, action_id, model } = req.params;
|
||||||
req.body = req.body || {}; // Express 5: ensure req.body exists
|
|
||||||
req.body.model = model;
|
req.body.model = model;
|
||||||
const { openai } = await getOpenAIClient({ req, res });
|
const { openai } = await getOpenAIClient({ req, res });
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ const {
|
||||||
setHeaders,
|
setHeaders,
|
||||||
handleAbort,
|
handleAbort,
|
||||||
validateModel,
|
validateModel,
|
||||||
|
// validateEndpoint,
|
||||||
buildEndpointOption,
|
buildEndpointOption,
|
||||||
} = require('~/server/middleware');
|
} = require('~/server/middleware');
|
||||||
const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
|
const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ const {
|
||||||
setHeaders,
|
setHeaders,
|
||||||
handleAbort,
|
handleAbort,
|
||||||
validateModel,
|
validateModel,
|
||||||
|
// validateEndpoint,
|
||||||
buildEndpointOption,
|
buildEndpointOption,
|
||||||
} = require('~/server/middleware');
|
} = require('~/server/middleware');
|
||||||
const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
|
const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,18 @@
|
||||||
const express = require('express');
|
const express = require('express');
|
||||||
const { logger } = require('@librechat/data-schemas');
|
const { logger } = require('@librechat/data-schemas');
|
||||||
const { isEnabled, getBalanceConfig } = require('@librechat/api');
|
const { isEnabled, getBalanceConfig } = require('@librechat/api');
|
||||||
const { Constants, CacheKeys, defaultSocialLogins } = require('librechat-data-provider');
|
const {
|
||||||
|
Constants,
|
||||||
|
CacheKeys,
|
||||||
|
removeNullishValues,
|
||||||
|
defaultSocialLogins,
|
||||||
|
} = require('librechat-data-provider');
|
||||||
const { getLdapConfig } = require('~/server/services/Config/ldap');
|
const { getLdapConfig } = require('~/server/services/Config/ldap');
|
||||||
const { getAppConfig } = require('~/server/services/Config/app');
|
const { getAppConfig } = require('~/server/services/Config/app');
|
||||||
const { getProjectByName } = require('~/models/Project');
|
const { getProjectByName } = require('~/models/Project');
|
||||||
|
const { getMCPManager } = require('~/config');
|
||||||
const { getLogStores } = require('~/cache');
|
const { getLogStores } = require('~/cache');
|
||||||
|
const { mcpServersRegistry } = require('@librechat/api');
|
||||||
|
|
||||||
const router = express.Router();
|
const router = express.Router();
|
||||||
const emailLoginEnabled =
|
const emailLoginEnabled =
|
||||||
|
|
@ -23,11 +30,46 @@ const publicSharedLinksEnabled =
|
||||||
const sharePointFilePickerEnabled = isEnabled(process.env.ENABLE_SHAREPOINT_FILEPICKER);
|
const sharePointFilePickerEnabled = isEnabled(process.env.ENABLE_SHAREPOINT_FILEPICKER);
|
||||||
const openidReuseTokens = isEnabled(process.env.OPENID_REUSE_TOKENS);
|
const openidReuseTokens = isEnabled(process.env.OPENID_REUSE_TOKENS);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetches MCP servers from registry and adds them to the payload.
|
||||||
|
* Registry now includes all configured servers (from YAML) plus inspection data when available.
|
||||||
|
* Always fetches fresh to avoid caching incomplete initialization state.
|
||||||
|
*/
|
||||||
|
const getMCPServers = async (payload, appConfig) => {
|
||||||
|
try {
|
||||||
|
if (appConfig?.mcpConfig == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const mcpManager = getMCPManager();
|
||||||
|
if (!mcpManager) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const mcpServers = await mcpServersRegistry.getAllServerConfigs();
|
||||||
|
if (!mcpServers) return;
|
||||||
|
for (const serverName in mcpServers) {
|
||||||
|
if (!payload.mcpServers) {
|
||||||
|
payload.mcpServers = {};
|
||||||
|
}
|
||||||
|
const serverConfig = mcpServers[serverName];
|
||||||
|
payload.mcpServers[serverName] = removeNullishValues({
|
||||||
|
startup: serverConfig?.startup,
|
||||||
|
chatMenu: serverConfig?.chatMenu,
|
||||||
|
isOAuth: serverConfig.requiresOAuth,
|
||||||
|
customUserVars: serverConfig?.customUserVars,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error loading MCP servers', error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
router.get('/', async function (req, res) {
|
router.get('/', async function (req, res) {
|
||||||
const cache = getLogStores(CacheKeys.CONFIG_STORE);
|
const cache = getLogStores(CacheKeys.CONFIG_STORE);
|
||||||
|
|
||||||
const cachedStartupConfig = await cache.get(CacheKeys.STARTUP_CONFIG);
|
const cachedStartupConfig = await cache.get(CacheKeys.STARTUP_CONFIG);
|
||||||
if (cachedStartupConfig) {
|
if (cachedStartupConfig) {
|
||||||
|
const appConfig = await getAppConfig({ role: req.user?.role });
|
||||||
|
await getMCPServers(cachedStartupConfig, appConfig);
|
||||||
res.send(cachedStartupConfig);
|
res.send(cachedStartupConfig);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
@ -148,6 +190,7 @@ router.get('/', async function (req, res) {
|
||||||
}
|
}
|
||||||
|
|
||||||
await cache.set(CacheKeys.STARTUP_CONFIG, payload);
|
await cache.set(CacheKeys.STARTUP_CONFIG, payload);
|
||||||
|
await getMCPServers(payload, appConfig);
|
||||||
return res.status(200).send(payload);
|
return res.status(200).send(payload);
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
logger.error('Error in startup config', err);
|
logger.error('Error in startup config', err);
|
||||||
|
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue