🧵 refactor: Migrate Endpoint Initialization to TypeScript (#10794)

* refactor: move endpoint initialization methods to typescript

* refactor: move agent init to packages/api

- Introduced `initialize.ts` for agent initialization, including file processing and tool loading (call-site sketch after this list).
- Updated `resources.ts` to allow optional appConfig parameter.
- Enhanced endpoint configuration handling in various initialization files to support model parameters.
- Added new artifacts and prompts for React component generation.
- Refactored existing code to improve type safety and maintainability.
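
Concretely, callers now pass the database helpers as a second argument instead of `initialize.ts` importing the models directly. A sketch of the call shape, assuming the helper names used at the call sites in this PR (`db` stands for the server's `~/models` module; `req`, `res`, `primaryAgent`, and friends come from the request context):

```ts
import { initializeAgent } from '@librechat/api';

const agent = await initializeAgent(
  {
    req,
    res,
    loadTools,
    requestFiles,
    conversationId,
    agent: primaryAgent,
    endpointOption,
    allowedProviders,
    isInitialAgent: true,
  },
  {
    // Injected database helpers so packages/api stays decoupled from api/models
    getConvoFiles,
    getFiles: db.getFiles,
    getUserKey: db.getUserKey,
    updateFilesUsage: db.updateFilesUsage,
    getUserKeyValues: db.getUserKeyValues,
    getToolFilesByIds: db.getToolFilesByIds,
  },
);
```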

* refactor: streamline endpoint initialization and enhance type safety

- Updated initialization functions across endpoints to use a consistent request structure, replacing `unknown` types with `ServerResponse` (see the sketch after this list).
- Simplified request handling by directly extracting keys from the request body.
- Improved type safety by ensuring user IDs are safely accessed with optional chaining.
- Removed unnecessary parameters and streamlined model options handling for better clarity and maintainability.
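
A minimal sketch of the unified initializer shape described above; `ServerRequest`/`ServerResponse` are the types referenced in this PR, while the import paths and option fields shown here are assumptions:

```ts
import type { ServerRequest, ServerResponse } from '@librechat/api'; // path assumed
import type { TEndpointOption } from 'librechat-data-provider';

async function initializeClient({
  req,
  res,
  endpointOption,
}: {
  req: ServerRequest;
  res: ServerResponse; // previously typed as `unknown`; threaded through for streaming handlers
  endpointOption?: TEndpointOption;
}) {
  const { key: expiresAt, endpoint } = req.body; // keys extracted directly from the request body
  const userId = req.user?.id; // user ID accessed safely with optional chaining
  // ...provider-specific option handling continues here
  return { expiresAt, endpoint, userId };
}
```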

* refactor: move ModelService and extractBaseURL to packages/api

- Added comprehensive tests for the models-fetching functionality, covering OpenAI, Anthropic, Google, and Ollama models (usage sketch after this list).
- Updated existing endpoint index to include the new models module.
- Enhanced utility functions for URL extraction and model data processing.
- Improved type safety and error handling across the models fetching logic.
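
A brief usage sketch of the consolidated API, which now ships from `@librechat/api` alongside the default-model getters (`getOpenAIModels`, `getGoogleModels`, `getBedrockModels`). Parameter names match the existing `fetchModels` call sites in this PR; the literal values are placeholders:

```ts
import { fetchModels } from '@librechat/api';

// Placeholder values; in the server these come from the endpoint config and request.
const models = await fetchModels({
  apiKey: process.env.OPENAI_API_KEY ?? '',
  baseURL: 'https://api.openai.com/v1',
  name: 'openAI',
  user: 'user-id',
});
```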

* refactor: consolidate utility functions and remove unused files

- Merged `deriveBaseURL` and `extractBaseURL` into the `@librechat/api` module for better organization (usage sketch after this list).
- Removed redundant utility files and their associated tests to streamline the codebase.
- Updated imports across various client files to utilize the new consolidated functions.
- Enhanced overall maintainability by reducing the number of utility modules.
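
Both URL helpers are now imported from `@librechat/api`, as the updated clients in the diffs below show. An illustrative call for each; the return values are assumptions based on their prior use with Ollama and reverse-proxy URLs:

```ts
import { deriveBaseURL, extractBaseURL } from '@librechat/api';

// e.g. 'http://localhost:11434' (protocol + host, path stripped) — assumed behavior
const ollamaBase = deriveBaseURL('http://localhost:11434/api/chat');

// e.g. 'https://api.openai.com/v1' (base URL up to the API version) — assumed behavior
const openaiBase = extractBaseURL('https://api.openai.com/v1/chat/completions');
```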

* refactor: replace ModelService references with direct imports from @librechat/api and remove ModelService file

* refactor: move encrypt/decrypt methods and key db methods to data-schemas, use `getProviderConfig` from `@librechat/api`
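
In practice the imports shift like this; the `getProviderConfig` call mirrors its use in the agent initializer, and `provider`/`appConfig` come from the agent and request context:

```ts
import { encryptV2, decryptV2 } from '@librechat/data-schemas';
import { getProviderConfig } from '@librechat/api';

// Encrypt/decrypt a stored credential (assumed async, as in prior use; awaiting a sync value is also safe).
const ciphertext = await encryptV2('user-provided-api-key');
const plaintext = await decryptV2(ciphertext);

// Resolve provider-specific options and any provider override for the agent run.
const { getOptions, overrideProvider } = getProviderConfig({ provider, appConfig });
```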

* chore: remove unused 'res' from options in AgentClient

* refactor: file model imports and methods

- Updated imports in various controllers and services to use the unified file model from '~/models' instead of '~/models/File'.
- Consolidated file-related methods into a new file methods module in the data-schemas package (see the sketch after this list).
- Added comprehensive tests for file methods including creation, retrieval, updating, and deletion.
- Enhanced the initializeAgent function to accept dependency injection for file-related methods.
- Improved error handling and logging in file methods.
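
The consolidated file methods are obtained through `createMethods`, as the updated tests in the diffs below do; the destructured names are the ones exercised there:

```ts
import mongoose from 'mongoose';
import { createMethods } from '@librechat/data-schemas';

// createMethods binds the schema methods (including the file methods) to this connection.
const methods = createMethods(mongoose);
const { getFiles, createFile, updateFile, deleteFile, updateFileUsage } = methods;
```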

* refactor: streamline database method references in agent initialization

* refactor: enhance file method tests and update type references to IMongoFile

* refactor: consolidate database method imports in agent client and initialization

* chore: remove redundant import of initializeAgent from @librechat/api

* refactor: move checkUserKeyExpiry utility to @librechat/api and update references across endpoints
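
A small sketch of the relocated helper, following its use in the endpoint initializers; the endpoint name and request shape here are illustrative:

```ts
import { checkUserKeyExpiry, isUserProvided } from '@librechat/api';
import { getUserKey } from '~/models'; // key lookups stay in the models layer

const expiresAt = req.body.key;
if (expiresAt && isUserProvided(process.env.ANTHROPIC_API_KEY)) {
  checkUserKeyExpiry(expiresAt, 'anthropic'); // throws when the stored user key has expired
  const userKey = await getUserKey({ userId: req.user.id, name: 'anthropic' });
}
```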

* refactor: move updateUserPlugins logic to user.ts and simplify UserController
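
The controller now calls the model method directly; the argument order below is taken from the updated controller in this PR, with `user`, `pluginKey`, and `action` coming from the request:

```ts
import { updateUserPlugins } from '~/models';

// Previously routed through UserService; now a single direct model call.
await updateUserPlugins(user._id, user.plugins, pluginKey, action);
```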

* refactor: update imports for user key management and remove UserService

* refactor: remove unused Anthropic and Bedrock endpoint files and clean up imports

* refactor: consolidate and update encryption imports across various files to use @librechat/data-schemas

* chore: update file model mock to use unified import from '~/models'

* chore: import order

* refactor: remove the agent.js file (now migrated to TS) and its associated logic from the endpoints

* chore: add reusable function to extract imports from source code in unused-packages workflow

* chore: enhance unused-packages workflow to include @librechat/api dependencies and improve dependency extraction

* chore: improve dependency extraction in unused-packages workflow with enhanced error handling and debugging output

* chore: add detailed debugging output to unused-packages workflow for better visibility into unused dependencies and exclusion lists

* chore: refine subpath handling in unused-packages workflow to correctly process scoped and non-scoped package imports

* chore: clean up unused debug output in unused-packages workflow and reorganize type imports in initialize.ts

Danny Avila · 2025-12-03 17:21:41 -05:00
parent 1a11b64266 · commit 04a4a2aa44
GPG key ID: BF31EEB2C5CA0956 (no known key found for this signature in database)
103 changed files with 4135 additions and 2647 deletions

View file

@ -8,6 +8,7 @@ on:
- 'client/**'
- 'api/**'
- 'packages/client/**'
- 'packages/api/**'
jobs:
detect-unused-packages:
@ -63,35 +64,45 @@ jobs:
extract_deps_from_code() {
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
# Extract require() statements
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
# Extract ES6 imports - various patterns
# import x from 'module'
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# Initialize empty output file
> "$output_file"
if [[ -d "$folder" ]]; then
# Extract require() statements (use explicit includes for portability)
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" \
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" >> "$output_file" || true
# Extract ES6 imports - import x from 'module'
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
# import 'module' (side-effect imports)
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
# export { x } from 'module' or export * from 'module'
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
--include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
# import type { x } from 'module' (TypeScript)
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \
--include='*.ts' --include='*.tsx' 2>/dev/null | \
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true
# Remove subpath imports but keep the base package
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
# For scoped packages: '@scope/pkg/subpath' -> '@scope/pkg'
# For regular packages: 'pkg/subpath' -> 'pkg'
# Scoped packages (must keep @scope/package, strip anything after)
sed -i -E 's|^(@[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
# Non-scoped packages (keep package name, strip subpath)
sed -i -E 's|^([a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
fi
}
@ -99,8 +110,10 @@ jobs:
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt
# Extract dependencies used by @librechat/client package
# Extract dependencies used by workspace packages
# These packages are used in the workspace but dependencies are provided by parent package.json
extract_deps_from_code "packages/client" packages_client_used_code.txt
extract_deps_from_code "packages/api" packages_api_used_code.txt
- name: Get @librechat/client dependencies
id: get-librechat-client-deps
@ -126,6 +139,30 @@ jobs:
touch librechat_client_deps.txt
fi
- name: Get @librechat/api dependencies
id: get-librechat-api-deps
run: |
if [[ -f "packages/api/package.json" ]]; then
# Get all dependencies from @librechat/api (dependencies, devDependencies, and peerDependencies)
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "")
# Combine all dependencies
echo "$DEPS" > librechat_api_deps.txt
echo "$DEV_DEPS" >> librechat_api_deps.txt
echo "$PEER_DEPS" >> librechat_api_deps.txt
# Also include dependencies that are imported in packages/api
cat packages_api_used_code.txt >> librechat_api_deps.txt
# Remove empty lines and sort
grep -v '^$' librechat_api_deps.txt | sort -u > temp_deps.txt
mv temp_deps.txt librechat_api_deps.txt
else
touch librechat_api_deps.txt
fi
- name: Extract Workspace Dependencies
id: extract-workspace-deps
run: |
@ -184,8 +221,8 @@ jobs:
chmod -R 755 client
cd client
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/client imports
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt ../packages_client_used_code.txt ../librechat_client_deps.txt 2>/dev/null | sort -u) || echo "")
# Filter out false positives
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
@ -201,8 +238,8 @@ jobs:
chmod -R 755 api
cd api
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
# Exclude dependencies used in scripts, code, workspace packages, and @librechat/api imports
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt ../packages_api_used_code.txt ../librechat_api_deps.txt 2>/dev/null | sort -u) || echo "")
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV

View file

@ -20,11 +20,17 @@ const {
isAgentsEndpoint,
supportsBalanceCheck,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const {
updateMessage,
getMessages,
saveMessage,
saveConvo,
getConvo,
getFiles,
} = require('~/models');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { checkBalance } = require('~/models/balanceMethods');
const { truncateToolCallOutputs } = require('./prompts');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
class BaseClient {

View file

@ -2,10 +2,9 @@ const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { sleep } = require('@librechat/agents');
const { resolveHeaders } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { resolveHeaders, deriveBaseURL } = require('@librechat/api');
const ollamaPayloadSchema = z.object({
mirostat: z.number().optional(),

View file

@ -5,9 +5,8 @@ const { v4: uuidv4 } = require('uuid');
const { ProxyAgent, fetch } = require('undici');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { getImageBasename } = require('@librechat/api');
const { getImageBasename, extractBaseURL } = require('@librechat/api');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const extractBaseURL = require('~/utils/extractBaseURL');
const displayMessage =
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";

View file

@ -6,11 +6,10 @@ const { ProxyAgent } = require('undici');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logAxiosError, oaiToolkit } = require('@librechat/api');
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
const { logAxiosError, oaiToolkit, extractBaseURL } = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const extractBaseURL = require('~/utils/extractBaseURL');
const { getFiles } = require('~/models/File');
const { getFiles } = require('~/models');
const displayMessage =
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";

View file

@ -5,7 +5,7 @@ const { logger } = require('@librechat/data-schemas');
const { generateShortLivedToken } = require('@librechat/api');
const { Tools, EToolResources } = require('librechat-data-provider');
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
const { getFiles } = require('~/models/File');
const { getFiles } = require('~/models');
/**
*

View file

@ -1,7 +1,7 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { createModels } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { createModels, createMethods } = require('@librechat/data-schemas');
const {
SystemRoles,
ResourceType,
@ -9,8 +9,6 @@ const {
PrincipalType,
} = require('librechat-data-provider');
const { grantPermission } = require('~/server/services/PermissionService');
const { getFiles, createFile } = require('./File');
const { seedDefaultRoles } = require('~/models');
const { createAgent } = require('./Agent');
let File;
@ -18,6 +16,10 @@ let Agent;
let AclEntry;
let User;
let modelsToCleanup = [];
let methods;
let getFiles;
let createFile;
let seedDefaultRoles;
describe('File Access Control', () => {
let mongoServer;
@ -42,6 +44,12 @@ describe('File Access Control', () => {
AclEntry = dbModels.AclEntry;
User = dbModels.User;
// Create methods from data-schemas (includes file methods)
methods = createMethods(mongoose);
getFiles = methods.getFiles;
createFile = methods.createFile;
seedDefaultRoles = methods.seedDefaultRoles;
// Seed default roles
await seedDefaultRoles();
});

View file

@ -2,15 +2,6 @@ const mongoose = require('mongoose');
const { createMethods } = require('@librechat/data-schemas');
const methods = createMethods(mongoose);
const { comparePassword } = require('./userMethods');
const {
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
} = require('./File');
const {
getMessage,
getMessages,
@ -34,13 +25,6 @@ module.exports = {
...methods,
seedDatabase,
comparePassword,
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
getMessage,
getMessages,

View file

@ -1,6 +1,5 @@
const mongoose = require('mongoose');
const { getRandomValues } = require('@librechat/api');
const { logger, hashToken } = require('@librechat/data-schemas');
const { logger, hashToken, getRandomValues } = require('@librechat/data-schemas');
const { createToken, findToken } = require('~/models');
/**

View file

@ -1,11 +1,10 @@
const { encryptV3 } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { encryptV3, logger } = require('@librechat/data-schemas');
const {
verifyTOTP,
getTOTPSecret,
verifyBackupCode,
generateTOTPSecret,
generateBackupCodes,
generateTOTPSecret,
verifyBackupCode,
getTOTPSecret,
verifyTOTP,
} = require('~/server/services/twoFactorService');
const { getUserById, updateUser } = require('~/models');

View file

@ -9,9 +9,11 @@ const {
const {
deleteAllUserSessions,
deleteAllSharedLinks,
updateUserPlugins,
deleteUserById,
deleteMessages,
deletePresets,
deleteUserKey,
deleteConvos,
deleteFiles,
updateUser,
@ -31,7 +33,6 @@ const {
User,
} = require('~/db/models');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { getMCPManager, getFlowStateManager, getMCPServersRegistry } = require('~/config');
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
@ -114,13 +115,7 @@ const updateUserPluginsController = async (req, res) => {
const { pluginKey, action, auth, isEntityTool } = req.body;
try {
if (!isEntityTool) {
const userPluginsService = await updateUserPluginsService(user, pluginKey, action);
if (userPluginsService instanceof Error) {
logger.error('[userPluginsService]', userPluginsService);
const { status, message } = normalizeHttpError(userPluginsService);
return res.status(status).send({ message });
}
await updateUserPlugins(user._id, user.plugins, pluginKey, action);
}
if (auth == null) {

View file

@ -10,7 +10,9 @@ const {
sanitizeTitle,
resolveHeaders,
createSafeUser,
initializeAgent,
getBalanceConfig,
getProviderConfig,
memoryInstructions,
getTransactionsConfig,
createMemoryProcessor,
@ -38,17 +40,16 @@ const {
bedrockInputSchema,
removeNullishValues,
} = require('librechat-data-provider');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { getProviderConfig } = require('~/server/services/Endpoints');
const { createContextHandlers } = require('~/app/clients/prompts');
const { checkCapability } = require('~/server/services/Config');
const { getConvoFiles } = require('~/models/Conversation');
const BaseClient = require('~/app/clients/BaseClient');
const { getRoleByName } = require('~/models/Role');
const { loadAgent } = require('~/models/Agent');
const { getMCPManager } = require('~/config');
const db = require('~/models');
const omitTitleOptions = new Set([
'stream',
@ -542,18 +543,28 @@ class AgentClient extends BaseClient {
);
}
const agent = await initializeAgent({
req: this.options.req,
res: this.options.res,
agent: prelimAgent,
allowedProviders,
endpointOption: {
endpoint:
prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID
? EModelEndpoint.agents
: memoryConfig.agent?.provider,
const agent = await initializeAgent(
{
req: this.options.req,
res: this.options.res,
agent: prelimAgent,
allowedProviders,
endpointOption: {
endpoint:
prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID
? EModelEndpoint.agents
: memoryConfig.agent?.provider,
},
},
});
{
getConvoFiles,
getFiles: db.getFiles,
getUserKey: db.getUserKey,
updateFilesUsage: db.updateFilesUsage,
getUserKeyValues: db.getUserKeyValues,
getToolFilesByIds: db.getToolFilesByIds,
},
);
if (!agent) {
logger.warn(
@ -588,9 +599,9 @@ class AgentClient extends BaseClient {
messageId,
conversationId,
memoryMethods: {
setMemory,
deleteMemory,
getFormattedMemories,
setMemory: db.setMemory,
deleteMemory: db.deleteMemory,
getFormattedMemories: db.getFormattedMemories,
},
res: this.options.res,
});
@ -1040,7 +1051,7 @@ class AgentClient extends BaseClient {
throw new Error('Run not initialized');
}
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
const { req, res, agent } = this.options;
const { req, agent } = this.options;
const appConfig = req.config;
let endpoint = agent.endpoint;
@ -1097,11 +1108,12 @@ class AgentClient extends BaseClient {
const options = await titleProviderConfig.getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: endpoint,
overrideModel: clientOptions.model,
endpointOption: { model_parameters: clientOptions },
endpoint,
model_parameters: clientOptions,
db: {
getUserKey: db.getUserKey,
getUserKeyValues: db.getUserKeyValues,
},
});
let provider = options.provider ?? titleProviderConfig.overrideProvider ?? agent.provider;

View file

@ -38,14 +38,13 @@ const {
grantPermission,
} = require('~/server/services/PermissionService');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { getCategoriesWithCounts, deleteFileByFilter } = require('~/models');
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
const { getFileStrategy } = require('~/server/utils/getFileStrategy');
const { refreshS3Url } = require('~/server/services/Files/S3/crud');
const { filterFile } = require('~/server/services/Files/process');
const { updateAction, getActions } = require('~/models/Action');
const { getCachedTools } = require('~/server/services/Config');
const { deleteFileByFilter } = require('~/models/File');
const { getCategoriesWithCounts } = require('~/models');
const { getLogStores } = require('~/cache');
const systemTools = {

View file

@ -9,7 +9,7 @@ const { updateAssistantDoc, getAssistants } = require('~/models/Assistant');
const { getOpenAIClient, fetchAssistants } = require('./helpers');
const { getCachedTools } = require('~/server/services/Config');
const { manifestToolMap } = require('~/app/clients/tools');
const { deleteFileByFilter } = require('~/models/File');
const { deleteFileByFilter } = require('~/models');
/**
* Create an assistant.

View file

@ -2,7 +2,7 @@ const { logger } = require('@librechat/data-schemas');
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
const { getEffectivePermissions } = require('~/server/services/PermissionService');
const { getAgents } = require('~/models/Agent');
const { getFiles } = require('~/models/File');
const { getFiles } = require('~/models');
/**
* Checks if user has access to a file through agent permissions

View file

@ -4,7 +4,7 @@ const { MongoMemoryServer } = require('mongodb-memory-server');
const { fileAccess } = require('./fileAccess');
const { User, Role, AclEntry } = require('~/db/models');
const { createAgent } = require('~/models/Agent');
const { createFile } = require('~/models/File');
const { createFile } = require('~/models');
describe('fileAccess middleware', () => {
let mongoServer;

View file

@ -8,22 +8,11 @@ const {
} = require('librechat-data-provider');
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
const assistants = require('~/server/services/Endpoints/assistants');
const { processFiles } = require('~/server/services/Files/process');
const anthropic = require('~/server/services/Endpoints/anthropic');
const bedrock = require('~/server/services/Endpoints/bedrock');
const openAI = require('~/server/services/Endpoints/openAI');
const agents = require('~/server/services/Endpoints/agents');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');
const { updateFilesUsage } = require('~/models');
const buildFunction = {
[EModelEndpoint.openAI]: openAI.buildOptions,
[EModelEndpoint.google]: google.buildOptions,
[EModelEndpoint.custom]: custom.buildOptions,
[EModelEndpoint.agents]: agents.buildOptions,
[EModelEndpoint.bedrock]: bedrock.buildOptions,
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
[EModelEndpoint.anthropic]: anthropic.buildOptions,
[EModelEndpoint.assistants]: assistants.buildOptions,
[EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
};
@ -93,7 +82,7 @@ async function buildEndpointOption(req, res, next) {
req.body.endpointOption = await builder(endpoint, parsedBody, endpointType);
if (req.body.files && !isAgents) {
req.body.endpointOption.attachments = processFiles(req.body.files);
req.body.endpointOption.attachments = updateFilesUsage(req.body.files);
}
next();

View file

@ -6,7 +6,7 @@ const { createMethods } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { AccessRoleIds, ResourceType, PrincipalType } = require('librechat-data-provider');
const { createAgent } = require('~/models/Agent');
const { createFile } = require('~/models/File');
const { createFile } = require('~/models');
// Only mock the external dependencies that we don't want to test
jest.mock('~/server/services/Files/process', () => ({

View file

@ -26,7 +26,7 @@ const { checkPermission } = require('~/server/services/PermissionService');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud');
const { hasAccessToFilesViaAgent } = require('~/server/services/Files');
const { getFiles, batchUpdateFiles } = require('~/models/File');
const { getFiles, batchUpdateFiles } = require('~/models');
const { cleanFileName } = require('~/server/utils/files');
const { getAssistant } = require('~/models/Assistant');
const { getAgent } = require('~/models/Agent');

View file

@ -11,7 +11,7 @@ const {
PrincipalType,
} = require('librechat-data-provider');
const { createAgent } = require('~/models/Agent');
const { createFile } = require('~/models/File');
const { createFile } = require('~/models');
// Only mock the external dependencies that we don't want to test
jest.mock('~/server/services/Files/process', () => ({

View file

@ -1,7 +1,8 @@
const express = require('express');
const { updateUserKey, deleteUserKey, getUserKeyExpiry } = require('~/models');
const { requireJwtAuth } = require('~/server/middleware');
const router = express.Router();
const { updateUserKey, deleteUserKey, getUserKeyExpiry } = require('../services/UserService');
const { requireJwtAuth } = require('../middleware/');
router.put('/', requireJwtAuth, async (req, res) => {
await updateUserKey({ userId: req.user.id, ...req.body });

View file

@ -1,15 +1,9 @@
const jwt = require('jsonwebtoken');
const { nanoid } = require('nanoid');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { GraphEvents, sleep } = require('@librechat/agents');
const {
sendEvent,
encryptV2,
decryptV2,
logAxiosError,
refreshAccessToken,
} = require('@librechat/api');
const { logger, encryptV2, decryptV2 } = require('@librechat/data-schemas');
const { sendEvent, logAxiosError, refreshAccessToken } = require('@librechat/api');
const {
Time,
CacheKeys,

View file

@ -1,10 +1,9 @@
const { isUserProvided } = require('@librechat/api');
const { isUserProvided, fetchModels } = require('@librechat/api');
const {
EModelEndpoint,
extractEnvVariable,
normalizeEndpointName,
} = require('librechat-data-provider');
const { fetchModels } = require('~/server/services/ModelService');
const { getAppConfig } = require('./app');
/**

View file

@ -1,8 +1,11 @@
const { fetchModels } = require('~/server/services/ModelService');
const { fetchModels } = require('@librechat/api');
const loadConfigModels = require('./loadConfigModels');
const { getAppConfig } = require('./app');
jest.mock('~/server/services/ModelService');
jest.mock('@librechat/api', () => ({
...jest.requireActual('@librechat/api'),
fetchModels: jest.fn(),
}));
jest.mock('./app');
const exampleConfig = {

View file

@ -5,7 +5,7 @@ const {
getBedrockModels,
getOpenAIModels,
getGoogleModels,
} = require('~/server/services/ModelService');
} = require('@librechat/api');
/**
* Loads the default models for the application.

View file

@ -1,226 +0,0 @@
const { Providers } = require('@librechat/agents');
const {
primeResources,
getModelMaxTokens,
extractLibreChatParams,
filterFilesByEndpointConfig,
optionalChainWithEmptyCheck,
} = require('@librechat/api');
const {
ErrorTypes,
EModelEndpoint,
EToolResources,
paramEndpoints,
isAgentsEndpoint,
replaceSpecialVars,
providerEndpointMap,
} = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getProviderConfig } = require('~/server/services/Endpoints');
const { processFiles } = require('~/server/services/Files/process');
const { getFiles, getToolFilesByIds } = require('~/models/File');
const { getConvoFiles } = require('~/models/Conversation');
/**
* @param {object} params
* @param {ServerRequest} params.req
* @param {ServerResponse} params.res
* @param {Agent} params.agent
* @param {string | null} [params.conversationId]
* @param {Array<IMongoFile>} [params.requestFiles]
* @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools]
* @param {TEndpointOption} [params.endpointOption]
* @param {Set<string>} [params.allowedProviders]
* @param {boolean} [params.isInitialAgent]
* @returns {Promise<Agent & {
* tools: StructuredTool[],
* attachments: Array<MongoFile>,
* toolContextMap: Record<string, unknown>,
* maxContextTokens: number,
* userMCPAuthMap?: Record<string, Record<string, string>>
* }>}
*/
const initializeAgent = async ({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
isInitialAgent = false,
}) => {
const appConfig = req.config;
if (
isAgentsEndpoint(endpointOption?.endpoint) &&
allowedProviders.size > 0 &&
!allowedProviders.has(agent.provider)
) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles;
const _modelOptions = structuredClone(
Object.assign(
{ model: agent.model },
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
),
);
const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(_modelOptions);
const provider = agent.provider;
agent.endpoint = provider;
if (isInitialAgent && conversationId != null && resendFiles) {
const fileIds = (await getConvoFiles(conversationId)) ?? [];
/** @type {Set<EToolResources>} */
const toolResourceSet = new Set();
for (const tool of agent.tools) {
if (EToolResources[tool]) {
toolResourceSet.add(EToolResources[tool]);
}
}
const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet);
if (requestFiles.length || toolFiles.length) {
currentFiles = await processFiles(requestFiles.concat(toolFiles));
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = await processFiles(requestFiles);
}
if (currentFiles && currentFiles.length) {
let endpointType;
if (!paramEndpoints.has(agent.endpoint)) {
endpointType = EModelEndpoint.custom;
}
currentFiles = filterFilesByEndpointConfig(req, {
files: currentFiles,
endpoint: agent.endpoint,
endpointType,
});
}
const { attachments, tool_resources } = await primeResources({
req,
getFiles,
appConfig,
agentId: agent.id,
attachments: currentFiles,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
});
const {
tools: structuredTools,
toolContextMap,
userMCPAuthMap,
} = (await loadTools?.({
req,
res,
provider,
agentId: agent.id,
tools: agent.tools,
model: agent.model,
tool_resources,
})) ?? {};
const { getOptions, overrideProvider } = getProviderConfig({ provider, appConfig });
if (overrideProvider !== agent.provider) {
agent.provider = overrideProvider;
}
const _endpointOption =
isInitialAgent === true
? Object.assign({}, endpointOption, { model_parameters: modelOptions })
: { model_parameters: modelOptions };
const options = await getOptions({
req,
res,
optionsOnly: true,
overrideEndpoint: provider,
overrideModel: agent.model,
endpointOption: _endpointOption,
});
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : options.llmConfig?.model;
const maxOutputTokens = optionalChainWithEmptyCheck(
options.llmConfig?.maxOutputTokens,
options.llmConfig?.maxTokens,
0,
);
const agentMaxContextTokens = optionalChainWithEmptyCheck(
maxContextTokens,
getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig),
18000,
);
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').GenericTool[]} */
let tools = options.tools?.length ? options.tools : structuredTools;
if (
(agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
options.tools?.length &&
structuredTools?.length
) {
throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
} else if (
(agent.provider === Providers.OPENAI ||
agent.provider === Providers.AZURE ||
agent.provider === Providers.ANTHROPIC) &&
options.tools?.length &&
structuredTools?.length
) {
tools = structuredTools.concat(options.tools);
}
/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = { ...options.llmConfig };
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
agent.additional_instructions = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts,
});
}
return {
...agent,
tools,
attachments,
resendFiles,
userMCPAuthMap,
toolContextMap,
useLegacyContent: !!options.useLegacyContent,
maxContextTokens: Math.round((agentMaxContextTokens - maxOutputTokens) * 0.9),
};
};
module.exports = { initializeAgent };

View file

@ -1,6 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { createContentAggregator } = require('@librechat/agents');
const {
initializeAgent,
validateAgentModel,
getCustomEndpointConfig,
createSequentialChainEdges,
@ -15,12 +16,13 @@ const {
createToolEndCallback,
getDefaultHandlers,
} = require('~/server/controllers/agents/callbacks');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getConvoFiles } = require('~/models/Conversation');
const { getAgent } = require('~/models/Agent');
const { logViolation } = require('~/cache');
const db = require('~/models');
/**
* @param {AbortSignal} signal
@ -109,17 +111,27 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => {
/** @type {string} */
const conversationId = req.body.conversationId;
const primaryConfig = await initializeAgent({
req,
res,
loadTools,
requestFiles,
conversationId,
agent: primaryAgent,
endpointOption,
allowedProviders,
isInitialAgent: true,
});
const primaryConfig = await initializeAgent(
{
req,
res,
loadTools,
requestFiles,
conversationId,
agent: primaryAgent,
endpointOption,
allowedProviders,
isInitialAgent: true,
},
{
getConvoFiles,
getFiles: db.getFiles,
getUserKey: db.getUserKey,
updateFilesUsage: db.updateFilesUsage,
getUserKeyValues: db.getUserKeyValues,
getToolFilesByIds: db.getToolFilesByIds,
},
);
const agent_ids = primaryConfig.agent_ids;
let userMCPAuthMap = primaryConfig.userMCPAuthMap;
@ -142,16 +154,26 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => {
throw new Error(validationResult.error?.message);
}
const config = await initializeAgent({
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
});
const config = await initializeAgent(
{
req,
res,
agent,
loadTools,
requestFiles,
conversationId,
endpointOption,
allowedProviders,
},
{
getConvoFiles,
getFiles: db.getFiles,
getUserKey: db.getUserKey,
updateFilesUsage: db.updateFilesUsage,
getUserKeyValues: db.getUserKeyValues,
getToolFilesByIds: db.getToolFilesByIds,
},
);
if (userMCPAuthMap != null) {
Object.assign(userMCPAuthMap, config.userMCPAuthMap ?? {});
} else {

View file

@ -1,44 +0,0 @@
const { removeNullishValues, anthropicSettings } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const buildOptions = (endpoint, parsedBody) => {
const {
modelLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = anthropicSettings.resendFiles.default,
promptCache = anthropicSettings.promptCache.default,
thinking = anthropicSettings.thinking.default,
thinkingBudget = anthropicSettings.thinkingBudget.default,
iconURL,
greeting,
spec,
artifacts,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
endpoint,
modelLabel,
promptPrefix,
resendFiles,
promptCache,
thinking,
thinkingBudget,
iconURL,
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});
if (typeof artifacts === 'string') {
endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
}
return endpointOption;
};
module.exports = buildOptions;

View file

@ -1,9 +0,0 @@
const addTitle = require('./title');
const buildOptions = require('./build');
const initializeClient = require('./initialize');
module.exports = {
addTitle,
buildOptions,
initializeClient,
};

View file

@ -1,53 +0,0 @@
const { getLLMConfig } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const initializeClient = async ({ req, endpointOption, overrideModel }) => {
const appConfig = req.config;
const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
const expiresAt = req.body.key;
const isUserProvided = ANTHROPIC_API_KEY === 'user_provided';
const anthropicApiKey = isUserProvided
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.anthropic })
: ANTHROPIC_API_KEY;
if (!anthropicApiKey) {
throw new Error('Anthropic API key not provided. Please provide it again.');
}
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic);
}
let clientOptions = {};
/** @type {undefined | TBaseEndpoint} */
const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];
if (anthropicConfig) {
clientOptions._lc_stream_delay = anthropicConfig.streamRate;
clientOptions.titleModel = anthropicConfig.titleModel;
}
const allConfig = appConfig.endpoints?.all;
if (allConfig) {
clientOptions._lc_stream_delay = allConfig.streamRate;
}
clientOptions = Object.assign(
{
proxy: PROXY ?? null,
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
modelOptions: endpointOption?.model_parameters ?? {},
},
clientOptions,
);
if (overrideModel) {
clientOptions.modelOptions.model = overrideModel;
}
clientOptions.modelOptions.user = req.user.id;
return getLLMConfig(anthropicApiKey, clientOptions);
};
module.exports = initializeClient;

View file

@ -1,35 +0,0 @@
const { isEnabled } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const { saveConvo } = require('~/models');
const addTitle = async (req, { text, response, client }) => {
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return;
}
if (client.options.titleConvo === false) {
return;
}
const titleCache = getLogStores(CacheKeys.GEN_TITLE);
const key = `${req.user.id}-${response.conversationId}`;
const title = await client.titleConvo({
text,
responseText: response?.text ?? '',
conversationId: response.conversationId,
});
await titleCache.set(key, title, 120000);
await saveConvo(
req,
{
conversationId: response.conversationId,
title,
},
{ context: 'api/server/services/Endpoints/anthropic/addTitle.js' },
);
};
module.exports = addTitle;

View file

@ -1,12 +1,8 @@
const OpenAI = require('openai');
const { ProxyAgent } = require('undici');
const { isUserProvided } = require('@librechat/api');
const { isUserProvided, checkUserKeyExpiry } = require('@librechat/api');
const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider');
const {
getUserKeyValues,
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const { getUserKeyValues, getUserKeyExpiry } = require('~/models');
const initializeClient = async ({ req, res, version }) => {
const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;

View file

@ -1,12 +1,13 @@
const OpenAI = require('openai');
const { ProxyAgent } = require('undici');
const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
isUserProvided,
resolveHeaders,
constructAzureURL,
checkUserKeyExpiry,
getUserKeyValues,
getUserKeyExpiry,
} = require('~/server/services/UserService');
} = require('@librechat/api');
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const { getUserKeyValues, getUserKeyExpiry } = require('~/models');
class Files {
constructor(client) {

View file

@ -1,39 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const buildOptions = (endpoint, parsedBody) => {
const {
modelLabel: name,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
greeting,
spec,
artifacts,
...model_parameters
} = parsedBody;
const endpointOption = removeNullishValues({
endpoint,
name,
resendFiles,
imageDetail,
iconURL,
greeting,
spec,
promptPrefix,
maxContextTokens,
fileTokenLimit,
model_parameters,
});
if (typeof artifacts === 'string') {
endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
}
return endpointOption;
};
module.exports = { buildOptions };

View file

@ -1,7 +0,0 @@
const build = require('./build');
const initialize = require('./initialize');
module.exports = {
...build,
...initialize,
};

View file

@ -1,79 +0,0 @@
const { getModelMaxTokens } = require('@librechat/api');
const { createContentAggregator } = require('@librechat/agents');
const {
EModelEndpoint,
providerEndpointMap,
getResponseSender,
} = require('librechat-data-provider');
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
const getOptions = require('~/server/services/Endpoints/bedrock/options');
const AgentClient = require('~/server/controllers/agents/client');
const initializeClient = async ({ req, res, endpointOption }) => {
if (!endpointOption) {
throw new Error('Endpoint option not provided');
}
/** @type {Array<UsageMetadata>} */
const collectedUsage = [];
const { contentParts, aggregateContent } = createContentAggregator();
const eventHandlers = getDefaultHandlers({ res, aggregateContent, collectedUsage });
/** @type {Agent} */
const agent = {
id: EModelEndpoint.bedrock,
name: endpointOption.name,
provider: EModelEndpoint.bedrock,
endpoint: EModelEndpoint.bedrock,
instructions: endpointOption.promptPrefix,
model: endpointOption.model_parameters.model,
model_parameters: endpointOption.model_parameters,
};
if (typeof endpointOption.artifactsPrompt === 'string' && endpointOption.artifactsPrompt) {
agent.instructions = `${agent.instructions ?? ''}\n${endpointOption.artifactsPrompt}`.trim();
}
// TODO: pass-in override settings that are specific to current run
const options = await getOptions({
req,
res,
endpointOption,
});
agent.model_parameters = Object.assign(agent.model_parameters, options.llmConfig);
if (options.configOptions) {
agent.model_parameters.configuration = options.configOptions;
}
const sender =
agent.name ??
getResponseSender({
...endpointOption,
model: endpointOption.model_parameters.model,
});
const client = new AgentClient({
req,
res,
agent,
sender,
// tools,
contentParts,
eventHandlers,
collectedUsage,
spec: endpointOption.spec,
iconURL: endpointOption.iconURL,
endpoint: EModelEndpoint.bedrock,
resendFiles: endpointOption.resendFiles,
maxContextTokens:
endpointOption.maxContextTokens ??
agent.max_context_tokens ??
getModelMaxTokens(agent.model_parameters.model, providerEndpointMap[agent.provider]) ??
4000,
attachments: endpointOption.attachments,
});
return { client };
};
module.exports = { initializeClient };

View file

@ -1,42 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const buildOptions = (endpoint, parsedBody, endpointType) => {
const {
modelLabel,
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
greeting,
spec,
artifacts,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
endpoint,
endpointType,
modelLabel,
chatGptLabel,
promptPrefix,
resendFiles,
imageDetail,
iconURL,
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});
if (typeof artifacts === 'string') {
endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
}
return endpointOption;
};
module.exports = buildOptions;

View file

@ -1,7 +0,0 @@
const initializeClient = require('./initialize');
const buildOptions = require('./build');
module.exports = {
initializeClient,
buildOptions,
};

View file

@ -1,145 +0,0 @@
const { isUserProvided, getOpenAIConfig, getCustomEndpointConfig } = require('@librechat/api');
const {
CacheKeys,
ErrorTypes,
envVarRegex,
FetchTokenConfig,
extractEnvVariable,
} = require('librechat-data-provider');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const { fetchModels } = require('~/server/services/ModelService');
const getLogStores = require('~/cache/getLogStores');
const { PROXY } = process.env;
const initializeClient = async ({ req, endpointOption, overrideEndpoint }) => {
const appConfig = req.config;
const { key: expiresAt } = req.body;
const endpoint = overrideEndpoint ?? req.body.endpoint;
const endpointConfig = getCustomEndpointConfig({
endpoint,
appConfig,
});
if (!endpointConfig) {
throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
}
const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);
if (CUSTOM_API_KEY.match(envVarRegex)) {
throw new Error(`Missing API Key for ${endpoint}.`);
}
if (CUSTOM_BASE_URL.match(envVarRegex)) {
throw new Error(`Missing Base URL for ${endpoint}.`);
}
const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);
let userValues = null;
if (expiresAt && (userProvidesKey || userProvidesURL)) {
checkUserKeyExpiry(expiresAt, endpoint);
userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
}
let apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
let baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;
if (userProvidesKey & !apiKey) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_USER_KEY,
}),
);
}
if (userProvidesURL && !baseURL) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_BASE_URL,
}),
);
}
if (!apiKey) {
throw new Error(`${endpoint} API key not provided.`);
}
if (!baseURL) {
throw new Error(`${endpoint} Base URL not provided.`);
}
const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
const tokenKey =
!endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL)
? `${endpoint}:${req.user.id}`
: endpoint;
let endpointTokenConfig =
!endpointConfig.tokenConfig &&
FetchTokenConfig[endpoint.toLowerCase()] &&
(await cache.get(tokenKey));
if (
FetchTokenConfig[endpoint.toLowerCase()] &&
endpointConfig &&
endpointConfig.models.fetch &&
!endpointTokenConfig
) {
await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey });
endpointTokenConfig = await cache.get(tokenKey);
}
const customOptions = {
headers: endpointConfig.headers,
addParams: endpointConfig.addParams,
dropParams: endpointConfig.dropParams,
customParams: endpointConfig.customParams,
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,
summaryModel: endpointConfig.summaryModel,
modelDisplayLabel: endpointConfig.modelDisplayLabel,
titleMethod: endpointConfig.titleMethod ?? 'completion',
contextStrategy: endpointConfig.summarize ? 'summarize' : null,
directEndpoint: endpointConfig.directEndpoint,
titleMessageRole: endpointConfig.titleMessageRole,
streamRate: endpointConfig.streamRate,
endpointTokenConfig,
};
const allConfig = appConfig.endpoints?.all;
if (allConfig) {
customOptions.streamRate = allConfig.streamRate;
}
let clientOptions = {
reverseProxyUrl: baseURL ?? null,
proxy: PROXY ?? null,
...customOptions,
...endpointOption,
};
const modelOptions = endpointOption?.model_parameters ?? {};
clientOptions = Object.assign(
{
modelOptions,
},
clientOptions,
);
clientOptions.modelOptions.user = req.user.id;
const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
if (options != null) {
options.useLegacyContent = true;
options.endpointTokenConfig = endpointTokenConfig;
}
if (clientOptions.streamRate) {
options.llmConfig._lc_stream_delay = clientOptions.streamRate;
}
return options;
};
module.exports = initializeClient;

View file

@ -1,39 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const buildOptions = (endpoint, parsedBody) => {
const {
examples,
modelLabel,
resendFiles = true,
promptPrefix,
iconURL,
greeting,
spec,
artifacts,
maxContextTokens,
fileTokenLimit,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
examples,
endpoint,
modelLabel,
resendFiles,
promptPrefix,
iconURL,
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});
if (typeof artifacts === 'string') {
endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
}
return endpointOption;
};
module.exports = buildOptions;

View file

@ -1,9 +0,0 @@
const addTitle = require('./title');
const buildOptions = require('./build');
const initializeClient = require('./initialize');
module.exports = {
addTitle,
buildOptions,
initializeClient,
};

View file

@ -1,83 +0,0 @@
const path = require('path');
const { EModelEndpoint, AuthKeys } = require('librechat-data-provider');
const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const initializeClient = async ({ req, endpointOption, overrideModel }) => {
const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env;
const isUserProvided = GOOGLE_KEY === 'user_provided';
const { key: expiresAt } = req.body;
let userKey = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.google);
userKey = await getUserKey({ userId: req.user.id, name: EModelEndpoint.google });
}
let serviceKey = {};
/** Check if GOOGLE_KEY is provided at all (including 'user_provided') */
const isGoogleKeyProvided =
(GOOGLE_KEY && GOOGLE_KEY.trim() !== '') || (isUserProvided && userKey != null);
if (!isGoogleKeyProvided) {
/** Only attempt to load service key if GOOGLE_KEY is not provided */
try {
const serviceKeyPath =
process.env.GOOGLE_SERVICE_KEY_FILE ||
path.join(__dirname, '../../../..', 'data', 'auth.json');
serviceKey = await loadServiceKey(serviceKeyPath);
if (!serviceKey) {
serviceKey = {};
}
} catch (_e) {
// Service key loading failed, but that's okay if not required
serviceKey = {};
}
}
const credentials = isUserProvided
? userKey
: {
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
let clientOptions = {};
const appConfig = req.config;
/** @type {undefined | TBaseEndpoint} */
const allConfig = appConfig.endpoints?.all;
/** @type {undefined | TBaseEndpoint} */
const googleConfig = appConfig.endpoints?.[EModelEndpoint.google];
if (googleConfig) {
clientOptions.streamRate = googleConfig.streamRate;
clientOptions.titleModel = googleConfig.titleModel;
}
if (allConfig) {
clientOptions.streamRate = allConfig.streamRate;
}
clientOptions = {
reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? null,
authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? null,
proxy: PROXY ?? null,
...clientOptions,
...endpointOption,
};
clientOptions = Object.assign(
{
modelOptions: endpointOption?.model_parameters ?? {},
},
clientOptions,
);
if (overrideModel) {
clientOptions.modelOptions.model = overrideModel;
}
return getGoogleConfig(credentials, clientOptions);
};
module.exports = initializeClient;

View file

@ -1,60 +0,0 @@
const { isEnabled } = require('@librechat/api');
const { EModelEndpoint, CacheKeys, Constants, googleSettings } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const initializeClient = require('./initialize');
const { saveConvo } = require('~/models');
const addTitle = async (req, { text, response, client }) => {
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return;
}
if (client.options.titleConvo === false) {
return;
}
const { GOOGLE_TITLE_MODEL } = process.env ?? {};
const appConfig = req.config;
const providerConfig = appConfig.endpoints?.[EModelEndpoint.google];
let model =
providerConfig?.titleModel ??
GOOGLE_TITLE_MODEL ??
client.options?.modelOptions.model ??
googleSettings.model.default;
if (GOOGLE_TITLE_MODEL === Constants.CURRENT_MODEL) {
model = client.options?.modelOptions.model;
}
const titleEndpointOptions = {
...client.options,
modelOptions: { ...client.options?.modelOptions, model: model },
attachments: undefined, // After a response, this is set to an empty array which results in an error during setOptions
};
const { client: titleClient } = await initializeClient({
req,
res: response,
endpointOption: titleEndpointOptions,
});
const titleCache = getLogStores(CacheKeys.GEN_TITLE);
const key = `${req.user.id}-${response.conversationId}`;
const title = await titleClient.titleConvo({
text,
responseText: response?.text ?? '',
conversationId: response.conversationId,
});
await titleCache.set(key, title, 120000);
await saveConvo(
req,
{
conversationId: response.conversationId,
title,
},
{ context: 'api/server/services/Endpoints/google/addTitle.js' },
);
};
module.exports = addTitle;

View file

@ -1,42 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const buildOptions = (endpoint, parsedBody) => {
const {
modelLabel,
chatGptLabel,
promptPrefix,
maxContextTokens,
fileTokenLimit,
resendFiles = true,
imageDetail,
iconURL,
greeting,
spec,
artifacts,
...modelOptions
} = parsedBody;
const endpointOption = removeNullishValues({
endpoint,
modelLabel,
chatGptLabel,
promptPrefix,
resendFiles,
imageDetail,
iconURL,
greeting,
spec,
maxContextTokens,
fileTokenLimit,
modelOptions,
});
if (typeof artifacts === 'string') {
endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts });
}
return endpointOption;
};
module.exports = buildOptions;

View file

@ -1,9 +0,0 @@
const addTitle = require('./title');
const buildOptions = require('./build');
const initializeClient = require('./initialize');
module.exports = {
addTitle,
buildOptions,
initializeClient,
};

View file

@ -1,147 +0,0 @@
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
isEnabled,
resolveHeaders,
isUserProvided,
getOpenAIConfig,
getAzureCredentials,
} = require('@librechat/api');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const initializeClient = async ({ req, endpointOption, overrideEndpoint, overrideModel }) => {
const appConfig = req.config;
const {
PROXY,
OPENAI_API_KEY,
AZURE_API_KEY,
OPENAI_REVERSE_PROXY,
AZURE_OPENAI_BASEURL,
OPENAI_SUMMARIZE,
DEBUG_OPENAI,
} = process.env;
const { key: expiresAt } = req.body;
const modelName = overrideModel ?? req.body.model;
const endpoint = overrideEndpoint ?? req.body.endpoint;
const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null;
const credentials = {
[EModelEndpoint.openAI]: OPENAI_API_KEY,
[EModelEndpoint.azureOpenAI]: AZURE_API_KEY,
};
const baseURLOptions = {
[EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY,
[EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL,
};
const userProvidesKey = isUserProvided(credentials[endpoint]);
const userProvidesURL = isUserProvided(baseURLOptions[endpoint]);
let userValues = null;
if (expiresAt && (userProvidesKey || userProvidesURL)) {
checkUserKeyExpiry(expiresAt, endpoint);
userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
}
let apiKey = userProvidesKey ? userValues?.apiKey : credentials[endpoint];
let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint];
let clientOptions = {
contextStrategy,
proxy: PROXY ?? null,
debug: isEnabled(DEBUG_OPENAI),
reverseProxyUrl: baseURL ? baseURL : null,
...endpointOption,
};
const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
/** @type {false | TAzureConfig} */
const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
let serverless = false;
if (isAzureOpenAI && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless: _serverless,
} = mapModelToAzureConfig({
modelName,
modelGroupMap,
groupMap,
});
serverless = _serverless;
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({
headers: { ...headers, ...(clientOptions.headers ?? {}) },
user: req.user,
});
clientOptions.titleConvo = azureConfig.titleConvo;
clientOptions.titleModel = azureConfig.titleModel;
const azureRate = modelName.includes('gpt-4') ? 30 : 17;
clientOptions.streamRate = azureConfig.streamRate ?? azureRate;
clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';
const groupName = modelGroupMap[modelName].group;
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
apiKey = azureOptions.azureOpenAIApiKey;
clientOptions.azure = !serverless && azureOptions;
if (serverless === true) {
clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
clientOptions.headers['api-key'] = apiKey;
}
} else if (isAzureOpenAI) {
clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials();
apiKey = clientOptions.azure.azureOpenAIApiKey;
}
/** @type {undefined | TBaseEndpoint} */
const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];
if (!isAzureOpenAI && openAIConfig) {
clientOptions.streamRate = openAIConfig.streamRate;
clientOptions.titleModel = openAIConfig.titleModel;
}
const allConfig = appConfig.endpoints?.all;
if (allConfig) {
clientOptions.streamRate = allConfig.streamRate;
}
if (userProvidesKey && !apiKey) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_USER_KEY,
}),
);
}
if (!apiKey) {
throw new Error(`${endpoint} API Key not provided.`);
}
const modelOptions = endpointOption?.model_parameters ?? {};
modelOptions.model = modelName;
clientOptions = Object.assign({ modelOptions }, clientOptions);
clientOptions.modelOptions.user = req.user.id;
const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
if (options != null && serverless === true) {
options.useLegacyContent = true;
}
const streamRate = clientOptions.streamRate;
if (streamRate) {
options.llmConfig._lc_stream_delay = streamRate;
}
return options;
};
module.exports = initializeClient;

View file

@ -1,35 +0,0 @@
const { isEnabled } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const { saveConvo } = require('~/models');
const addTitle = async (req, { text, response, client }) => {
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return;
}
if (client.options.titleConvo === false) {
return;
}
const titleCache = getLogStores(CacheKeys.GEN_TITLE);
const key = `${req.user.id}-${response.conversationId}`;
const title = await client.titleConvo({
text,
responseText: response?.text ?? '',
conversationId: response.conversationId,
});
await titleCache.set(key, title, 120000);
await saveConvo(
req,
{
conversationId: response.conversationId,
title,
},
{ context: 'api/server/services/Endpoints/openAI/addTitle.js' },
);
};
module.exports = addTitle;

View file

@ -14,7 +14,7 @@ const {
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { convertImage } = require('~/server/services/Files/images/convert');
const { createFile, getFiles, updateFile } = require('~/models/File');
const { createFile, getFiles, updateFile } = require('~/models');
/**
* Process OpenAI image files, convert to target format, save and return file metadata.

View file

@ -28,8 +28,8 @@ const {
const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2');
const { addAgentResourceFile, removeAgentResourceFiles } = require('~/models/Agent');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { createFile, updateFileUsage, deleteFiles } = require('~/models');
const { getFileStrategy } = require('~/server/utils/getFileStrategy');
const { checkCapability } = require('~/server/services/Config');
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
@ -60,45 +60,6 @@ const createSanitizedUploadWrapper = (uploadFunction) => {
};
};
/**
*
* @param {Array<MongoFile>} files
* @param {Array<string>} [fileIds]
* @returns
*/
const processFiles = async (files, fileIds) => {
const promises = [];
const seen = new Set();
for (let file of files) {
const { file_id } = file;
if (seen.has(file_id)) {
continue;
}
seen.add(file_id);
promises.push(updateFileUsage({ file_id }));
}
if (!fileIds) {
const results = await Promise.all(promises);
// Filter out null results from failed updateFileUsage calls
return results.filter((result) => result != null);
}
for (let file_id of fileIds) {
if (seen.has(file_id)) {
continue;
}
seen.add(file_id);
promises.push(updateFileUsage({ file_id }));
}
// TODO: calculate token cost when image is first uploaded
const results = await Promise.all(promises);
// Filter out null results from failed updateFileUsage calls
return results.filter((result) => result != null);
};
/**
* Enqueues the delete operation to the leaky bucket queue if necessary, or adds it directly to promises.
*
@ -1057,7 +1018,6 @@ function filterFile({ req, image, isAvatar }) {
module.exports = {
filterFile,
processFiles,
processFileURL,
saveBase64Image,
processImageFile,

View file

@ -1,248 +0,0 @@
// Mock the updateFileUsage function before importing the actual processFiles
jest.mock('~/models/File', () => ({
updateFileUsage: jest.fn(),
}));
// Mock winston and logger configuration to avoid dependency issues
jest.mock('~/config', () => ({
logger: {
info: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
error: jest.fn(),
},
}));
// Mock all other dependencies that might cause issues
jest.mock('librechat-data-provider', () => ({
isUUID: { parse: jest.fn() },
megabyte: 1024 * 1024,
PrincipalType: {
USER: 'user',
GROUP: 'group',
PUBLIC: 'public',
},
PrincipalModel: {
USER: 'User',
GROUP: 'Group',
},
ResourceType: {
AGENT: 'agent',
PROJECT: 'project',
FILE: 'file',
PROMPTGROUP: 'promptGroup',
},
FileContext: { message_attachment: 'message_attachment' },
FileSources: { local: 'local' },
EModelEndpoint: { assistants: 'assistants' },
EToolResources: { file_search: 'file_search' },
mergeFileConfig: jest.fn(),
removeNullishValues: jest.fn((obj) => obj),
isAssistantsEndpoint: jest.fn(),
Constants: { COMMANDS_MAX_LENGTH: 56 },
PermissionTypes: {
BOOKMARKS: 'BOOKMARKS',
PROMPTS: 'PROMPTS',
MEMORIES: 'MEMORIES',
MULTI_CONVO: 'MULTI_CONVO',
AGENTS: 'AGENTS',
TEMPORARY_CHAT: 'TEMPORARY_CHAT',
RUN_CODE: 'RUN_CODE',
WEB_SEARCH: 'WEB_SEARCH',
FILE_CITATIONS: 'FILE_CITATIONS',
},
Permissions: {
USE: 'USE',
OPT_OUT: 'OPT_OUT',
},
SystemRoles: {
USER: 'USER',
ADMIN: 'ADMIN',
},
}));
jest.mock('~/server/services/Files/images', () => ({
convertImage: jest.fn(),
resizeAndConvert: jest.fn(),
resizeImageBuffer: jest.fn(),
}));
jest.mock('~/server/controllers/assistants/v2', () => ({
addResourceFileId: jest.fn(),
deleteResourceFileId: jest.fn(),
}));
jest.mock('~/models/Agent', () => ({
addAgentResourceFile: jest.fn(),
removeAgentResourceFiles: jest.fn(),
}));
jest.mock('~/server/controllers/assistants/helpers', () => ({
getOpenAIClient: jest.fn(),
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
jest.mock('~/server/services/Config', () => ({
checkCapability: jest.fn(),
}));
jest.mock('~/server/utils/queue', () => ({
LB_QueueAsyncCall: jest.fn(),
}));
jest.mock('./strategies', () => ({
getStrategyFunctions: jest.fn(),
}));
jest.mock('~/server/utils', () => ({
determineFileType: jest.fn(),
}));
jest.mock('@librechat/api', () => ({
parseText: jest.fn(),
parseTextNative: jest.fn(),
}));
// Import the actual processFiles function after all mocks are set up
const { processFiles } = require('./process');
const { updateFileUsage } = require('~/models/File');
describe('processFiles', () => {
beforeEach(() => {
jest.clearAllMocks();
});
describe('null filtering functionality', () => {
it('should filter out null results from updateFileUsage when files do not exist', async () => {
const mockFiles = [
{ file_id: 'existing-file-1' },
{ file_id: 'non-existent-file' },
{ file_id: 'existing-file-2' },
];
// Mock updateFileUsage to return null for non-existent files
updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'non-existent-file') {
return Promise.resolve(null); // Simulate file not found in the database
}
return Promise.resolve({ file_id, usage: 1 });
});
const result = await processFiles(mockFiles);
expect(updateFileUsage).toHaveBeenCalledTimes(3);
expect(result).toEqual([
{ file_id: 'existing-file-1', usage: 1 },
{ file_id: 'existing-file-2', usage: 1 },
]);
// Critical test - ensure no null values in result
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(2); // Only valid files should be returned
});
it('should return empty array when all updateFileUsage calls return null', async () => {
const mockFiles = [{ file_id: 'non-existent-1' }, { file_id: 'non-existent-2' }];
// All updateFileUsage calls return null
updateFileUsage.mockResolvedValue(null);
const result = await processFiles(mockFiles);
expect(updateFileUsage).toHaveBeenCalledTimes(2);
expect(result).toEqual([]);
expect(result).not.toContain(null);
expect(result.length).toBe(0);
});
it('should work correctly when all files exist', async () => {
const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }];
updateFileUsage.mockImplementation(({ file_id }) => {
return Promise.resolve({ file_id, usage: 1 });
});
const result = await processFiles(mockFiles);
expect(result).toEqual([
{ file_id: 'file-1', usage: 1 },
{ file_id: 'file-2', usage: 1 },
]);
expect(result).not.toContain(null);
expect(result.length).toBe(2);
});
it('should handle fileIds parameter and filter nulls correctly', async () => {
const mockFiles = [{ file_id: 'file-1' }];
const mockFileIds = ['file-2', 'non-existent-file'];
updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'non-existent-file') {
return Promise.resolve(null);
}
return Promise.resolve({ file_id, usage: 1 });
});
const result = await processFiles(mockFiles, mockFileIds);
expect(result).toEqual([
{ file_id: 'file-1', usage: 1 },
{ file_id: 'file-2', usage: 1 },
]);
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(2);
});
it('should handle duplicate file_ids correctly', async () => {
const mockFiles = [
{ file_id: 'duplicate-file' },
{ file_id: 'duplicate-file' }, // Duplicate should be ignored
{ file_id: 'unique-file' },
];
updateFileUsage.mockImplementation(({ file_id }) => {
return Promise.resolve({ file_id, usage: 1 });
});
const result = await processFiles(mockFiles);
// Should only call updateFileUsage twice (duplicate ignored)
expect(updateFileUsage).toHaveBeenCalledTimes(2);
expect(result).toEqual([
{ file_id: 'duplicate-file', usage: 1 },
{ file_id: 'unique-file', usage: 1 },
]);
expect(result.length).toBe(2);
});
});
describe('edge cases', () => {
it('should handle empty files array', async () => {
const result = await processFiles([]);
expect(result).toEqual([]);
expect(updateFileUsage).not.toHaveBeenCalled();
});
it('should handle mixed null and undefined returns from updateFileUsage', async () => {
const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }, { file_id: 'file-3' }];
updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'file-1') return Promise.resolve(null);
if (file_id === 'file-2') return Promise.resolve(undefined);
return Promise.resolve({ file_id, usage: 1 });
});
const result = await processFiles(mockFiles);
expect(result).toEqual([{ file_id: 'file-3', usage: 1 }]);
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(1);
});
});
});

View file

@ -18,9 +18,6 @@ jest.mock('~/config', () => ({
defaults: {},
})),
}));
jest.mock('~/utils', () => ({
logAxiosError: jest.fn(),
}));
jest.mock('~/server/services/Config', () => ({}));
jest.mock('~/server/services/Files/strategies', () => ({

View file

@ -1,330 +0,0 @@
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logAxiosError, inputSchema, processModelData, isUserProvided } = require('@librechat/api');
const {
CacheKeys,
defaultModels,
KnownEndpoints,
EModelEndpoint,
} = require('librechat-data-provider');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { config } = require('./Config/EndpointService');
const getLogStores = require('~/cache/getLogStores');
const { extractBaseURL } = require('~/utils');
/**
* Splits a string by commas and trims each resulting value.
* @param {string} input - The input string to split.
* @returns {string[]} An array of trimmed values.
*/
const splitAndTrim = (input) => {
if (!input || typeof input !== 'string') {
return [];
}
return input
.split(',')
.map((item) => item.trim())
.filter(Boolean);
};
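
For example, the comma-separated model lists read from env vars further below are normalized like this:
splitAndTrim(' gpt-4o, gpt-4o-mini,,o1 '); // => ['gpt-4o', 'gpt-4o-mini', 'o1']
splitAndTrim(undefined); // => []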
/**
* Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration.
*
* @param {Object} params - The parameters for fetching the models.
* @param {string} params.user - The user ID to send to the API.
* @param {string} params.apiKey - The API key for authentication with the API.
* @param {string} params.baseURL - The base path URL for the API.
* @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'.
* @param {boolean} [params.direct=false] - Whether `directEndpoint` was configured
* @param {boolean} [params.azure=false] - Whether to fetch models from Azure.
* @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
* @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
* @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
* @param {Record<string, string>} [params.headers] - Optional headers for the request.
* @param {Partial<IUser>} [params.userObject] - Optional user object for header resolution.
* @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
* @async
*/
const fetchModels = async ({
user,
apiKey,
baseURL: _baseURL,
name = EModelEndpoint.openAI,
direct,
azure = false,
userIdQuery = false,
createTokenConfig = true,
tokenKey,
headers,
userObject,
}) => {
let models = [];
const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;
if (!baseURL && !azure) {
return models;
}
if (!apiKey) {
return models;
}
if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) {
try {
return await OllamaClient.fetchModels(baseURL, { headers, user: userObject });
} catch (ollamaError) {
const logMessage =
'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
logAxiosError({ message: logMessage, error: ollamaError });
}
}
try {
const options = {
headers: {
...(headers ?? {}),
},
timeout: 5000,
};
if (name === EModelEndpoint.anthropic) {
options.headers = {
'x-api-key': apiKey,
'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
};
} else {
options.headers.Authorization = `Bearer ${apiKey}`;
}
if (process.env.PROXY) {
options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
}
if (process.env.OPENAI_ORGANIZATION && baseURL.includes('openai')) {
options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
}
const url = new URL(`${baseURL.replace(/\/+$/, '')}${azure ? '' : '/models'}`);
if (user && userIdQuery) {
url.searchParams.append('user', user);
}
const res = await axios.get(url.toString(), options);
/** @type {z.infer<typeof inputSchema>} */
const input = res.data;
const validationResult = inputSchema.safeParse(input);
if (validationResult.success && createTokenConfig) {
const endpointTokenConfig = processModelData(input);
const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
await cache.set(tokenKey ?? name, endpointTokenConfig);
}
models = input.data.map((item) => item.id);
} catch (error) {
const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
logAxiosError({ message: logMessage, error });
}
return models;
};
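
A minimal call sketch, assuming an OpenAI-compatible endpoint that serves GET {baseURL}/models; the endpoint name and URL are illustrative.
const models = await fetchModels({
  user: 'user-123',
  apiKey: process.env.CUSTOM_API_KEY ?? '',
  baseURL: 'https://llm.internal.example/v1',
  name: 'MyCustomEndpoint',
  createTokenConfig: false,
});
// => e.g. ['my-model-small', 'my-model-large']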
/**
* Fetches models from the specified API path or Azure, based on the provided options.
* @async
* @function
* @param {object} opts - The options for fetching the models.
* @param {string} opts.user - The user ID to send to the API.
* @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
* @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
* @param {string[]} [_models=[]] - The models to use as a fallback.
*/
const fetchOpenAIModels = async (opts, _models = []) => {
let models = _models.slice() ?? [];
const { openAIApiKey } = config;
let apiKey = openAIApiKey;
const openaiBaseURL = 'https://api.openai.com/v1';
let baseURL = openaiBaseURL;
let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;
if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
} else if (opts.azure) {
return models;
// const azure = getAzureCredentials();
// baseURL = (genAzureChatCompletion(azure))
// .split('/deployments')[0]
// .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`);
// apiKey = azureOpenAIApiKey;
}
if (reverseProxyUrl) {
baseURL = extractBaseURL(reverseProxyUrl);
}
const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
const cachedModels = await modelsCache.get(baseURL);
if (cachedModels) {
return cachedModels;
}
if (baseURL || opts.azure) {
models = await fetchModels({
apiKey,
baseURL,
azure: opts.azure,
user: opts.user,
name: EModelEndpoint.openAI,
});
}
if (models.length === 0) {
return _models;
}
if (baseURL === openaiBaseURL) {
const regex = /(text-davinci-003|gpt-|o\d+)/;
const excludeRegex = /audio|realtime/;
models = models.filter((model) => regex.test(model) && !excludeRegex.test(model));
const instructModels = models.filter((model) => model.includes('instruct'));
const otherModels = models.filter((model) => !model.includes('instruct'));
models = otherModels.concat(instructModels);
}
await modelsCache.set(baseURL, models);
return models;
};
/**
* Loads the default models for the application.
* @async
* @function
* @param {object} opts - The options for fetching the models.
* @param {string} opts.user - The user ID to send to the API.
* @param {boolean} [opts.azure=false] - Whether to fetch models from Azure.
* @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint.
*/
const getOpenAIModels = async (opts) => {
let models = defaultModels[EModelEndpoint.openAI];
if (opts.assistants) {
models = defaultModels[EModelEndpoint.assistants];
} else if (opts.azure) {
models = defaultModels[EModelEndpoint.azureAssistants];
}
let key;
if (opts.assistants) {
key = 'ASSISTANTS_MODELS';
} else if (opts.azure) {
key = 'AZURE_OPENAI_MODELS';
} else {
key = 'OPENAI_MODELS';
}
if (process.env[key]) {
models = splitAndTrim(process.env[key]);
return models;
}
if (config.userProvidedOpenAI) {
return models;
}
return await fetchOpenAIModels(opts, models);
};
/**
* Fetches models from the Anthropic API.
* @async
* @function
* @param {object} opts - The options for fetching the models.
* @param {string} opts.user - The user ID to send to the API.
* @param {string[]} [_models=[]] - The models to use as a fallback.
*/
const fetchAnthropicModels = async (opts, _models = []) => {
let models = _models.slice() ?? [];
let apiKey = process.env.ANTHROPIC_API_KEY;
const anthropicBaseURL = 'https://api.anthropic.com/v1';
let baseURL = anthropicBaseURL;
let reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;
if (reverseProxyUrl) {
baseURL = extractBaseURL(reverseProxyUrl);
}
if (!apiKey) {
return models;
}
const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);
const cachedModels = await modelsCache.get(baseURL);
if (cachedModels) {
return cachedModels;
}
if (baseURL) {
models = await fetchModels({
apiKey,
baseURL,
user: opts.user,
name: EModelEndpoint.anthropic,
tokenKey: EModelEndpoint.anthropic,
});
}
if (models.length === 0) {
return _models;
}
await modelsCache.set(baseURL, models);
return models;
};
const getAnthropicModels = async (opts = {}) => {
let models = defaultModels[EModelEndpoint.anthropic];
if (process.env.ANTHROPIC_MODELS) {
models = splitAndTrim(process.env.ANTHROPIC_MODELS);
return models;
}
if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
return models;
}
try {
return await fetchAnthropicModels(opts, models);
} catch (error) {
logger.error('Error fetching Anthropic models:', error);
return models;
}
};
const getGoogleModels = () => {
let models = defaultModels[EModelEndpoint.google];
if (process.env.GOOGLE_MODELS) {
models = splitAndTrim(process.env.GOOGLE_MODELS);
}
return models;
};
const getBedrockModels = () => {
let models = defaultModels[EModelEndpoint.bedrock];
if (process.env.BEDROCK_AWS_MODELS) {
models = splitAndTrim(process.env.BEDROCK_AWS_MODELS);
}
return models;
};
module.exports = {
fetchModels,
splitAndTrim,
getOpenAIModels,
getGoogleModels,
getBedrockModels,
getAnthropicModels,
};

View file

@ -1,183 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { encrypt, decrypt } = require('@librechat/api');
const { ErrorTypes } = require('librechat-data-provider');
const { updateUser } = require('~/models');
const { Key } = require('~/db/models');
/**
* Updates the plugins for a user based on the action specified (install/uninstall).
* @async
* @param {Object} user - The user whose plugins are to be updated.
* @param {string} pluginKey - The key of the plugin to install or uninstall.
* @param {'install' | 'uninstall'} action - The action to perform, 'install' or 'uninstall'.
* @returns {Promise<Object>} The result of the update operation.
* @throws Logs the error internally if the update operation fails.
* @description This function updates the plugin array of a user document based on the specified action.
* It adds a plugin key to the plugins array for an 'install' action, and removes it for an 'uninstall' action.
*/
const updateUserPluginsService = async (user, pluginKey, action) => {
try {
const userPlugins = user.plugins || [];
if (action === 'install') {
return await updateUser(user._id, { plugins: [...userPlugins, pluginKey] });
} else if (action === 'uninstall') {
return await updateUser(user._id, {
plugins: userPlugins.filter((plugin) => plugin !== pluginKey),
});
}
} catch (err) {
logger.error('[updateUserPluginsService]', err);
return err;
}
};
/**
* Retrieves and decrypts the key value for a given user identified by userId and identifier name.
* @param {Object} params - The parameters object.
* @param {string} params.userId - The unique identifier for the user.
* @param {string} params.name - The name associated with the key.
* @returns {Promise<string>} The decrypted key value.
* @throws {Error} Throws an error if the key is not found or if there is a problem during key retrieval.
* @description This function searches for a user's key in the database using their userId and name.
* If found, it decrypts the value of the key and returns it. If no key is found, it throws
* an error indicating that there is no user key available.
*/
const getUserKey = async ({ userId, name }) => {
const keyValue = await Key.findOne({ userId, name }).lean();
if (!keyValue) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_USER_KEY,
}),
);
}
return await decrypt(keyValue.value);
};
/**
* Retrieves, decrypts, and parses the key values for a given user identified by userId and name.
* @param {Object} params - The parameters object.
* @param {string} params.userId - The unique identifier for the user.
* @param {string} params.name - The name associated with the key.
* @returns {Promise<Record<string,string>>} The decrypted and parsed key values.
* @throws {Error} Throws an error if the key is invalid or if there is a problem during key value parsing.
* @description This function retrieves a user's encrypted key using their userId and name, decrypts it,
* and then attempts to parse the decrypted string into a JSON object. If the parsing fails,
* it throws an error indicating that the user key is invalid.
*/
const getUserKeyValues = async ({ userId, name }) => {
let userValues = await getUserKey({ userId, name });
try {
userValues = JSON.parse(userValues);
} catch (e) {
logger.error('[getUserKeyValues]', e);
throw new Error(
JSON.stringify({
type: ErrorTypes.INVALID_USER_KEY,
}),
);
}
return userValues;
};
/**
* Retrieves the expiry information of a user's key identified by userId and name.
* @async
* @param {Object} params - The parameters object.
* @param {string} params.userId - The unique identifier for the user.
* @param {string} params.name - The name associated with the key.
* @returns {Promise<{expiresAt: Date | 'never' | null}>} The expiry date of the key, 'never' if the key has no expiry, or null if the key doesn't exist.
* @description This function fetches a user's key from the database using their userId and name and
* returns its expiry date. If the key is not found, it returns null for the expiry date.
*/
const getUserKeyExpiry = async ({ userId, name }) => {
const keyValue = await Key.findOne({ userId, name }).lean();
if (!keyValue) {
return { expiresAt: null };
}
return { expiresAt: keyValue.expiresAt || 'never' };
};
/**
* Updates or inserts a new key for a given user identified by userId and name, with a specified value and expiry date.
* @async
* @param {Object} params - The parameters object.
* @param {string} params.userId - The unique identifier for the user.
* @param {string} params.name - The name associated with the key.
* @param {string} params.value - The value to be encrypted and stored as the key's value.
* @param {Date} [params.expiresAt] - The expiry date for the key; omit or pass null for no expiry.
* @returns {Promise<Object>} The updated or newly inserted key document.
* @description This function either updates an existing user key or inserts a new one into the database,
* after encrypting the provided value. It sets the provided expiry date for the key (or unsets for no expiry).
*/
const updateUserKey = async ({ userId, name, value, expiresAt = null }) => {
const encryptedValue = await encrypt(value);
let updateObject = {
userId,
name,
value: encryptedValue,
};
const updateQuery = { $set: updateObject };
// add expiresAt to the update object if it's not null
if (expiresAt) {
updateObject.expiresAt = new Date(expiresAt);
} else {
// make sure to remove if already present
updateQuery.$unset = { expiresAt };
}
return await Key.findOneAndUpdate({ userId, name }, updateQuery, {
upsert: true,
new: true,
}).lean();
};
/**
* Deletes a key or all keys for a given user identified by userId, optionally based on a specified name.
* @async
* @param {Object} params - The parameters object.
* @param {string} params.userId - The unique identifier for the user.
* @param {string} [params.name] - The name associated with the key to delete. If not provided and all is true, deletes all keys.
* @param {boolean} [params.all=false] - Whether to delete all keys for the user.
* @returns {Promise<Object>} The result of the deletion operation.
* @description This function deletes a specific key or all keys for a user from the database.
* If a name is provided and all is false, it deletes only the key with that name.
* If all is true, it ignores the name and deletes all keys for the user.
*/
const deleteUserKey = async ({ userId, name, all = false }) => {
if (all) {
return await Key.deleteMany({ userId });
}
await Key.findOneAndDelete({ userId, name }).lean();
};
/**
* Checks if a user key has expired based on the provided expiration date and endpoint.
* If the key has expired, it throws an Error with details including the type of error, the expiration date, and the endpoint.
*
* @param {string} expiresAt - The expiration date of the user key in a format that can be parsed by the Date constructor.
* @param {string} endpoint - The endpoint associated with the user key to be checked.
* @throws {Error} Throws an error if the user key has expired. The error message is a stringified JSON object
* containing the type of error (`ErrorTypes.EXPIRED_USER_KEY`), the expiration date in the local string format, and the endpoint.
*/
const checkUserKeyExpiry = (expiresAt, endpoint) => {
const expiresAtDate = new Date(expiresAt);
if (expiresAtDate < new Date()) {
const errorMessage = JSON.stringify({
type: ErrorTypes.EXPIRED_USER_KEY,
expiredAt: expiresAtDate.toLocaleString(),
endpoint,
});
throw new Error(errorMessage);
}
};
module.exports = {
getUserKey,
updateUserKey,
deleteUserKey,
getUserKeyValues,
getUserKeyExpiry,
checkUserKeyExpiry,
updateUserPluginsService,
};
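
A sketch of the typical round trip for a user-provided endpoint key, with an illustrative userId; stored values are JSON, hence getUserKeyValues.
await updateUserKey({
  userId: 'user-123',
  name: 'openAI',
  value: JSON.stringify({ apiKey: 'sk-xxxx', baseURL: 'https://api.openai.com/v1' }),
  expiresAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
});
// Later, when initializing the endpoint (expiresAt normally comes from req.body.key):
checkUserKeyExpiry('2030-01-01T00:00:00.000Z', 'openAI'); // throws EXPIRED_USER_KEY once past
const { apiKey, baseURL } = await getUserKeyValues({ userId: 'user-123', name: 'openAI' });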

View file

@ -1,5 +1,5 @@
const { webcrypto } = require('node:crypto');
const { hashBackupCode, decryptV3, decryptV2 } = require('@librechat/api');
const { hashBackupCode, decryptV3, decryptV2 } = require('@librechat/data-schemas');
const { updateUser } = require('~/models');
// Base32 alphabet for TOTP secret encoding.

View file

@ -13,7 +13,7 @@ jest.mock('@librechat/data-schemas', () => ({
},
}));
jest.mock('~/models/File', () => ({
jest.mock('~/models', () => ({
getFiles: jest.fn().mockResolvedValue([]),
}));

View file

@ -1,28 +0,0 @@
const { logger } = require('@librechat/data-schemas');
/**
* Extracts the base URL from the provided URL.
* @param {string} fullURL - The full URL.
* @returns {string} The base URL.
*/
function deriveBaseURL(fullURL) {
try {
const parsedUrl = new URL(fullURL);
const protocol = parsedUrl.protocol;
const hostname = parsedUrl.hostname;
const port = parsedUrl.port;
// Check if the parsed URL components are meaningful
if (!protocol || !hostname) {
return fullURL;
}
// Reconstruct the base URL
return `${protocol}//${hostname}${port ? `:${port}` : ''}`;
} catch (error) {
logger.error('Failed to derive base URL', error);
return fullURL; // Return the original URL in case of any exception
}
}
module.exports = deriveBaseURL;

View file

@ -1,74 +0,0 @@
const axios = require('axios');
const deriveBaseURL = require('./deriveBaseURL');
jest.mock('@librechat/api', () => {
const originalUtils = jest.requireActual('@librechat/api');
return {
...originalUtils,
processModelData: jest.fn((...args) => {
return originalUtils.processModelData(...args);
}),
};
});
jest.mock('axios');
jest.mock('~/cache/getLogStores', () =>
jest.fn().mockImplementation(() => ({
get: jest.fn().mockResolvedValue(undefined),
set: jest.fn().mockResolvedValue(true),
})),
);
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
},
}));
axios.get.mockResolvedValue({
data: {
data: [{ id: 'model-1' }, { id: 'model-2' }],
},
});
describe('deriveBaseURL', () => {
it('should extract the base URL correctly from a full URL with a port', () => {
const fullURL = 'https://example.com:8080/path?query=123';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('https://example.com:8080');
});
it('should extract the base URL correctly from a full URL without a port', () => {
const fullURL = 'https://example.com/path?query=123';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('https://example.com');
});
it('should handle URLs using the HTTP protocol', () => {
const fullURL = 'http://example.com:3000/path?query=123';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('http://example.com:3000');
});
it('should return only the protocol and hostname if no port is specified', () => {
const fullURL = 'http://example.com/path?query=123';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('http://example.com');
});
it('should handle URLs with uncommon protocols', () => {
const fullURL = 'ftp://example.com:2121/path?query=123';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('ftp://example.com:2121');
});
it('should handle edge case where URL ends with a slash', () => {
const fullURL = 'https://example.com/';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toEqual('https://example.com');
});
it('should return the original URL if the URL is invalid', () => {
const invalidURL = 'htp:/example.com:8080';
const result = deriveBaseURL(invalidURL);
expect(result).toBe(invalidURL);
});
});

View file

@ -1,35 +0,0 @@
const { logger } = require('@librechat/data-schemas');
function findContent(obj) {
if (obj && typeof obj === 'object') {
if ('kwargs' in obj && 'content' in obj.kwargs) {
return obj.kwargs.content;
}
for (let key in obj) {
let content = findContent(obj[key]);
if (content) {
return content;
}
}
}
return null;
}
function findMessageContent(message) {
let startIndex = Math.min(message.indexOf('{'), message.indexOf('['));
let jsonString = message.substring(startIndex);
let jsonObjectOrArray;
try {
jsonObjectOrArray = JSON.parse(jsonString);
} catch (error) {
logger.error('[findMessageContent] Failed to parse JSON:', error);
return null;
}
let content = findContent(jsonObjectOrArray);
return content;
}
module.exports = findMessageContent;
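
For instance, given an error payload that is itself serialized JSON, the helper pulls out the nested kwargs.content; the inputs are illustrative.
findMessageContent('[{"kwargs":{"content":"Rate limit exceeded"}}]'); // => 'Rate limit exceeded'
findMessageContent('not json'); // => null (the parse failure is logged)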

View file

@ -1,9 +0,0 @@
const deriveBaseURL = require('./deriveBaseURL');
const extractBaseURL = require('./extractBaseURL');
const findMessageContent = require('./findMessageContent');
module.exports = {
deriveBaseURL,
extractBaseURL,
findMessageContent,
};

View file

@ -1,6 +1,5 @@
import { logger } from '@librechat/data-schemas';
import { logger, decrypt } from '@librechat/data-schemas';
import type { IPluginAuth, PluginAuthMethods } from '@librechat/data-schemas';
import { decrypt } from '../crypto/encryption';
export interface GetPluginAuthMapParams {
userId: string;

View file

@ -1,7 +1,8 @@
export * from './chain';
export * from './initialize';
export * from './legacy';
export * from './memory';
export * from './migration';
export * from './legacy';
export * from './resources';
export * from './run';
export * from './validation';

View file

@ -0,0 +1,315 @@
import { Providers } from '@librechat/agents';
import {
ErrorTypes,
EModelEndpoint,
EToolResources,
paramEndpoints,
isAgentsEndpoint,
replaceSpecialVars,
providerEndpointMap,
} from 'librechat-data-provider';
import type {
AgentToolResources,
TEndpointOption,
TFile,
Agent,
TUser,
} from 'librechat-data-provider';
import type { Response as ServerResponse } from 'express';
import type { IMongoFile } from '@librechat/data-schemas';
import type { GenericTool } from '@librechat/agents';
import type { InitializeResultBase, ServerRequest, EndpointDbMethods } from '~/types';
import { getModelMaxTokens, extractLibreChatParams, optionalChainWithEmptyCheck } from '~/utils';
import { filterFilesByEndpointConfig } from '~/files';
import { generateArtifactsPrompt } from '~/prompts';
import { getProviderConfig } from '~/endpoints';
import { primeResources } from './resources';
/**
* Extended agent type with additional fields needed after initialization
*/
export type InitializedAgent = Agent & {
tools: GenericTool[];
attachments: IMongoFile[];
toolContextMap: Record<string, unknown>;
maxContextTokens: number;
useLegacyContent: boolean;
resendFiles: boolean;
userMCPAuthMap?: Record<string, Record<string, string>>;
};
/**
* Parameters for initializing an agent
* Matches the CJS signature from api/server/services/Endpoints/agents/agent.js
*/
export interface InitializeAgentParams {
/** Request object */
req: ServerRequest;
/** Response object */
res: ServerResponse;
/** Agent to initialize */
agent: Agent;
/** Conversation ID (optional) */
conversationId?: string | null;
/** Request files */
requestFiles?: IMongoFile[];
/** Function to load agent tools */
loadTools?: (params: {
req: ServerRequest;
res: ServerResponse;
provider: string;
agentId: string;
tools: string[];
model: string | null;
tool_resources: AgentToolResources | undefined;
}) => Promise<{
tools: GenericTool[];
toolContextMap: Record<string, unknown>;
userMCPAuthMap?: Record<string, Record<string, string>>;
} | null>;
/** Endpoint option (contains model_parameters and endpoint info) */
endpointOption?: Partial<TEndpointOption>;
/** Set of allowed providers */
allowedProviders: Set<string>;
/** Whether this is the initial agent */
isInitialAgent?: boolean;
}
/**
* Database methods required for agent initialization
* Most methods come from data-schemas via createMethods()
* getConvoFiles is not yet in data-schemas but is included here for consistency
*/
export interface InitializeAgentDbMethods extends EndpointDbMethods {
/** Update usage tracking for multiple files */
updateFilesUsage: (files: Array<{ file_id: string }>, fileIds?: string[]) => Promise<unknown[]>;
/** Get files from database */
getFiles: (filter: unknown, sort: unknown, select: unknown, opts?: unknown) => Promise<unknown[]>;
/** Get tool files by IDs */
getToolFilesByIds: (fileIds: string[], toolSet: Set<EToolResources>) => Promise<unknown[]>;
/** Get conversation file IDs */
getConvoFiles: (conversationId: string) => Promise<string[] | null>;
}
/**
* Initializes an agent for use in requests.
* Handles file processing, tool loading, provider configuration, and context token calculations.
*
* This function is exported from @librechat/api and replaces the CJS version from
* api/server/services/Endpoints/agents/agent.js
*
* @param params - Initialization parameters
* @param deps - Optional dependency injection for testing
* @returns Promise resolving to initialized agent with tools and configuration
* @throws Error if agent provider is not allowed or if required dependencies are missing
*/
export async function initializeAgent(
params: InitializeAgentParams,
db?: InitializeAgentDbMethods,
): Promise<InitializedAgent> {
const {
req,
res,
agent,
loadTools,
requestFiles = [],
conversationId,
endpointOption,
allowedProviders,
isInitialAgent = false,
} = params;
if (!db) {
throw new Error('initializeAgent requires db methods to be passed');
}
if (
isAgentsEndpoint(endpointOption?.endpoint) &&
allowedProviders.size > 0 &&
!allowedProviders.has(agent.provider)
) {
throw new Error(
`{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
);
}
let currentFiles: IMongoFile[] | undefined;
const _modelOptions = structuredClone(
Object.assign(
{ model: agent.model },
agent.model_parameters ?? { model: agent.model },
isInitialAgent === true ? endpointOption?.model_parameters : {},
),
);
const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(
_modelOptions as Record<string, unknown>,
);
const provider = agent.provider;
agent.endpoint = provider;
if (isInitialAgent && conversationId != null && resendFiles) {
const fileIds = (await db.getConvoFiles(conversationId)) ?? [];
const toolResourceSet = new Set<EToolResources>();
for (const tool of agent.tools ?? []) {
if (EToolResources[tool as keyof typeof EToolResources]) {
toolResourceSet.add(EToolResources[tool as keyof typeof EToolResources]);
}
}
const toolFiles = (await db.getToolFilesByIds(fileIds, toolResourceSet)) as IMongoFile[];
if (requestFiles.length || toolFiles.length) {
currentFiles = (await db.updateFilesUsage(requestFiles.concat(toolFiles))) as IMongoFile[];
}
} else if (isInitialAgent && requestFiles.length) {
currentFiles = (await db.updateFilesUsage(requestFiles)) as IMongoFile[];
}
if (currentFiles && currentFiles.length) {
let endpointType: EModelEndpoint | undefined;
if (!paramEndpoints.has(agent.endpoint ?? '')) {
endpointType = EModelEndpoint.custom;
}
currentFiles = filterFilesByEndpointConfig(req, {
files: currentFiles,
endpoint: agent.endpoint ?? '',
endpointType,
});
}
const { attachments: primedAttachments, tool_resources } = await primeResources({
req: req as never,
getFiles: db.getFiles as never,
appConfig: req.config,
agentId: agent.id,
attachments: currentFiles
? (Promise.resolve(currentFiles) as unknown as Promise<TFile[]>)
: undefined,
tool_resources: agent.tool_resources,
requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
});
const {
tools: structuredTools,
toolContextMap,
userMCPAuthMap,
} = (await loadTools?.({
req,
res,
provider,
agentId: agent.id,
tools: agent.tools ?? [],
model: agent.model,
tool_resources,
})) ?? { tools: [], toolContextMap: {}, userMCPAuthMap: undefined };
const { getOptions, overrideProvider } = getProviderConfig({
provider,
appConfig: req.config,
});
if (overrideProvider !== agent.provider) {
agent.provider = overrideProvider;
}
const finalModelOptions = {
...modelOptions,
model: agent.model,
};
const options: InitializeResultBase = await getOptions({
req,
endpoint: provider,
model_parameters: finalModelOptions,
db,
});
const llmConfig = options.llmConfig as Record<string, unknown>;
const tokensModel =
agent.provider === EModelEndpoint.azureOpenAI ? agent.model : (llmConfig?.model as string);
const maxOutputTokens = optionalChainWithEmptyCheck(
llmConfig?.maxOutputTokens as number | undefined,
llmConfig?.maxTokens as number | undefined,
0,
);
const agentMaxContextTokens = optionalChainWithEmptyCheck(
maxContextTokens,
getModelMaxTokens(
tokensModel ?? '',
providerEndpointMap[provider as keyof typeof providerEndpointMap],
options.endpointTokenConfig,
),
18000,
);
if (
agent.endpoint === EModelEndpoint.azureOpenAI &&
(llmConfig?.azureOpenAIApiInstanceName as string | undefined) == null
) {
agent.provider = Providers.OPENAI;
}
if (options.provider != null) {
agent.provider = options.provider;
}
let tools: GenericTool[] = options.tools?.length
? (options.tools as GenericTool[])
: structuredTools;
if (
(agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
options.tools?.length &&
structuredTools?.length
) {
throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
} else if (
(agent.provider === Providers.OPENAI ||
agent.provider === Providers.AZURE ||
agent.provider === Providers.ANTHROPIC) &&
options.tools?.length &&
structuredTools?.length
) {
tools = structuredTools.concat(options.tools as GenericTool[]);
}
agent.model_parameters = { ...options.llmConfig } as Agent['model_parameters'];
if (options.configOptions) {
(agent.model_parameters as Record<string, unknown>).configuration = options.configOptions;
}
if (agent.instructions && agent.instructions !== '') {
agent.instructions = replaceSpecialVars({
text: agent.instructions,
user: req.user ? (req.user as unknown as TUser) : null,
});
}
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
const artifactsPromptResult = generateArtifactsPrompt({
endpoint: agent.provider,
artifacts: agent.artifacts as never,
});
agent.additional_instructions = artifactsPromptResult ?? undefined;
}
const agentMaxContextNum = Number(agentMaxContextTokens) || 18000;
const maxOutputTokensNum = Number(maxOutputTokens) || 0;
const finalAttachments: IMongoFile[] = (primedAttachments ?? [])
.filter((a): a is TFile => a != null)
.map((a) => a as unknown as IMongoFile);
const initializedAgent: InitializedAgent = {
...agent,
tools: (tools ?? []) as GenericTool[] & string[],
attachments: finalAttachments,
resendFiles,
userMCPAuthMap,
toolContextMap: toolContextMap ?? {},
useLegacyContent: !!options.useLegacyContent,
maxContextTokens: Math.round((agentMaxContextNum - maxOutputTokensNum) * 0.9),
};
return initializedAgent;
}
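
A caller-side sketch; dbMethods, loadTools, and the processFiles wiring are illustrative stand-ins for the real injections assembled from the app's models.
import { initializeAgent } from '@librechat/api';

const initialized = await initializeAgent(
  {
    req,
    res,
    agent,
    loadTools,
    requestFiles: req.body.files ?? [],
    conversationId: req.body.conversationId,
    endpointOption: req.body.endpointOption,
    allowedProviders: new Set(allowedProviderList),
    isInitialAgent: true,
  },
  {
    ...dbMethods, // EndpointDbMethods (e.g. getUserKey) from data-schemas
    getFiles,
    getConvoFiles,
    getToolFilesByIds,
    updateFilesUsage: (files, fileIds) => processFiles(files, fileIds),
  },
);
// initialized.tools, .attachments, .toolContextMap, and .maxContextTokens feed the AgentClient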

View file

@ -152,7 +152,7 @@ export const primeResources = async ({
agentId,
}: {
req: ServerRequest & { user?: IUser };
appConfig: AppConfig;
appConfig?: AppConfig;
requestFileSet: Set<string>;
attachments: Promise<Array<TFile | null>> | undefined;
tool_resources: AgentToolResources | undefined;

View file

@ -1,129 +0,0 @@
import 'dotenv/config';
import crypto from 'node:crypto';
const { webcrypto } = crypto;
// Use hex decoding for both key and IV for legacy methods.
const key = Buffer.from(process.env.CREDS_KEY ?? '', 'hex');
const iv = Buffer.from(process.env.CREDS_IV ?? '', 'hex');
const algorithm = 'AES-CBC';
// --- Legacy v1/v2 Setup: AES-CBC with fixed key and IV ---
export async function encrypt(value: string) {
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'encrypt',
]);
const encoder = new TextEncoder();
const data = encoder.encode(value);
const encryptedBuffer = await webcrypto.subtle.encrypt(
{ name: algorithm, iv: iv },
cryptoKey,
data,
);
return Buffer.from(encryptedBuffer).toString('hex');
}
export async function decrypt(encryptedValue: string) {
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'decrypt',
]);
const encryptedBuffer = Buffer.from(encryptedValue, 'hex');
const decryptedBuffer = await webcrypto.subtle.decrypt(
{ name: algorithm, iv: iv },
cryptoKey,
encryptedBuffer,
);
const decoder = new TextDecoder();
return decoder.decode(decryptedBuffer);
}
// --- v2: AES-CBC with a random IV per encryption ---
export async function encryptV2(value: string) {
const gen_iv = webcrypto.getRandomValues(new Uint8Array(16));
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'encrypt',
]);
const encoder = new TextEncoder();
const data = encoder.encode(value);
const encryptedBuffer = await webcrypto.subtle.encrypt(
{ name: algorithm, iv: gen_iv },
cryptoKey,
data,
);
return Buffer.from(gen_iv).toString('hex') + ':' + Buffer.from(encryptedBuffer).toString('hex');
}
export async function decryptV2(encryptedValue: string) {
const parts = encryptedValue.split(':');
if (parts.length === 1) {
return parts[0];
}
const gen_iv = Buffer.from(parts.shift() ?? '', 'hex');
const encrypted = parts.join(':');
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'decrypt',
]);
const encryptedBuffer = Buffer.from(encrypted, 'hex');
const decryptedBuffer = await webcrypto.subtle.decrypt(
{ name: algorithm, iv: gen_iv },
cryptoKey,
encryptedBuffer,
);
const decoder = new TextDecoder();
return decoder.decode(decryptedBuffer);
}
// --- v3: AES-256-CTR using Node's crypto functions ---
const algorithm_v3 = 'aes-256-ctr';
/**
* Encrypts a value using AES-256-CTR.
* Note: AES-256 requires a 32-byte key. Ensure that process.env.CREDS_KEY is a 64-character hex string.
*
* @param value - The plaintext to encrypt.
* @returns The encrypted string with a "v3:" prefix.
*/
export function encryptV3(value: string) {
if (key.length !== 32) {
throw new Error(`Invalid key length: expected 32 bytes, got ${key.length} bytes`);
}
const iv_v3 = crypto.randomBytes(16);
const cipher = crypto.createCipheriv(algorithm_v3, key, iv_v3);
const encrypted = Buffer.concat([cipher.update(value, 'utf8'), cipher.final()]);
return `v3:${iv_v3.toString('hex')}:${encrypted.toString('hex')}`;
}
export function decryptV3(encryptedValue: string) {
const parts = encryptedValue.split(':');
if (parts[0] !== 'v3') {
throw new Error('Not a v3 encrypted value');
}
const iv_v3 = Buffer.from(parts[1], 'hex');
const encryptedText = Buffer.from(parts.slice(2).join(':'), 'hex');
const decipher = crypto.createDecipheriv(algorithm_v3, key, iv_v3);
const decrypted = Buffer.concat([decipher.update(encryptedText), decipher.final()]);
return decrypted.toString('utf8');
}
export async function getRandomValues(length: number) {
if (!Number.isInteger(length) || length <= 0) {
throw new Error('Length must be a positive integer');
}
const randomValues = new Uint8Array(length);
webcrypto.getRandomValues(randomValues);
return Buffer.from(randomValues).toString('hex');
}
/**
* Computes SHA-256 hash for the given input.
* @param input - The input to hash.
* @returns The SHA-256 hash of the input.
*/
export async function hashBackupCode(input: string) {
const encoder = new TextEncoder();
const data = encoder.encode(input);
const hashBuffer = await webcrypto.subtle.digest('SHA-256', data);
const hashArray = Array.from(new Uint8Array(hashBuffer));
return hashArray.map((b) => b.toString(16).padStart(2, '0')).join('');
}
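
A round-trip sketch, assuming CREDS_KEY is a 64-character hex string (32 bytes) and CREDS_IV a 32-character hex string for the legacy helpers.
const token = encryptV3('my-api-key'); // 'v3:<iv hex>:<ciphertext hex>'
decryptV3(token); // => 'my-api-key'
const legacy = await encryptV2('my-api-key');
await decryptV2(legacy); // => 'my-api-key'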

View file

@ -1,2 +1,11 @@
export * from './encryption';
export {
encrypt,
decrypt,
encryptV2,
decryptV2,
encryptV3,
decryptV3,
hashBackupCode,
getRandomValues,
} from '@librechat/data-schemas';
export * from './jwt';
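
Call sites can keep importing from @librechat/api; the re-export above forwards to the data-schemas implementations, e.g.:
import { encryptV3, decryptV3 } from '@librechat/api';
const stored = encryptV3('user-provided-key');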

View file

@ -1,2 +1,3 @@
export * from './helpers';
export * from './llm';
export * from './initialize';

View file

@ -0,0 +1,73 @@
import { EModelEndpoint } from 'librechat-data-provider';
import type { BaseInitializeParams, InitializeResultBase, AnthropicConfigOptions } from '~/types';
import { checkUserKeyExpiry } from '~/utils';
import { getLLMConfig } from './llm';
/**
* Initializes Anthropic endpoint configuration.
*
* @param params - Configuration parameters
* @returns Promise resolving to Anthropic configuration options
* @throws Error if API key is not provided
*/
export async function initializeAnthropic({
req,
endpoint,
model_parameters,
db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
void endpoint;
const appConfig = req.config;
const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env;
const { key: expiresAt } = req.body;
const isUserProvided = ANTHROPIC_API_KEY === 'user_provided';
const anthropicApiKey = isUserProvided
? await db.getUserKey({ userId: req.user?.id ?? '', name: EModelEndpoint.anthropic })
: ANTHROPIC_API_KEY;
if (!anthropicApiKey) {
throw new Error('Anthropic API key not provided. Please provide it again.');
}
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic);
}
let clientOptions: AnthropicConfigOptions = {};
/** @type {undefined | TBaseEndpoint} */
const anthropicConfig = appConfig?.endpoints?.[EModelEndpoint.anthropic];
if (anthropicConfig) {
clientOptions = {
...clientOptions,
// Note: _lc_stream_delay is set on modelOptions in the result
};
}
const allConfig = appConfig?.endpoints?.all;
clientOptions = {
proxy: PROXY ?? undefined,
reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? undefined,
modelOptions: {
...(model_parameters ?? {}),
user: req.user?.id,
},
...clientOptions,
};
const result = getLLMConfig(anthropicApiKey, clientOptions);
// Apply stream rate delay
if (anthropicConfig?.streamRate) {
(result.llmConfig as Record<string, unknown>)._lc_stream_delay = anthropicConfig.streamRate;
}
if (allConfig?.streamRate) {
(result.llmConfig as Record<string, unknown>)._lc_stream_delay = allConfig.streamRate;
}
return result;
}
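
A call sketch, assuming db.getUserKey resolves the user's stored Anthropic key when ANTHROPIC_API_KEY is 'user_provided'; the req and db wiring is illustrative.
const result = await initializeAnthropic({
  req,
  endpoint: EModelEndpoint.anthropic,
  model_parameters: { model: 'claude-3-5-sonnet-20241022', temperature: 0.5 },
  db,
});
// result.llmConfig (with _lc_stream_delay applied when a streamRate is configured) feeds the client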

View file

@ -0,0 +1 @@
export * from './initialize';

View file

@ -1,5 +1,18 @@
import { HttpsProxyAgent } from 'https-proxy-agent';
import { NodeHttpHandler } from '@smithy/node-http-handler';
import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
import {
AuthType,
EModelEndpoint,
bedrockInputParser,
bedrockOutputParser,
removeNullishValues,
} from 'librechat-data-provider';
import type { BaseInitializeParams, InitializeResultBase, BedrockCredentials } from '~/types';
import { checkUserKeyExpiry } from '~/utils';
/**
* Bedrock endpoint options configuration
* Initializes Bedrock endpoint configuration.
*
* This module handles configuration for AWS Bedrock endpoints, including support for
* HTTP/HTTPS proxies and reverse proxies.
@ -18,28 +31,17 @@
* - Credentials and endpoint configuration are passed separately to ChatBedrockConverse,
* which creates its own BedrockRuntimeClient internally
*
* Environment Variables:
* - PROXY: HTTP/HTTPS proxy URL (e.g., http://proxy.example.com:8080)
* - BEDROCK_REVERSE_PROXY: Custom Bedrock API endpoint host
* - BEDROCK_AWS_DEFAULT_REGION: AWS region for Bedrock service
* - BEDROCK_AWS_ACCESS_KEY_ID: AWS access key (or set to 'user_provided')
* - BEDROCK_AWS_SECRET_ACCESS_KEY: AWS secret key (or set to 'user_provided')
* - BEDROCK_AWS_SESSION_TOKEN: Optional AWS session token
* @param params - Configuration parameters
* @returns Promise resolving to Bedrock configuration options
* @throws Error if credentials are not provided when required
*/
const { HttpsProxyAgent } = require('https-proxy-agent');
const { NodeHttpHandler } = require('@smithy/node-http-handler');
const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime');
const {
AuthType,
EModelEndpoint,
bedrockInputParser,
bedrockOutputParser,
removeNullishValues,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const getOptions = async ({ req, overrideModel, endpointOption }) => {
export async function initializeBedrock({
req,
endpoint,
model_parameters,
db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
void endpoint;
const {
BEDROCK_AWS_SECRET_ACCESS_KEY,
BEDROCK_AWS_ACCESS_KEY_ID,
@ -48,11 +50,14 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
BEDROCK_AWS_DEFAULT_REGION,
PROXY,
} = process.env;
const expiresAt = req.body.key;
const { key: expiresAt } = req.body;
const isUserProvided = BEDROCK_AWS_SECRET_ACCESS_KEY === AuthType.USER_PROVIDED;
let credentials = isUserProvided
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock })
let credentials: BedrockCredentials | undefined = isUserProvided
? await db
.getUserKey({ userId: req.user?.id ?? '', name: EModelEndpoint.bedrock })
.then((key) => JSON.parse(key) as BedrockCredentials)
: {
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
@ -75,37 +80,31 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bedrock);
}
/*
Callback for stream rate no longer awaits and may end the stream prematurely
/** @type {number}
let streamRate = Constants.DEFAULT_STREAM_RATE;
/** @type {undefined | TBaseEndpoint}
const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock];
if (bedrockConfig && bedrockConfig.streamRate) {
streamRate = bedrockConfig.streamRate;
}
const allConfig = appConfig.endpoints?.all;
if (allConfig && allConfig.streamRate) {
streamRate = allConfig.streamRate;
}
*/
/** @type {BedrockClientOptions} */
const requestOptions = {
model: overrideModel ?? endpointOption?.model,
const requestOptions: Record<string, unknown> = {
model: model_parameters?.model as string | undefined,
region: BEDROCK_AWS_DEFAULT_REGION,
};
const configOptions = {};
const configOptions: Record<string, unknown> = {};
const llmConfig = bedrockOutputParser(
bedrockInputParser.parse(
removeNullishValues(Object.assign(requestOptions, endpointOption?.model_parameters ?? {})),
removeNullishValues({ ...requestOptions, ...(model_parameters ?? {}) }),
),
);
) as InitializeResultBase['llmConfig'] & {
region?: string;
client?: BedrockRuntimeClient;
credentials?: BedrockCredentials;
endpointHost?: string;
};
/** Only include credentials if they're complete (accessKeyId and secretAccessKey are both set) */
const hasCompleteCredentials =
credentials &&
typeof credentials.accessKeyId === 'string' &&
credentials.accessKeyId !== '' &&
typeof credentials.secretAccessKey === 'string' &&
credentials.secretAccessKey !== '';
if (PROXY) {
const proxyAgent = new HttpsProxyAgent(PROXY);
@ -116,8 +115,10 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
// the AWS SDK's default credential provider chain is used (instance profiles,
// AWS profiles, environment variables, etc.)
const customClient = new BedrockRuntimeClient({
region: llmConfig.region ?? BEDROCK_AWS_DEFAULT_REGION,
...(credentials && { credentials }),
region: (llmConfig.region as string) ?? BEDROCK_AWS_DEFAULT_REGION,
...(hasCompleteCredentials && {
credentials: credentials as { accessKeyId: string; secretAccessKey: string },
}),
requestHandler: new NodeHttpHandler({
httpAgent: proxyAgent,
httpsAgent: proxyAgent,
@ -141,10 +142,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
}
return {
/** @type {BedrockClientOptions} */
llmConfig,
configOptions,
};
};
module.exports = getOptions;
}
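A minimal sketch (not part of the diff) of the credential-gating behavior this file introduces: credentials are only passed to the Bedrock client when both required fields are non-empty, otherwise the AWS SDK's default provider chain takes over. Env var names follow the JSDoc at the top of this file; the `BedrockCredentials` shape is assumed for illustration.

```ts
import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';

interface BedrockCredentials {
  accessKeyId?: string;
  secretAccessKey?: string;
  sessionToken?: string;
}

/** True only when both required fields are non-empty strings. */
function hasCompleteCredentials(credentials?: BedrockCredentials): boolean {
  return (
    typeof credentials?.accessKeyId === 'string' &&
    credentials.accessKeyId !== '' &&
    typeof credentials.secretAccessKey === 'string' &&
    credentials.secretAccessKey !== ''
  );
}

const credentials: BedrockCredentials = {
  accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
};

// When incomplete, omit `credentials` entirely so the SDK's default credential
// provider chain (instance profiles, AWS profiles, environment variables, etc.) is used.
const client = new BedrockRuntimeClient({
  region: process.env.BEDROCK_AWS_DEFAULT_REGION ?? 'us-east-1',
  ...(hasCompleteCredentials(credentials) && {
    credentials: credentials as { accessKeyId: string; secretAccessKey: string },
  }),
});
```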



@ -0,0 +1,99 @@
import { Providers } from '@librechat/agents';
import { EModelEndpoint } from 'librechat-data-provider';
import type { TEndpoint } from 'librechat-data-provider';
import type { AppConfig } from '@librechat/data-schemas';
import type { BaseInitializeParams, InitializeResultBase } from '~/types';
import { initializeAnthropic } from './anthropic/initialize';
import { initializeBedrock } from './bedrock/initialize';
import { initializeCustom } from './custom/initialize';
import { initializeGoogle } from './google/initialize';
import { initializeOpenAI } from './openai/initialize';
import { getCustomEndpointConfig } from '~/app/config';
/**
* Type for initialize functions
*/
export type InitializeFn = (params: BaseInitializeParams) => Promise<InitializeResultBase>;
/**
* Check if the provider is a known custom provider
* @param provider - The provider string
* @returns True if the provider is a known custom provider, false otherwise
*/
export function isKnownCustomProvider(provider?: string): boolean {
return [Providers.XAI, Providers.DEEPSEEK, Providers.OPENROUTER].includes(
(provider?.toLowerCase() ?? '') as Providers,
);
}
/**
* Provider configuration map mapping providers to their initialization functions
*/
export const providerConfigMap: Record<string, InitializeFn> = {
[Providers.XAI]: initializeCustom,
[Providers.DEEPSEEK]: initializeCustom,
[Providers.OPENROUTER]: initializeCustom,
[EModelEndpoint.openAI]: initializeOpenAI,
[EModelEndpoint.google]: initializeGoogle,
[EModelEndpoint.bedrock]: initializeBedrock,
[EModelEndpoint.azureOpenAI]: initializeOpenAI,
[EModelEndpoint.anthropic]: initializeAnthropic,
};
/**
* Result from getProviderConfig
*/
export interface ProviderConfigResult {
/** The initialization function for this provider */
getOptions: InitializeFn;
/** The resolved provider name (may be different from input if normalized) */
overrideProvider: string;
/** Custom endpoint configuration (if applicable) */
customEndpointConfig?: Partial<TEndpoint>;
}
/**
* Get the provider configuration and override endpoint based on the provider string
*
* @param params - Configuration parameters
* @param params.provider - The provider string
* @param params.appConfig - The application configuration
* @returns Provider configuration including getOptions function, override provider, and custom config
* @throws Error if provider is not supported
*/
export function getProviderConfig({
provider,
appConfig,
}: {
provider: string;
appConfig?: AppConfig;
}): ProviderConfigResult {
let getOptions = providerConfigMap[provider];
let overrideProvider = provider;
let customEndpointConfig: Partial<TEndpoint> | undefined;
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
overrideProvider = provider.toLowerCase();
getOptions = providerConfigMap[overrideProvider];
} else if (!getOptions) {
customEndpointConfig = getCustomEndpointConfig({ endpoint: provider, appConfig });
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
getOptions = initializeCustom;
overrideProvider = Providers.OPENAI;
}
if (isKnownCustomProvider(overrideProvider) && !customEndpointConfig) {
customEndpointConfig = getCustomEndpointConfig({ endpoint: provider, appConfig });
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
}
return {
getOptions,
overrideProvider,
customEndpointConfig,
};
}
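A hedged usage sketch of the new `getProviderConfig` contract (not part of the diff; the lowercase string values of the `Providers` enum are assumed):

```ts
import type { AppConfig } from '@librechat/data-schemas';
import { getProviderConfig } from '@librechat/api';

declare const appConfig: AppConfig; // assumed: the loaded application configuration

// Known endpoint: resolved straight from `providerConfigMap`.
const openai = getProviderConfig({ provider: 'openAI', appConfig });
// openai.getOptions -> initializeOpenAI, openai.overrideProvider -> 'openAI'

// Known custom provider: casing is normalized, the custom initializer is used,
// and the matching librechat.yaml endpoint config is attached.
const xai = getProviderConfig({ provider: 'XAI', appConfig });
// xai.getOptions -> initializeCustom, xai.customEndpointConfig -> YAML-defined endpoint

// Any other provider falls back to getCustomEndpointConfig and throws
// `Provider <name> not supported` when no YAML entry exists.
```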


@ -1 +1,2 @@
export * from './config';
export * from './initialize';


@ -0,0 +1,180 @@
import {
CacheKeys,
ErrorTypes,
envVarRegex,
FetchTokenConfig,
extractEnvVariable,
} from 'librechat-data-provider';
import type { TEndpoint } from 'librechat-data-provider';
import type { AppConfig } from '@librechat/data-schemas';
import type { BaseInitializeParams, InitializeResultBase, EndpointTokenConfig } from '~/types';
import { getOpenAIConfig } from '~/endpoints/openai/config';
import { getCustomEndpointConfig } from '~/app/config';
import { fetchModels } from '~/endpoints/models';
import { isUserProvided, checkUserKeyExpiry } from '~/utils';
import { standardCache } from '~/cache';
const { PROXY } = process.env;
/**
* Builds custom options from endpoint configuration
*/
function buildCustomOptions(
endpointConfig: Partial<TEndpoint>,
appConfig?: AppConfig,
endpointTokenConfig?: Record<string, unknown>,
) {
const customOptions: Record<string, unknown> = {
headers: endpointConfig.headers,
addParams: endpointConfig.addParams,
dropParams: endpointConfig.dropParams,
customParams: endpointConfig.customParams,
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,
summaryModel: endpointConfig.summaryModel,
modelDisplayLabel: endpointConfig.modelDisplayLabel,
titleMethod: endpointConfig.titleMethod ?? 'completion',
contextStrategy: endpointConfig.summarize ? 'summarize' : null,
directEndpoint: endpointConfig.directEndpoint,
titleMessageRole: endpointConfig.titleMessageRole,
streamRate: endpointConfig.streamRate,
endpointTokenConfig,
};
const allConfig = appConfig?.endpoints?.all;
if (allConfig) {
customOptions.streamRate = allConfig.streamRate;
}
return customOptions;
}
/**
* Initializes a custom endpoint client configuration.
* This function handles custom endpoints defined in librechat.yaml, including
* user-provided API keys and URLs.
*
* @param params - Configuration parameters
* @returns Promise resolving to endpoint configuration options
* @throws Error if config is missing, API key is not provided, or base URL is missing
*/
export async function initializeCustom({
req,
endpoint,
model_parameters,
db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
const appConfig = req.config;
const { key: expiresAt } = req.body;
const endpointConfig = getCustomEndpointConfig({
endpoint,
appConfig,
});
if (!endpointConfig) {
throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
}
const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey ?? '');
const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL ?? '');
if (CUSTOM_API_KEY.match(envVarRegex)) {
throw new Error(`Missing API Key for ${endpoint}.`);
}
if (CUSTOM_BASE_URL.match(envVarRegex)) {
throw new Error(`Missing Base URL for ${endpoint}.`);
}
const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);
let userValues = null;
if (expiresAt && (userProvidesKey || userProvidesURL)) {
checkUserKeyExpiry(expiresAt, endpoint);
userValues = await db.getUserKeyValues({ userId: req.user?.id ?? '', name: endpoint });
}
const apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
const baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;
if (userProvidesKey && !apiKey) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_USER_KEY,
}),
);
}
if (userProvidesURL && !baseURL) {
throw new Error(
JSON.stringify({
type: ErrorTypes.NO_BASE_URL,
}),
);
}
if (!apiKey) {
throw new Error(`${endpoint} API key not provided.`);
}
if (!baseURL) {
throw new Error(`${endpoint} Base URL not provided.`);
}
let endpointTokenConfig: EndpointTokenConfig | undefined;
const userId = req.user?.id ?? '';
const cache = standardCache(CacheKeys.TOKEN_CONFIG);
/** tokenConfig is an optional extended property on custom endpoints */
const hasTokenConfig = (endpointConfig as Record<string, unknown>).tokenConfig != null;
const tokenKey =
!hasTokenConfig && (userProvidesKey || userProvidesURL) ? `${endpoint}:${userId}` : endpoint;
const cachedConfig =
!hasTokenConfig &&
FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] &&
(await cache.get(tokenKey));
endpointTokenConfig = (cachedConfig as EndpointTokenConfig) || undefined;
if (
FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] &&
endpointConfig &&
endpointConfig.models?.fetch &&
!endpointTokenConfig
) {
await fetchModels({ apiKey, baseURL, name: endpoint, user: userId, tokenKey });
endpointTokenConfig = (await cache.get(tokenKey)) as EndpointTokenConfig | undefined;
}
const customOptions = buildCustomOptions(endpointConfig, appConfig, endpointTokenConfig);
const clientOptions: Record<string, unknown> = {
reverseProxyUrl: baseURL ?? null,
proxy: PROXY ?? null,
...customOptions,
};
const modelOptions = { ...(model_parameters ?? {}), user: userId };
const finalClientOptions = {
modelOptions,
...clientOptions,
};
const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
if (options != null) {
(options as InitializeResultBase).useLegacyContent = true;
(options as InitializeResultBase).endpointTokenConfig = endpointTokenConfig;
}
const streamRate = clientOptions.streamRate as number | undefined;
if (streamRate) {
(options.llmConfig as Record<string, unknown>)._lc_stream_delay = streamRate;
}
return options;
}
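To make the key/URL resolution above easier to follow, here is a hypothetical helper (names and error messages are illustrative, not from the diff) mirroring the order used by `initializeCustom`: librechat.yaml value, then env substitution, then the per-user value when the config is set to `user_provided`.

```ts
import { extractEnvVariable, envVarRegex } from 'librechat-data-provider';

/**
 * Hypothetical helper mirroring the resolution order above.
 * `configuredValue` is e.g. endpointConfig.apiKey, possibly "${MY_API_KEY}";
 * `userValue` is the value previously stored for the user in the DB.
 */
function resolveCustomValue(configuredValue: string, userValue?: string): string {
  const resolved = extractEnvVariable(configuredValue);
  if (resolved.match(envVarRegex)) {
    // The ${...} placeholder survived, i.e. the referenced environment variable is not set.
    throw new Error('Referenced environment variable is not set.');
  }
  if (resolved === 'user_provided') {
    if (!userValue) {
      throw new Error('User has not provided a value for this endpoint.');
    }
    return userValue;
  }
  return resolved;
}
```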


@ -1 +1,2 @@
export * from './llm';
export * from './initialize';


@ -0,0 +1,91 @@
import path from 'path';
import { EModelEndpoint, AuthKeys } from 'librechat-data-provider';
import type {
BaseInitializeParams,
InitializeResultBase,
GoogleConfigOptions,
GoogleCredentials,
} from '~/types';
import { isEnabled, loadServiceKey, checkUserKeyExpiry } from '~/utils';
import { getGoogleConfig } from './llm';
/**
* Initializes Google/Vertex AI endpoint configuration.
* Supports both API key authentication and service account credentials.
*
* @param params - Configuration parameters
* @returns Promise resolving to Google configuration options
* @throws Error if no valid credentials are provided
*/
export async function initializeGoogle({
req,
endpoint,
model_parameters,
db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
void endpoint;
const appConfig = req.config;
const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env;
const isUserProvided = GOOGLE_KEY === 'user_provided';
const { key: expiresAt } = req.body;
let userKey = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.google);
userKey = await db.getUserKey({ userId: req.user?.id, name: EModelEndpoint.google });
}
let serviceKey: Record<string, unknown> = {};
/** Check if GOOGLE_KEY is provided at all (including 'user_provided') */
const isGoogleKeyProvided =
(GOOGLE_KEY && GOOGLE_KEY.trim() !== '') || (isUserProvided && userKey != null);
if (!isGoogleKeyProvided && loadServiceKey) {
/** Only attempt to load service key if GOOGLE_KEY is not provided */
try {
const serviceKeyPath =
process.env.GOOGLE_SERVICE_KEY_FILE || path.join(process.cwd(), 'data', 'auth.json');
const loadedKey = await loadServiceKey(serviceKeyPath);
if (loadedKey) {
serviceKey = loadedKey;
}
} catch {
// Service key loading failed, but that's okay if not required
serviceKey = {};
}
}
const credentials: GoogleCredentials = isUserProvided
? (userKey as GoogleCredentials)
: {
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
let clientOptions: GoogleConfigOptions = {};
/** @type {undefined | TBaseEndpoint} */
const allConfig = appConfig?.endpoints?.all;
/** @type {undefined | TBaseEndpoint} */
const googleConfig = appConfig?.endpoints?.[EModelEndpoint.google];
if (googleConfig) {
clientOptions.streamRate = googleConfig.streamRate;
clientOptions.titleModel = googleConfig.titleModel;
}
if (allConfig) {
clientOptions.streamRate = allConfig.streamRate;
}
clientOptions = {
reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? undefined,
authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? undefined,
proxy: PROXY ?? undefined,
modelOptions: model_parameters ?? {},
...clientOptions,
};
return getGoogleConfig(credentials, clientOptions);
}
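A condensed sketch (not part of the diff) of the credential precedence `initializeGoogle` implements: the user-stored key wins when `GOOGLE_KEY` is set to `user_provided`; otherwise the env key plus any loaded service-account JSON is used, and the service-account file is only attempted when no `GOOGLE_KEY` is set at all.

```ts
import { AuthKeys } from 'librechat-data-provider';

function buildGoogleCredentials(params: {
  isUserProvided: boolean;
  userKey: unknown;
  serviceKey: Record<string, unknown>; // loaded from data/auth.json or GOOGLE_SERVICE_KEY_FILE
}) {
  const { isUserProvided, userKey, serviceKey } = params;
  return isUserProvided
    ? userKey
    : {
        [AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
        [AuthKeys.GOOGLE_API_KEY]: process.env.GOOGLE_KEY,
      };
}
```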


@ -1,4 +1,7 @@
export * from './anthropic';
export * from './bedrock';
export * from './config';
export * from './custom';
export * from './google';
export * from './models';
export * from './openai';
export * from './anthropic';


@ -1,49 +1,46 @@
const axios = require('axios');
const { logAxiosError, resolveHeaders } = require('@librechat/api');
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');
const {
import axios from 'axios';
import { EModelEndpoint, defaultModels } from 'librechat-data-provider';
import {
fetchModels,
splitAndTrim,
getOpenAIModels,
getGoogleModels,
getBedrockModels,
getAnthropicModels,
} = require('./ModelService');
} from './models';
jest.mock('@librechat/api', () => {
const originalUtils = jest.requireActual('@librechat/api');
jest.mock('axios');
jest.mock('~/cache', () => ({
standardCache: jest.fn().mockImplementation(() => ({
get: jest.fn().mockResolvedValue(undefined),
set: jest.fn().mockResolvedValue(true),
})),
}));
jest.mock('~/utils', () => {
const originalUtils = jest.requireActual('~/utils');
return {
...originalUtils,
processModelData: jest.fn((...args) => {
return originalUtils.processModelData(...args);
}),
processModelData: jest.fn((...args) => originalUtils.processModelData(...args)),
logAxiosError: jest.fn(),
resolveHeaders: jest.fn((options) => options?.headers || {}),
};
});
jest.mock('axios');
jest.mock('~/cache/getLogStores', () =>
jest.fn().mockImplementation(() => ({
get: jest.fn().mockResolvedValue(undefined),
set: jest.fn().mockResolvedValue(true),
})),
);
jest.mock('@librechat/data-schemas', () => ({
...jest.requireActual('@librechat/data-schemas'),
logger: {
error: jest.fn(),
},
}));
jest.mock('./Config/EndpointService', () => ({
config: {
openAIApiKey: 'mockedApiKey',
userProvidedOpenAI: false,
warn: jest.fn(),
debug: jest.fn(),
},
}));
axios.get.mockResolvedValue({
const mockedAxios = axios as jest.Mocked<typeof axios>;
const { logAxiosError, resolveHeaders } = jest.requireMock('~/utils');
mockedAxios.get.mockResolvedValue({
data: {
data: [{ id: 'model-1' }, { id: 'model-2' }],
},
@ -59,7 +56,7 @@ describe('fetchModels', () => {
});
expect(models).toEqual(['model-1', 'model-2']);
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.stringContaining('https://api.test.com/models'),
expect.any(Object),
);
@ -75,7 +72,7 @@ describe('fetchModels', () => {
});
expect(models).toEqual(['model-1', 'model-2']);
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.stringContaining('https://api.test.com/models?user=user123'),
expect.any(Object),
);
@ -95,7 +92,7 @@ describe('fetchModels', () => {
headers: customHeaders,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.stringContaining('https://api.test.com/models'),
expect.objectContaining({
headers: expect.objectContaining({
@ -116,7 +113,7 @@ describe('fetchModels', () => {
headers: null,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.stringContaining('https://api.test.com/models'),
expect.objectContaining({
headers: expect.objectContaining({
@ -135,7 +132,7 @@ describe('fetchModels', () => {
headers: undefined,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.stringContaining('https://api.test.com/models'),
expect.objectContaining({
headers: expect.objectContaining({
@ -173,9 +170,7 @@ describe('fetchModels with createTokenConfig true', () => {
};
beforeEach(() => {
// Clears the mock's history before each test
const _utils = require('@librechat/api');
axios.get.mockResolvedValue({ data });
mockedAxios.get.mockResolvedValue({ data });
});
it('creates and stores token configuration if createTokenConfig is true', async () => {
@ -186,23 +181,23 @@ describe('fetchModels with createTokenConfig true', () => {
createTokenConfig: true,
});
const { processModelData } = require('@librechat/api');
const { processModelData } = jest.requireMock('~/utils');
expect(processModelData).toHaveBeenCalled();
expect(processModelData).toHaveBeenCalledWith(data);
});
});
describe('getOpenAIModels', () => {
let originalEnv;
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
originalEnv = { ...process.env };
axios.get.mockRejectedValue(new Error('Network error'));
mockedAxios.get.mockRejectedValue(new Error('Network error'));
});
afterEach(() => {
process.env = originalEnv;
axios.get.mockReset();
mockedAxios.get.mockReset();
});
it('returns default models when no environment configurations are provided (and fetch fails)', async () => {
@ -223,15 +218,16 @@ describe('getOpenAIModels', () => {
});
it('utilizes proxy configuration when PROXY is set', async () => {
axios.get.mockResolvedValue({
mockedAxios.get.mockResolvedValue({
data: {
data: [],
},
});
process.env.PROXY = 'http://localhost:8888';
process.env.OPENAI_API_KEY = 'mockedApiKey';
await getOpenAIModels({ user: 'user456' });
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
httpsAgent: expect.anything(),
@ -240,35 +236,13 @@ describe('getOpenAIModels', () => {
});
});
describe('getOpenAIModels with mocked config', () => {
it('uses alternative behavior when userProvidedOpenAI is true', async () => {
jest.mock('./Config/EndpointService', () => ({
config: {
openAIApiKey: 'mockedApiKey',
userProvidedOpenAI: true,
},
}));
jest.mock('librechat-data-provider', () => {
const original = jest.requireActual('librechat-data-provider');
return {
...original,
defaultModels: {
[original.EModelEndpoint.openAI]: ['some-default-model'],
},
};
});
jest.resetModules();
const { getOpenAIModels } = require('./ModelService');
const models = await getOpenAIModels({ user: 'user456' });
expect(models).toContain('some-default-model');
});
});
describe('getOpenAIModels sorting behavior', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
axios.get.mockResolvedValue({
originalEnv = { ...process.env };
process.env.OPENAI_API_KEY = 'mockedApiKey';
mockedAxios.get.mockResolvedValue({
data: {
data: [
{ id: 'gpt-3.5-turbo-instruct-0914' },
@ -281,13 +255,16 @@ describe('getOpenAIModels sorting behavior', () => {
});
});
afterEach(() => {
process.env = originalEnv;
jest.clearAllMocks();
});
it('ensures instruct models are listed last', async () => {
const models = await getOpenAIModels({ user: 'user456' });
// Check if the last model is an "instruct" model
expect(models[models.length - 1]).toMatch(/instruct/);
// Check if the "instruct" models are placed at the end
const instructIndexes = models
.map((model, index) => (model.includes('instruct') ? index : -1))
.filter((index) => index !== -1);
@ -306,10 +283,6 @@ describe('getOpenAIModels sorting behavior', () => {
];
expect(models).toEqual(expectedOrder);
});
afterEach(() => {
jest.clearAllMocks();
});
});
describe('fetchModels with Ollama specific logic', () => {
@ -320,7 +293,7 @@ describe('fetchModels with Ollama specific logic', () => {
};
beforeEach(() => {
axios.get.mockResolvedValue(mockOllamaData);
mockedAxios.get.mockResolvedValue(mockOllamaData);
});
afterEach(() => {
@ -336,7 +309,7 @@ describe('fetchModels with Ollama specific logic', () => {
});
expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
headers: {},
timeout: 5000,
});
@ -352,7 +325,7 @@ describe('fetchModels with Ollama specific logic', () => {
email: 'test@example.com',
};
resolveHeaders.mockReturnValueOnce(customHeaders);
(resolveHeaders as jest.Mock).mockReturnValueOnce(customHeaders);
const models = await fetchModels({
user: 'user789',
@ -368,15 +341,15 @@ describe('fetchModels with Ollama specific logic', () => {
headers: customHeaders,
user: userObject,
});
expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
headers: customHeaders,
timeout: 5000,
});
});
it('should handle errors gracefully when fetching Ollama models fails and fallback to OpenAI-compatible fetch', async () => {
axios.get.mockRejectedValueOnce(new Error('Ollama API error'));
axios.get.mockResolvedValueOnce({
mockedAxios.get.mockRejectedValueOnce(new Error('Ollama API error'));
mockedAxios.get.mockResolvedValueOnce({
data: {
data: [{ id: 'fallback-model-1' }, { id: 'fallback-model-2' }],
},
@ -395,7 +368,7 @@ describe('fetchModels with Ollama specific logic', () => {
'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.',
error: expect.any(Error),
});
expect(axios.get).toHaveBeenCalledTimes(2);
expect(mockedAxios.get).toHaveBeenCalledTimes(2);
});
it('should return an empty array if no baseURL is provided', async () => {
@ -408,8 +381,7 @@ describe('fetchModels with Ollama specific logic', () => {
});
it('should not fetch Ollama models if the name does not start with "ollama"', async () => {
// Mock axios to return a different set of models for non-Ollama API calls
axios.get.mockResolvedValue({
mockedAxios.get.mockResolvedValue({
data: {
data: [{ id: 'model-1' }, { id: 'model-2' }],
},
@ -423,16 +395,13 @@ describe('fetchModels with Ollama specific logic', () => {
});
expect(models).toEqual(['model-1', 'model-2']);
expect(axios.get).toHaveBeenCalledWith(
'https://api.test.com/models', // Ensure the correct API endpoint is called
expect.any(Object), // Ensuring some object (headers, etc.) is passed
);
expect(mockedAxios.get).toHaveBeenCalledWith('https://api.test.com/models', expect.any(Object));
});
});
describe('fetchModels URL construction with trailing slashes', () => {
beforeEach(() => {
axios.get.mockResolvedValue({
mockedAxios.get.mockResolvedValue({
data: {
data: [{ id: 'model-1' }, { id: 'model-2' }],
},
@ -451,7 +420,10 @@ describe('fetchModels URL construction with trailing slashes', () => {
name: 'TestAPI',
});
expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object));
expect(mockedAxios.get).toHaveBeenCalledWith(
'https://api.test.com/v1/models',
expect.any(Object),
);
});
it('should handle baseURL without trailing slash normally', async () => {
@ -462,7 +434,10 @@ describe('fetchModels URL construction with trailing slashes', () => {
name: 'TestAPI',
});
expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object));
expect(mockedAxios.get).toHaveBeenCalledWith(
'https://api.test.com/v1/models',
expect.any(Object),
);
});
it('should handle baseURL with multiple trailing slashes', async () => {
@ -473,7 +448,10 @@ describe('fetchModels URL construction with trailing slashes', () => {
name: 'TestAPI',
});
expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object));
expect(mockedAxios.get).toHaveBeenCalledWith(
'https://api.test.com/v1/models',
expect.any(Object),
);
});
it('should correctly append query params after stripping trailing slashes', async () => {
@ -485,7 +463,7 @@ describe('fetchModels URL construction with trailing slashes', () => {
userIdQuery: true,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
'https://api.test.com/v1/models?user=user123',
expect.any(Object),
);
@ -519,6 +497,17 @@ describe('splitAndTrim', () => {
});
describe('getAnthropicModels', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
originalEnv = { ...process.env };
});
afterEach(() => {
process.env = originalEnv;
jest.clearAllMocks();
});
it('returns default models when ANTHROPIC_MODELS is not set', async () => {
delete process.env.ANTHROPIC_MODELS;
const models = await getAnthropicModels();
@ -535,7 +524,7 @@ describe('getAnthropicModels', () => {
delete process.env.ANTHROPIC_MODELS;
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
axios.get.mockResolvedValue({
mockedAxios.get.mockResolvedValue({
data: {
data: [{ id: 'claude-3' }, { id: 'claude-4' }],
},
@ -548,7 +537,7 @@ describe('getAnthropicModels', () => {
name: EModelEndpoint.anthropic,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
headers: {
@ -564,7 +553,7 @@ describe('getAnthropicModels', () => {
'X-Custom-Header': 'custom-value',
};
axios.get.mockResolvedValue({
mockedAxios.get.mockResolvedValue({
data: {
data: [{ id: 'claude-3' }],
},
@ -578,7 +567,7 @@ describe('getAnthropicModels', () => {
headers: customHeaders,
});
expect(axios.get).toHaveBeenCalledWith(
expect(mockedAxios.get).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
headers: {
@ -591,6 +580,16 @@ describe('getAnthropicModels', () => {
});
describe('getGoogleModels', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
originalEnv = { ...process.env };
});
afterEach(() => {
process.env = originalEnv;
});
it('returns default models when GOOGLE_MODELS is not set', () => {
delete process.env.GOOGLE_MODELS;
const models = getGoogleModels();
@ -605,6 +604,16 @@ describe('getGoogleModels', () => {
});
describe('getBedrockModels', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
originalEnv = { ...process.env };
});
afterEach(() => {
process.env = originalEnv;
});
it('returns default models when BEDROCK_AWS_MODELS is not set', () => {
delete process.env.BEDROCK_AWS_MODELS;
const models = getBedrockModels();


@ -0,0 +1,383 @@
import axios from 'axios';
import { logger } from '@librechat/data-schemas';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { CacheKeys, KnownEndpoints, EModelEndpoint, defaultModels } from 'librechat-data-provider';
import type { IUser } from '@librechat/data-schemas';
import {
processModelData,
extractBaseURL,
isUserProvided,
resolveHeaders,
deriveBaseURL,
logAxiosError,
inputSchema,
} from '~/utils';
import { standardCache } from '~/cache';
export interface FetchModelsParams {
/** User ID for API requests */
user?: string;
/** API key for authentication */
apiKey: string;
/** Base URL for the API */
baseURL?: string;
/** Endpoint name (defaults to 'openAI') */
name?: string;
/** Whether directEndpoint was configured */
direct?: boolean;
/** Whether to fetch from Azure */
azure?: boolean;
/** Whether to send user ID as query parameter */
userIdQuery?: boolean;
/** Whether to create token configuration from API response */
createTokenConfig?: boolean;
/** Cache key for token configuration (uses name if omitted) */
tokenKey?: string;
/** Optional headers for the request */
headers?: Record<string, string> | null;
/** Optional user object for header resolution */
userObject?: Partial<IUser>;
}
/**
* Fetches Ollama models from the specified base API path.
* @param baseURL - The Ollama server URL
* @param options - Optional configuration
* @returns Promise resolving to array of model names
*/
async function fetchOllamaModels(
baseURL: string,
options: { headers?: Record<string, string> | null; user?: Partial<IUser> } = {},
): Promise<string[]> {
if (!baseURL) {
return [];
}
const ollamaEndpoint = deriveBaseURL(baseURL);
const resolvedHeaders = resolveHeaders({
headers: options.headers ?? undefined,
user: options.user,
});
const response = await axios.get<{ models: Array<{ name: string }> }>(
`${ollamaEndpoint}/api/tags`,
{
headers: resolvedHeaders,
timeout: 5000,
},
);
return response.data.models.map((tag) => tag.name);
}
/**
* Splits a string by commas and trims each resulting value.
* @param input - The input string to split.
* @returns An array of trimmed values.
*/
export function splitAndTrim(input: string | null | undefined): string[] {
if (!input || typeof input !== 'string') {
return [];
}
return input
.split(',')
.map((item) => item.trim())
.filter(Boolean);
}
/**
* Fetches models from the specified base API path or Azure, based on the provided configuration.
*
* @param params - The parameters for fetching the models.
* @returns A promise that resolves to an array of model identifiers.
*/
export async function fetchModels({
user,
apiKey,
baseURL: _baseURL,
name = EModelEndpoint.openAI,
direct = false,
azure = false,
userIdQuery = false,
createTokenConfig = true,
tokenKey,
headers,
userObject,
}: FetchModelsParams): Promise<string[]> {
let models: string[] = [];
const baseURL = direct ? extractBaseURL(_baseURL ?? '') : _baseURL;
if (!baseURL && !azure) {
return models;
}
if (!apiKey) {
return models;
}
if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) {
try {
return await fetchOllamaModels(baseURL ?? '', { headers, user: userObject });
} catch (ollamaError) {
const logMessage =
'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
logAxiosError({ message: logMessage, error: ollamaError as Error });
}
}
try {
const options: {
headers: Record<string, string>;
timeout: number;
httpsAgent?: HttpsProxyAgent;
} = {
headers: {
...(headers ?? {}),
},
timeout: 5000,
};
if (name === EModelEndpoint.anthropic) {
options.headers = {
'x-api-key': apiKey,
'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
};
} else {
options.headers.Authorization = `Bearer ${apiKey}`;
}
if (process.env.PROXY) {
options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
}
if (process.env.OPENAI_ORGANIZATION && baseURL?.includes('openai')) {
options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
}
const url = new URL(`${(baseURL ?? '').replace(/\/+$/, '')}${azure ? '' : '/models'}`);
if (user && userIdQuery) {
url.searchParams.append('user', user);
}
const res = await axios.get(url.toString(), options);
const input = res.data;
const validationResult = inputSchema.safeParse(input);
if (validationResult.success && createTokenConfig) {
const endpointTokenConfig = processModelData(input);
const cache = standardCache(CacheKeys.TOKEN_CONFIG);
await cache.set(tokenKey ?? name, endpointTokenConfig);
}
models = input.data.map((item: { id: string }) => item.id);
} catch (error) {
const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
logAxiosError({ message: logMessage, error: error as Error });
}
return models;
}
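A usage sketch for the migrated `fetchModels` (endpoint name, URL, and env var are hypothetical; the import path from `@librechat/api` is assumed from the package exports):

```ts
import { fetchModels } from '@librechat/api';

/** Illustrative values only. */
async function listCustomModels(): Promise<string[]> {
  return fetchModels({
    apiKey: process.env.MY_CUSTOM_KEY ?? '',
    baseURL: 'https://api.example.com/v1/', // trailing slashes are stripped before '/models'
    name: 'MyCustomEndpoint',
    user: 'user-123',
    userIdQuery: true, // appends ?user=user-123 to the request URL
  });
}
// Resulting request: GET https://api.example.com/v1/models?user=user-123 with an
// `Authorization: Bearer <apiKey>` header (Anthropic endpoints use `x-api-key` instead).
// Endpoints whose name starts with "ollama" try `<host>/api/tags` first and fall back
// to this OpenAI-compatible path on failure.
```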
/** Options for fetching OpenAI models */
export interface GetOpenAIModelsOptions {
/** User ID for API requests */
user?: string;
/** Whether to fetch from Azure */
azure?: boolean;
/** Whether to fetch models for the Assistants endpoint */
assistants?: boolean;
/** OpenAI API key (if not using environment variable) */
openAIApiKey?: string;
/** Whether user provides their own API key */
userProvidedOpenAI?: boolean;
}
/**
* Fetches models from OpenAI or Azure based on the provided options.
* @param opts - Options for fetching models
* @param _models - Fallback models array
* @returns Promise resolving to array of model IDs
*/
export async function fetchOpenAIModels(
opts: GetOpenAIModelsOptions,
_models: string[] = [],
): Promise<string[]> {
let models = _models.slice() ?? [];
const apiKey = opts.openAIApiKey ?? process.env.OPENAI_API_KEY;
const openaiBaseURL = 'https://api.openai.com/v1';
let baseURL = openaiBaseURL;
let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;
if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
} else if (opts.azure) {
return models;
}
if (reverseProxyUrl) {
baseURL = extractBaseURL(reverseProxyUrl) ?? openaiBaseURL;
}
const modelsCache = standardCache(CacheKeys.MODEL_QUERIES);
const cachedModels = await modelsCache.get(baseURL);
if (cachedModels) {
return cachedModels as string[];
}
if (baseURL || opts.azure) {
models = await fetchModels({
apiKey: apiKey ?? '',
baseURL,
azure: opts.azure,
user: opts.user,
name: EModelEndpoint.openAI,
});
}
if (models.length === 0) {
return _models;
}
if (baseURL === openaiBaseURL) {
const regex = /(text-davinci-003|gpt-|o\d+)/;
const excludeRegex = /audio|realtime/;
models = models.filter((model) => regex.test(model) && !excludeRegex.test(model));
const instructModels = models.filter((model) => model.includes('instruct'));
const otherModels = models.filter((model) => !model.includes('instruct'));
models = otherModels.concat(instructModels);
}
await modelsCache.set(baseURL, models);
return models;
}
/**
* Loads the default models for OpenAI or Azure.
* @param opts - Options for getting models
* @returns Promise resolving to array of model IDs
*/
export async function getOpenAIModels(opts: GetOpenAIModelsOptions = {}): Promise<string[]> {
let models = defaultModels[EModelEndpoint.openAI];
if (opts.assistants) {
models = defaultModels[EModelEndpoint.assistants];
} else if (opts.azure) {
models = defaultModels[EModelEndpoint.azureAssistants];
}
let key: string;
if (opts.assistants) {
key = 'ASSISTANTS_MODELS';
} else if (opts.azure) {
key = 'AZURE_OPENAI_MODELS';
} else {
key = 'OPENAI_MODELS';
}
if (process.env[key]) {
return splitAndTrim(process.env[key]);
}
if (opts.userProvidedOpenAI) {
return models;
}
return await fetchOpenAIModels(opts, models);
}
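A quick sketch of the resolution order in `getOpenAIModels` (values are illustrative; not part of the diff):

```ts
import { getOpenAIModels } from '@librechat/api';

async function demo(): Promise<void> {
  // 1) An env list wins and short-circuits any fetch.
  process.env.OPENAI_MODELS = 'gpt-4o, gpt-4o-mini';
  console.log(await getOpenAIModels()); // ['gpt-4o', 'gpt-4o-mini']

  // 2) With user-provided keys, the static defaults are returned without a network call.
  delete process.env.OPENAI_MODELS;
  console.log(await getOpenAIModels({ userProvidedOpenAI: true }));

  // 3) Azure and Assistants read AZURE_OPENAI_MODELS / ASSISTANTS_MODELS respectively,
  //    otherwise falling back to their own default lists.
  console.log(await getOpenAIModels({ azure: true }));
}
```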
/**
* Fetches models from the Anthropic API.
* @param opts - Options for fetching models
* @param _models - Fallback models array
* @returns Promise resolving to array of model IDs
*/
export async function fetchAnthropicModels(
opts: { user?: string } = {},
_models: string[] = [],
): Promise<string[]> {
let models = _models.slice() ?? [];
const apiKey = process.env.ANTHROPIC_API_KEY;
const anthropicBaseURL = 'https://api.anthropic.com/v1';
let baseURL = anthropicBaseURL;
const reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;
if (reverseProxyUrl) {
baseURL = extractBaseURL(reverseProxyUrl) ?? anthropicBaseURL;
}
if (!apiKey) {
return models;
}
const modelsCache = standardCache(CacheKeys.MODEL_QUERIES);
const cachedModels = await modelsCache.get(baseURL);
if (cachedModels) {
return cachedModels as string[];
}
if (baseURL) {
models = await fetchModels({
apiKey,
baseURL,
user: opts.user,
name: EModelEndpoint.anthropic,
tokenKey: EModelEndpoint.anthropic,
});
}
if (models.length === 0) {
return _models;
}
await modelsCache.set(baseURL, models);
return models;
}
/**
* Gets Anthropic models from environment or API.
* @param opts - Options for fetching models
* @returns Promise resolving to array of model IDs
*/
export async function getAnthropicModels(opts: { user?: string } = {}): Promise<string[]> {
const models = defaultModels[EModelEndpoint.anthropic];
if (process.env.ANTHROPIC_MODELS) {
return splitAndTrim(process.env.ANTHROPIC_MODELS);
}
if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
return models;
}
try {
return await fetchAnthropicModels(opts, models);
} catch (error) {
logger.error('Error fetching Anthropic models:', error);
return models;
}
}
/**
* Gets Google models from environment or defaults.
* @returns Array of model IDs
*/
export function getGoogleModels(): string[] {
let models = defaultModels[EModelEndpoint.google];
if (process.env.GOOGLE_MODELS) {
models = splitAndTrim(process.env.GOOGLE_MODELS);
}
return models;
}
/**
* Gets Bedrock models from environment or defaults.
* @returns Array of model IDs
*/
export function getBedrockModels(): string[] {
let models = defaultModels[EModelEndpoint.bedrock];
if (process.env.BEDROCK_AWS_MODELS) {
models = splitAndTrim(process.env.BEDROCK_AWS_MODELS);
}
return models;
}
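Unlike the OpenAI/Anthropic getters, the Google and Bedrock getters are synchronous and never hit the network; a small sketch of how they and the shared `splitAndTrim` parser behave (model IDs are illustrative, import path assumed):

```ts
import { getBedrockModels, getGoogleModels, splitAndTrim } from '@librechat/api';

// Env override wins, otherwise the static defaults are returned.
process.env.BEDROCK_AWS_MODELS =
  'anthropic.claude-3-5-sonnet-20240620-v1:0, meta.llama3-70b-instruct-v1:0';
console.log(getBedrockModels()); // the two IDs above, trimmed

delete process.env.GOOGLE_MODELS;
console.log(getGoogleModels()); // the default model list for the google endpoint

// splitAndTrim is the shared parser; empty segments are dropped.
console.log(splitAndTrim(' a, b ,, c ')); // ['a', 'b', 'c']
```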


@ -1,13 +1,11 @@
import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
import type {
InitializeOpenAIOptionsParams,
BaseInitializeParams,
InitializeResultBase,
OpenAIConfigOptions,
LLMConfigResult,
UserKeyValues,
} from '~/types';
import { getAzureCredentials } from '~/utils/azure';
import { isUserProvided } from '~/utils/common';
import { resolveHeaders } from '~/utils/env';
import { getAzureCredentials, resolveHeaders, isUserProvided, checkUserKeyExpiry } from '~/utils';
import { getOpenAIConfig } from './config';
/**
@ -18,25 +16,18 @@ import { getOpenAIConfig } from './config';
* @returns Promise resolving to OpenAI configuration options
* @throws Error if API key is missing or user key has expired
*/
export const initializeOpenAI = async ({
export async function initializeOpenAI({
req,
appConfig,
overrideModel,
endpointOption,
overrideEndpoint,
getUserKeyValues,
checkUserKeyExpiry,
}: InitializeOpenAIOptionsParams): Promise<LLMConfigResult> => {
endpoint,
model_parameters,
db,
}: BaseInitializeParams): Promise<InitializeResultBase> {
const appConfig = req.config;
const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
process.env;
const { key: expiresAt } = req.body;
const modelName = overrideModel ?? req.body.model;
const endpoint = overrideEndpoint ?? req.body.endpoint;
if (!endpoint) {
throw new Error('Endpoint is required');
}
const modelName = model_parameters?.model as string | undefined;
const credentials = {
[EModelEndpoint.openAI]: OPENAI_API_KEY,
@ -54,7 +45,7 @@ export const initializeOpenAI = async ({
let userValues: UserKeyValues | null = null;
if (expiresAt && (userProvidesKey || userProvidesURL)) {
checkUserKeyExpiry(expiresAt, endpoint);
userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
userValues = await db.getUserKeyValues({ userId: req.user?.id ?? '', name: endpoint });
}
let apiKey = userProvidesKey
@ -71,7 +62,8 @@ export const initializeOpenAI = async ({
};
const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI;
const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI];
const azureConfig = isAzureOpenAI && appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
let isServerless = false;
if (isAzureOpenAI && azureConfig) {
const { modelGroupMap, groupMap } = azureConfig;
@ -85,6 +77,7 @@ export const initializeOpenAI = async ({
modelGroupMap,
groupMap,
});
isServerless = serverless === true;
clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = resolveHeaders({
@ -99,9 +92,9 @@ export const initializeOpenAI = async ({
}
apiKey = azureOptions.azureOpenAIApiKey;
clientOptions.azure = !serverless ? azureOptions : undefined;
clientOptions.azure = !isServerless ? azureOptions : undefined;
if (serverless === true) {
if (isServerless) {
clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
@ -130,9 +123,9 @@ export const initializeOpenAI = async ({
}
const modelOptions = {
...endpointOption.model_parameters,
...(model_parameters ?? {}),
model: modelName,
user: req.user.id,
user: req.user?.id,
};
const finalClientOptions: OpenAIConfigOptions = {
@ -142,8 +135,13 @@ export const initializeOpenAI = async ({
const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint);
const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI];
const allConfig = appConfig.endpoints?.all;
/** Set useLegacyContent for Azure serverless deployments */
if (isServerless) {
(options as InitializeResultBase).useLegacyContent = true;
}
const openAIConfig = appConfig?.endpoints?.[EModelEndpoint.openAI];
const allConfig = appConfig?.endpoints?.all;
const azureRate = modelName?.includes('gpt-4') ? 30 : 17;
let streamRate: number | undefined;
@ -163,4 +161,4 @@ export const initializeOpenAI = async ({
}
return options;
};
}
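A condensed sketch of the Azure-serverless branch above (the `AzureOptions` shape is assumed for illustration; the real values come from `getAzureCredentials` and `mapModelToAzureConfig`): serverless deployments skip the `azure` client options and pass the API version as a default query parameter instead, and the result is additionally flagged with `useLegacyContent`.

```ts
interface AzureOptions {
  azureOpenAIApiKey: string;
  azureOpenAIApiVersion?: string;
}

function applyAzureMode(
  clientOptions: { azure?: AzureOptions; defaultQuery?: Record<string, string> },
  azureOptions: AzureOptions,
  isServerless: boolean,
): void {
  // Non-serverless Azure keeps the structured azure options on the client config.
  clientOptions.azure = isServerless ? undefined : azureOptions;
  if (isServerless && azureOptions.azureOpenAIApiVersion) {
    // Serverless deployments send the API version as a default query parameter.
    clientOptions.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
  }
}
```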


@ -1,9 +1,10 @@
import { MCPTokenStorage } from '~/mcp/oauth/tokens';
import { decryptV2 } from '~/crypto';
import type { TokenMethods, IToken } from '@librechat/data-schemas';
import { Types } from 'mongoose';
import { decryptV2 } from '@librechat/data-schemas';
import type { TokenMethods, IToken } from '@librechat/data-schemas';
import { MCPTokenStorage } from '~/mcp/oauth/tokens';
jest.mock('~/crypto', () => ({
jest.mock('@librechat/data-schemas', () => ({
...jest.requireActual('@librechat/data-schemas'),
decryptV2: jest.fn(),
}));


@ -1,8 +1,7 @@
import { logger } from '@librechat/data-schemas';
import { logger, encryptV2, decryptV2 } from '@librechat/data-schemas';
import type { OAuthTokens, OAuthClientInformation } from '@modelcontextprotocol/sdk/shared/auth.js';
import type { TokenMethods, IToken } from '@librechat/data-schemas';
import type { MCPOAuthTokens, ExtendedOAuthTokens, OAuthMetadata } from './types';
import { encryptV2, decryptV2 } from '~/crypto';
import { isSystemUserId } from '~/mcp/enum';
interface StoreTokensParams {


@ -1,9 +1,8 @@
import axios from 'axios';
import { logger } from '@librechat/data-schemas';
import { logger, encryptV2, decryptV2 } from '@librechat/data-schemas';
import { TokenExchangeMethodEnum } from 'librechat-data-provider';
import type { TokenMethods } from '@librechat/data-schemas';
import type { AxiosError } from 'axios';
import { encryptV2, decryptV2 } from '~/crypto';
import { logAxiosError } from '~/utils';
export function createHandleOAuthToken({


@ -0,0 +1,711 @@
export interface ShadcnComponent {
componentName: string;
importDocs: string;
usageDocs: string;
}
/** Essential Components */
const essentialComponents: Record<string, ShadcnComponent> = {
avatar: {
componentName: 'Avatar',
importDocs: 'import { Avatar, AvatarFallback, AvatarImage } from "/components/ui/avatar"',
usageDocs: `
<Avatar>
<AvatarImage src="https://github.com/shadcn.png" />
<AvatarFallback>CN</AvatarFallback>
</Avatar>`,
},
button: {
componentName: 'Button',
importDocs: 'import { Button } from "/components/ui/button"',
usageDocs: `
<Button variant="outline">Button</Button>`,
},
card: {
componentName: 'Card',
importDocs: `
import {
Card,
CardContent,
CardDescription,
CardFooter,
CardHeader,
CardTitle,
} from "/components/ui/card"`,
usageDocs: `
<Card>
<CardHeader>
<CardTitle>Card Title</CardTitle>
<CardDescription>Card Description</CardDescription>
</CardHeader>
<CardContent>
<p>Card Content</p>
</CardContent>
<CardFooter>
<p>Card Footer</p>
</CardFooter>
</Card>`,
},
checkbox: {
componentName: 'Checkbox',
importDocs: 'import { Checkbox } from "/components/ui/checkbox"',
usageDocs: '<Checkbox />',
},
input: {
componentName: 'Input',
importDocs: 'import { Input } from "/components/ui/input"',
usageDocs: '<Input />',
},
label: {
componentName: 'Label',
importDocs: 'import { Label } from "/components/ui/label"',
usageDocs: '<Label htmlFor="email">Your email address</Label>',
},
radioGroup: {
componentName: 'RadioGroup',
importDocs: `
import { Label } from "/components/ui/label"
import { RadioGroup, RadioGroupItem } from "/components/ui/radio-group"`,
usageDocs: `
<RadioGroup defaultValue="option-one">
<div className="flex items-center space-x-2">
<RadioGroupItem value="option-one" id="option-one" />
<Label htmlFor="option-one">Option One</Label>
</div>
<div className="flex items-center space-x-2">
<RadioGroupItem value="option-two" id="option-two" />
<Label htmlFor="option-two">Option Two</Label>
</div>
</RadioGroup>`,
},
select: {
componentName: 'Select',
importDocs: `
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "/components/ui/select"`,
usageDocs: `
<Select>
<SelectTrigger className="w-[180px]">
<SelectValue placeholder="Theme" />
</SelectTrigger>
<SelectContent>
<SelectItem value="light">Light</SelectItem>
<SelectItem value="dark">Dark</SelectItem>
<SelectItem value="system">System</SelectItem>
</SelectContent>
</Select>`,
},
textarea: {
componentName: 'Textarea',
importDocs: 'import { Textarea } from "/components/ui/textarea"',
usageDocs: '<Textarea />',
},
};
/** Extra Components */
const extraComponents: Record<string, ShadcnComponent> = {
accordion: {
componentName: 'Accordion',
importDocs: `
import {
Accordion,
AccordionContent,
AccordionItem,
AccordionTrigger,
} from "/components/ui/accordion"`,
usageDocs: `
<Accordion type="single" collapsible>
<AccordionItem value="item-1">
<AccordionTrigger>Is it accessible?</AccordionTrigger>
<AccordionContent>
Yes. It adheres to the WAI-ARIA design pattern.
</AccordionContent>
</AccordionItem>
</Accordion>`,
},
alertDialog: {
componentName: 'AlertDialog',
importDocs: `
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
} from "/components/ui/alert-dialog"`,
usageDocs: `
<AlertDialog>
<AlertDialogTrigger>Open</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>Are you absolutely sure?</AlertDialogTitle>
<AlertDialogDescription>
This action cannot be undone.
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>Cancel</AlertDialogCancel>
<AlertDialogAction>Continue</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>`,
},
alert: {
componentName: 'Alert',
importDocs: `
import {
Alert,
AlertDescription,
AlertTitle,
} from "/components/ui/alert"`,
usageDocs: `
<Alert>
<AlertTitle>Heads up!</AlertTitle>
<AlertDescription>
You can add components to your app using the cli.
</AlertDescription>
</Alert>`,
},
aspectRatio: {
componentName: 'AspectRatio',
importDocs: 'import { AspectRatio } from "/components/ui/aspect-ratio"',
usageDocs: `
<AspectRatio ratio={16 / 9}>
<Image src="..." alt="Image" className="rounded-md object-cover" />
</AspectRatio>`,
},
badge: {
componentName: 'Badge',
importDocs: 'import { Badge } from "/components/ui/badge"',
usageDocs: '<Badge>Badge</Badge>',
},
calendar: {
componentName: 'Calendar',
importDocs: 'import { Calendar } from "/components/ui/calendar"',
usageDocs: '<Calendar />',
},
carousel: {
componentName: 'Carousel',
importDocs: `
import {
Carousel,
CarouselContent,
CarouselItem,
CarouselNext,
CarouselPrevious,
} from "/components/ui/carousel"`,
usageDocs: `
<Carousel>
<CarouselContent>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
</CarouselContent>
<CarouselPrevious />
<CarouselNext />
</Carousel>`,
},
collapsible: {
componentName: 'Collapsible',
importDocs: `
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "/components/ui/collapsible"`,
usageDocs: `
<Collapsible>
<CollapsibleTrigger>Can I use this in my project?</CollapsibleTrigger>
<CollapsibleContent>
Yes. Free to use for personal and commercial projects. No attribution required.
</CollapsibleContent>
</Collapsible>`,
},
dialog: {
componentName: 'Dialog',
importDocs: `
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "/components/ui/dialog"`,
usageDocs: `
<Dialog>
<DialogTrigger>Open</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>Are you sure absolutely sure?</DialogTitle>
<DialogDescription>
This action cannot be undone.
</DialogDescription>
</DialogHeader>
</DialogContent>
</Dialog>`,
},
dropdownMenu: {
componentName: 'DropdownMenu',
importDocs: `
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuLabel,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "/components/ui/dropdown-menu"`,
usageDocs: `
<DropdownMenu>
<DropdownMenuTrigger>Open</DropdownMenuTrigger>
<DropdownMenuContent>
<DropdownMenuLabel>My Account</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuItem>Profile</DropdownMenuItem>
<DropdownMenuItem>Billing</DropdownMenuItem>
<DropdownMenuItem>Team</DropdownMenuItem>
<DropdownMenuItem>Subscription</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>`,
},
menubar: {
componentName: 'Menubar',
importDocs: `
import {
Menubar,
MenubarContent,
MenubarItem,
MenubarMenu,
MenubarSeparator,
MenubarShortcut,
MenubarTrigger,
} from "/components/ui/menubar"`,
usageDocs: `
<Menubar>
<MenubarMenu>
<MenubarTrigger>File</MenubarTrigger>
<MenubarContent>
<MenubarItem>
New Tab <MenubarShortcut>T</MenubarShortcut>
</MenubarItem>
<MenubarItem>New Window</MenubarItem>
<MenubarSeparator />
<MenubarItem>Share</MenubarItem>
<MenubarSeparator />
<MenubarItem>Print</MenubarItem>
</MenubarContent>
</MenubarMenu>
</Menubar>`,
},
navigationMenu: {
componentName: 'NavigationMenu',
importDocs: `
import {
NavigationMenu,
NavigationMenuContent,
NavigationMenuItem,
NavigationMenuLink,
NavigationMenuList,
NavigationMenuTrigger,
navigationMenuTriggerStyle,
} from "/components/ui/navigation-menu"`,
usageDocs: `
<NavigationMenu>
<NavigationMenuList>
<NavigationMenuItem>
<NavigationMenuTrigger>Item One</NavigationMenuTrigger>
<NavigationMenuContent>
<NavigationMenuLink>Link</NavigationMenuLink>
</NavigationMenuContent>
</NavigationMenuItem>
</NavigationMenuList>
</NavigationMenu>`,
},
popover: {
componentName: 'Popover',
importDocs: `
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "/components/ui/popover"`,
usageDocs: `
<Popover>
<PopoverTrigger>Open</PopoverTrigger>
<PopoverContent>Place content for the popover here.</PopoverContent>
</Popover>`,
},
progress: {
componentName: 'Progress',
importDocs: 'import { Progress } from "/components/ui/progress"',
usageDocs: '<Progress value={33} />',
},
separator: {
componentName: 'Separator',
importDocs: 'import { Separator } from "/components/ui/separator"',
usageDocs: '<Separator />',
},
sheet: {
componentName: 'Sheet',
importDocs: `
import {
Sheet,
SheetContent,
SheetDescription,
SheetHeader,
SheetTitle,
SheetTrigger,
} from "/components/ui/sheet"`,
usageDocs: `
<Sheet>
<SheetTrigger>Open</SheetTrigger>
<SheetContent>
<SheetHeader>
<SheetTitle>Are you sure absolutely sure?</SheetTitle>
<SheetDescription>
This action cannot be undone.
</SheetDescription>
</SheetHeader>
</SheetContent>
</Sheet>`,
},
skeleton: {
componentName: 'Skeleton',
importDocs: 'import { Skeleton } from "/components/ui/skeleton"',
usageDocs: '<Skeleton className="w-[100px] h-[20px] rounded-full" />',
},
slider: {
componentName: 'Slider',
importDocs: 'import { Slider } from "/components/ui/slider"',
usageDocs: '<Slider defaultValue={[33]} max={100} step={1} />',
},
switch: {
componentName: 'Switch',
importDocs: 'import { Switch } from "/components/ui/switch"',
usageDocs: '<Switch />',
},
table: {
componentName: 'Table',
importDocs: `
import {
Table,
TableBody,
TableCaption,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "/components/ui/table"`,
usageDocs: `
<Table>
<TableCaption>A list of your recent invoices.</TableCaption>
<TableHeader>
<TableRow>
<TableHead className="w-[100px]">Invoice</TableHead>
<TableHead>Status</TableHead>
<TableHead>Method</TableHead>
<TableHead className="text-right">Amount</TableHead>
</TableRow>
</TableHeader>
<TableBody>
<TableRow>
<TableCell className="font-medium">INV001</TableCell>
<TableCell>Paid</TableCell>
<TableCell>Credit Card</TableCell>
<TableCell className="text-right">$250.00</TableCell>
</TableRow>
</TableBody>
</Table>`,
},
tabs: {
componentName: 'Tabs',
importDocs: `
import {
Tabs,
TabsContent,
TabsList,
TabsTrigger,
} from "/components/ui/tabs"`,
usageDocs: `
<Tabs defaultValue="account" className="w-[400px]">
<TabsList>
<TabsTrigger value="account">Account</TabsTrigger>
<TabsTrigger value="password">Password</TabsTrigger>
</TabsList>
<TabsContent value="account">Make changes to your account here.</TabsContent>
<TabsContent value="password">Change your password here.</TabsContent>
</Tabs>`,
},
toast: {
componentName: 'Toast',
importDocs: `
import { useToast } from "/components/ui/use-toast"
import { Button } from "/components/ui/button"`,
usageDocs: `
export function ToastDemo() {
const { toast } = useToast()
return (
<Button
onClick={() => {
toast({
title: "Scheduled: Catch up",
description: "Friday, February 10, 2023 at 5:57 PM",
})
}}
>
Show Toast
</Button>
)
}`,
},
toggle: {
componentName: 'Toggle',
importDocs: 'import { Toggle } from "/components/ui/toggle"',
usageDocs: '<Toggle>Toggle</Toggle>',
},
tooltip: {
componentName: 'Tooltip',
importDocs: `
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "/components/ui/tooltip"`,
usageDocs: `
<TooltipProvider>
<Tooltip>
<TooltipTrigger>Hover</TooltipTrigger>
<TooltipContent>
<p>Add to library</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>`,
},
};
/** Extra Components continued */
const moreExtraComponents: Record<string, ShadcnComponent> = {
calendar: {
componentName: 'Calendar',
importDocs: 'import { Calendar } from "/components/ui/calendar"',
usageDocs: '<Calendar />',
},
carousel: {
componentName: 'Carousel',
importDocs: `
import {
Carousel,
CarouselContent,
CarouselItem,
CarouselNext,
CarouselPrevious,
} from "/components/ui/carousel"`,
usageDocs: `
<Carousel>
<CarouselContent>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
</CarouselContent>
<CarouselPrevious />
<CarouselNext />
</Carousel>`,
},
collapsible: {
componentName: 'Collapsible',
importDocs: `
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "/components/ui/collapsible"`,
usageDocs: `
<Collapsible>
<CollapsibleTrigger>Can I use this in my project?</CollapsibleTrigger>
<CollapsibleContent>
Yes. Free to use for personal and commercial projects. No attribution required.
</CollapsibleContent>
</Collapsible>`,
},
dialog: {
componentName: 'Dialog',
importDocs: `
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "/components/ui/dialog"`,
usageDocs: `
<Dialog>
<DialogTrigger>Open</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>Are you sure absolutely sure?</DialogTitle>
<DialogDescription>
This action cannot be undone.
</DialogDescription>
</DialogHeader>
</DialogContent>
</Dialog>`,
},
dropdownMenu: {
componentName: 'DropdownMenu',
importDocs: `
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuLabel,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "/components/ui/dropdown-menu"`,
usageDocs: `
<DropdownMenu>
<DropdownMenuTrigger>Open</DropdownMenuTrigger>
<DropdownMenuContent>
<DropdownMenuLabel>My Account</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuItem>Profile</DropdownMenuItem>
<DropdownMenuItem>Billing</DropdownMenuItem>
<DropdownMenuItem>Team</DropdownMenuItem>
<DropdownMenuItem>Subscription</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>`,
},
menubar: {
componentName: 'Menubar',
importDocs: `
import {
Menubar,
MenubarContent,
MenubarItem,
MenubarMenu,
MenubarSeparator,
MenubarShortcut,
MenubarTrigger,
} from "/components/ui/menubar"`,
usageDocs: `
<Menubar>
<MenubarMenu>
<MenubarTrigger>File</MenubarTrigger>
<MenubarContent>
<MenubarItem>
New Tab <MenubarShortcut>T</MenubarShortcut>
</MenubarItem>
<MenubarItem>New Window</MenubarItem>
<MenubarSeparator />
<MenubarItem>Share</MenubarItem>
<MenubarSeparator />
<MenubarItem>Print</MenubarItem>
</MenubarContent>
</MenubarMenu>
</Menubar>`,
},
navigationMenu: {
componentName: 'NavigationMenu',
importDocs: `
import {
NavigationMenu,
NavigationMenuContent,
NavigationMenuItem,
NavigationMenuLink,
NavigationMenuList,
NavigationMenuTrigger,
navigationMenuTriggerStyle,
} from "/components/ui/navigation-menu"`,
usageDocs: `
<NavigationMenu>
<NavigationMenuList>
<NavigationMenuItem>
<NavigationMenuTrigger>Item One</NavigationMenuTrigger>
<NavigationMenuContent>
<NavigationMenuLink>Link</NavigationMenuLink>
</NavigationMenuContent>
</NavigationMenuItem>
</NavigationMenuList>
</NavigationMenu>`,
},
popover: {
componentName: 'Popover',
importDocs: `
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "/components/ui/popover"`,
usageDocs: `
<Popover>
<PopoverTrigger>Open</PopoverTrigger>
<PopoverContent>Place content for the popover here.</PopoverContent>
</Popover>`,
},
progress: {
componentName: 'Progress',
importDocs: 'import { Progress } from "/components/ui/progress"',
usageDocs: '<Progress value={33} />',
},
separator: {
componentName: 'Separator',
importDocs: 'import { Separator } from "/components/ui/separator"',
usageDocs: '<Separator />',
},
sheet: {
componentName: 'Sheet',
importDocs: `
import {
Sheet,
SheetContent,
SheetDescription,
SheetHeader,
SheetTitle,
SheetTrigger,
} from "/components/ui/sheet"`,
usageDocs: `
<Sheet>
<SheetTrigger>Open</SheetTrigger>
<SheetContent>
<SheetHeader>
<SheetTitle>Are you sure absolutely sure?</SheetTitle>
<SheetDescription>
This action cannot be undone.
</SheetDescription>
</SheetHeader>
</SheetContent>
</Sheet>`,
},
skeleton: {
componentName: 'Skeleton',
importDocs: 'import { Skeleton } from "/components/ui/skeleton"',
usageDocs: '<Skeleton className="w-[100px] h-[20px] rounded-full" />',
},
slider: {
componentName: 'Slider',
importDocs: 'import { Slider } from "/components/ui/slider"',
usageDocs: '<Slider defaultValue={[33]} max={100} step={1} />',
},
switch: {
componentName: 'Switch',
importDocs: 'import { Switch } from "/components/ui/switch"',
usageDocs: '<Switch />',
},
};
export const components: Record<string, ShadcnComponent> = {
...essentialComponents,
...extraComponents,
...moreExtraComponents,
};

View file

@ -0,0 +1,49 @@
import dedent from 'dedent';
import type { ShadcnComponent } from './components';
/**
* Generate system prompt for AI-assisted React component creation
* @param options - Configuration options
* @param options.components - Documentation for shadcn components
* @param options.useXML - Whether to use XML-style formatting for component instructions
* @returns The generated system prompt
*/
export function generateShadcnPrompt(options: {
components: Record<string, ShadcnComponent>;
useXML?: boolean;
}): string {
const { components, useXML = false } = options;
const systemPrompt = dedent`
## Additional Artifact Instructions for React Components: "application/vnd.react"
There are some prestyled components (primitives) available for use. Please use your best judgement to use any of these components if the app calls for one.
Here are the components that are available, along with how to import them, and how to use them:
${Object.values(components)
.map((component) => {
if (useXML) {
return dedent`
<component>
<name>${component.componentName}</name>
<import-instructions>${component.importDocs}</import-instructions>
<usage-instructions>${component.usageDocs}</usage-instructions>
</component>
`;
}
return dedent`
# ${component.componentName}
## Import Instructions
${component.importDocs}
## Usage Instructions
${component.usageDocs}
`;
})
.join('\n\n')}
`;
return systemPrompt;
}
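For orientation, a minimal usage sketch of the generator above (the relative import paths mirror the ones already used in this package; the `anthropicPrompt`/`defaultPrompt` names are illustrative):

import { generateShadcnPrompt } from './generate';
import { components } from './components';

// XML-style component docs are intended for Anthropic-style prompts;
// the flat Markdown form is used otherwise.
const anthropicPrompt = generateShadcnPrompt({ components, useXML: true });
const defaultPrompt = generateShadcnPrompt({ components });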

View file

@ -0,0 +1,426 @@
import dedent from 'dedent';
import { EModelEndpoint, ArtifactModes } from 'librechat-data-provider';
import { generateShadcnPrompt } from './generate';
import { components } from './components';
const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
# Good artifacts are...
- Substantial content (>15 lines)
- Content that the user is likely to modify, iterate on, or take ownership of
- Self-contained, complex content that can be understood on its own, without context from the conversation
- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations)
- Content likely to be referenced or reused multiple times
# Don't use artifacts for...
- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples
- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept
- Suggestions, commentary, or feedback on existing artifacts
- Conversational or explanatory content that doesn't represent a standalone piece of work
- Content that is dependent on the current conversational context to be useful
- Content that is unlikely to be modified or iterated upon by the user
- Requests from users that appear to be one-off questions
# Usage notes
- One artifact per message unless specifically requested
- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users.
- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions.
- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users.
- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation.
- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments.
- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly.
<artifact_instructions>
When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps:
1. Create the artifact using the following format:
:::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"}
\`\`\`
Your artifact content here
\`\`\`
:::
2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
3. Include a \`title\` attribute to provide a brief title or description of the content.
4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute:
- HTML: "text/html"
- The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- The only place external scripts can be imported from is https://cdnjs.cloudflare.com
- SVG: "image/svg+xml"
- The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
- The assistant should specify the viewbox of the SVG rather than defining a width/height
- Markdown: "text/markdown" or "text/md"
- The user interface will render Markdown content placed within the artifact tags.
- Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
- Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
- Mermaid Diagrams: "application/vnd.mermaid"
- The user interface will render Mermaid diagrams placed within the artifact tags.
- React Components: "application/vnd.react"
- Use this for displaying either: React elements, e.g. \`<strong>Hello World!</strong>\`, React pure functional components, e.g. \`() => <strong>Hello World!</strong>\`, React functional components with Hooks, or React component classes
- When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export.
- Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`).
- Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\`
- The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`<Camera color="red" size={48} />\`
- The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \`<LineChart ...><XAxis dataKey="name"> ...\`
- The three.js library is available to be imported, e.g. \`import * as THREE from "three";\`
- The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\`
- The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\`
- The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary.
- Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`.
- NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses.
- If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component.
5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...".
6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact.
7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type.
</artifact_instructions>
Here are some examples of correct usage of artifacts:
<examples>
<example_docstring>
This example demonstrates how to create a Mermaid artifact for a simple flow chart.
</example_docstring>
<example>
<user_query>Can you create a simple flow chart showing the process of making tea using Mermaid?</user_query>
<assistant_response>
Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax:
:::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"}
\`\`\`mermaid
graph TD
A[Start] --> B{Water boiled?}
B -->|Yes| C[Add tea leaves to cup]
B -->|No| D[Boil water]
D --> B
C --> E[Pour boiling water into cup]
E --> F[Steep tea for desired time]
F --> G[Remove tea leaves]
G --> H[Add milk or sugar, if desired]
H --> I[Enjoy your tea!]
I --> J[End]
\`\`\`
:::
This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process:
1. Start
2. Check if water is boiled
3. If not boiled, boil the water
4. Once water is boiled, add tea leaves to the cup
5. Pour boiling water into the cup
6. Steep the tea for the desired time
7. Remove the tea leaves
8. Optionally add milk or sugar
9. Enjoy your tea!
10. End
This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions!
</assistant_response>
</example>
<example>
<user_query>Create a simple React counter component</user_query>
<assistant_response>
Here's a simple React counter component:
:::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"}
\`\`\`
import { useState } from 'react';
export default function Counter() {
const [count, setCount] = useState(0);
return (
<div className="p-4">
<p className="mb-2">Count: {count}</p>
<button className="bg-blue-500 text-white px-4 py-2 rounded" onClick={() => setCount(count + 1)}>
Increment
</button>
</div>
);
}
\`\`\`
:::
This component creates a simple counter with an increment button.
</assistant_response>
</example>
<example>
<user_query>Create a basic HTML structure for a blog post</user_query>
<assistant_response>
Here's a basic HTML structure for a blog post:
:::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
\`\`\`
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>My Blog Post</title>
<style>
body { font-family: Arial, sans-serif; line-height: 1.6; max-width: 800px; margin: 0 auto; padding: 20px; }
h1 { color: #333; }
p { margin-bottom: 15px; }
</style>
</head>
<body>
<header>
<h1>My First Blog Post</h1>
</header>
<main>
<article>
<p>This is the content of my blog post. It's short and sweet!</p>
</article>
</main>
<footer>
<p>&copy; 2023 My Blog</p>
</footer>
</body>
</html>
\`\`\`
:::
This HTML structure provides a simple layout for a blog post.
</assistant_response>
</example>
</examples>`;
const artifactsOpenAIPrompt = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
# Good artifacts are...
- Substantial content (>15 lines)
- Content that the user is likely to modify, iterate on, or take ownership of
- Self-contained, complex content that can be understood on its own, without context from the conversation
- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations)
- Content likely to be referenced or reused multiple times
# Don't use artifacts for...
- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples
- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept
- Suggestions, commentary, or feedback on existing artifacts
- Conversational or explanatory content that doesn't represent a standalone piece of work
- Content that is dependent on the current conversational context to be useful
- Content that is unlikely to be modified or iterated upon by the user
- Requests from users that appear to be one-off questions
# Usage notes
- One artifact per message unless specifically requested
- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users.
- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions.
- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users.
- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation.
- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments.
- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly.
## Artifact Instructions
When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps:
1. Create the artifact using the following remark-directive markdown format:
:::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"}
\`\`\`
Your artifact content here
\`\`\`
:::
a. Example of correct format:
:::artifact{identifier="example-artifact" type="text/plain" title="Example Artifact"}
\`\`\`
This is the content of the artifact.
It can span multiple lines.
\`\`\`
:::
b. Common mistakes to avoid:
- Don't split the opening ::: line
- Don't add extra backticks outside the artifact structure
- Don't omit the closing :::
2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
3. Include a \`title\` attribute to provide a brief title or description of the content.
4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute:
- HTML: "text/html"
- The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- The only place external scripts can be imported from is https://cdnjs.cloudflare.com
- SVG: "image/svg+xml"
- The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
- The assistant should specify the viewbox of the SVG rather than defining a width/height
- Markdown: "text/markdown" or "text/md"
- The user interface will render Markdown content placed within the artifact tags.
- Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
- Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
- Mermaid Diagrams: "application/vnd.mermaid"
- The user interface will render Mermaid diagrams placed within the artifact tags.
- React Components: "application/vnd.react"
- Use this for displaying either: React elements, e.g. \`<strong>Hello World!</strong>\`, React pure functional components, e.g. \`() => <strong>Hello World!</strong>\`, React functional components with Hooks, or React component classes
- When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export.
- Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`).
- Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\`
- The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`<Camera color="red" size={48} />\`
- The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \`<LineChart ...><XAxis dataKey="name"> ...\`
- The three.js library is available to be imported, e.g. \`import * as THREE from "three";\`
- The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\`
- The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\`
- The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary.
- Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`.
- NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses.
- If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component.
5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...".
6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact.
7. NEVER use triple backticks to enclose the artifact, ONLY the content within the artifact.
Here are some examples of correct usage of artifacts:
## Examples
### Example 1
This example demonstrates how to create a Mermaid artifact for a simple flow chart.
User: Can you create a simple flow chart showing the process of making tea using Mermaid?
Assistant: Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax:
:::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"}
\`\`\`mermaid
graph TD
A[Start] --> B{Water boiled?}
B -->|Yes| C[Add tea leaves to cup]
B -->|No| D[Boil water]
D --> B
C --> E[Pour boiling water into cup]
E --> F[Steep tea for desired time]
F --> G[Remove tea leaves]
G --> H[Add milk or sugar, if desired]
H --> I[Enjoy your tea!]
I --> J[End]
\`\`\`
:::
This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process:
1. Start
2. Check if water is boiled
3. If not boiled, boil the water
4. Once water is boiled, add tea leaves to the cup
5. Pour boiling water into the cup
6. Steep the tea for the desired time
7. Remove the tea leaves
8. Optionally add milk or sugar
9. Enjoy your tea!
10. End
This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions!
---
### Example 2
User: Create a simple React counter component
Assistant: Here's a simple React counter component:
:::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"}
\`\`\`
import { useState } from 'react';
export default function Counter() {
const [count, setCount] = useState(0);
return (
<div className="p-4">
<p className="mb-2">Count: {count}</p>
<button className="bg-blue-500 text-white px-4 py-2 rounded" onClick={() => setCount(count + 1)}>
Increment
</button>
</div>
);
}
\`\`\`
:::
This component creates a simple counter with an increment button.
---
### Example 3
User: Create a basic HTML structure for a blog post
Assistant: Here's a basic HTML structure for a blog post:
:::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
\`\`\`
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>My Blog Post</title>
<style>
body { font-family: Arial, sans-serif; line-height: 1.6; max-width: 800px; margin: 0 auto; padding: 20px; }
h1 { color: #333; }
p { margin-bottom: 15px; }
</style>
</head>
<body>
<header>
<h1>My First Blog Post</h1>
</header>
<main>
<article>
<p>This is the content of my blog post. It's short and sweet!</p>
</article>
</main>
<footer>
<p>&copy; 2023 My Blog</p>
</footer>
</body>
</html>
\`\`\`
:::
This HTML structure provides a simple layout for a blog post.
---`;
/**
* Generates an artifacts prompt based on the endpoint and artifact mode
* @param params - Configuration parameters
* @param params.endpoint - The current endpoint
* @param params.artifacts - The current artifact mode
* @returns The artifacts prompt, or null if mode is CUSTOM
*/
export function generateArtifactsPrompt(params: {
endpoint: EModelEndpoint | string;
artifacts: ArtifactModes;
}): string | null {
const { endpoint, artifacts } = params;
if (artifacts === ArtifactModes.CUSTOM) {
return null;
}
let prompt = artifactsPrompt;
if (endpoint !== EModelEndpoint.anthropic) {
prompt = artifactsOpenAIPrompt;
}
if (artifacts === ArtifactModes.SHADCNUI) {
prompt += generateShadcnPrompt({ components, useXML: endpoint === EModelEndpoint.anthropic });
}
return prompt;
}
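A hedged usage sketch of `generateArtifactsPrompt` (the relative import path is illustrative; the enum values come from librechat-data-provider as imported above):

import { EModelEndpoint, ArtifactModes } from 'librechat-data-provider';
import { generateArtifactsPrompt } from './artifacts';

// Anthropic receives the XML-tagged instructions (plus XML-formatted shadcn docs
// in SHADCNUI mode); every other endpoint receives the OpenAI-style variant.
// CUSTOM mode returns null.
const prompt = generateArtifactsPrompt({
  endpoint: EModelEndpoint.anthropic,
  artifacts: ArtifactModes.SHADCNUI,
});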

View file

@ -1,3 +1,4 @@
export * from './artifacts';
export * from './format';
export * from './migration';
export * from './schemas';

View file

@ -0,0 +1,37 @@
import type { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
import type { AwsCredentialIdentity } from '@aws-sdk/types';
import type { BedrockConverseInput } from 'librechat-data-provider';
/**
* AWS credentials for Bedrock
* Extends AWS AwsCredentialIdentity to ensure compatibility
*/
export type BedrockCredentials = Partial<AwsCredentialIdentity>;
/**
* Configuration options for Bedrock LLM
*/
export interface BedrockConfigOptions {
modelOptions?: Partial<BedrockConverseInput>;
/** AWS region for Bedrock */
region?: string;
/** Optional pre-configured Bedrock client (used with proxy) */
client?: BedrockRuntimeClient;
/** AWS credentials */
credentials?: BedrockCredentials;
/** Custom endpoint host for reverse proxy */
endpointHost?: string;
}
/**
* Return type for Bedrock getOptions function
*/
export interface BedrockLLMConfigResult {
llmConfig: BedrockConverseInput & {
region?: string;
client?: BedrockRuntimeClient;
credentials?: BedrockCredentials;
endpointHost?: string;
};
configOptions: Record<string, unknown>;
}
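For reference, a sketch of populating `BedrockConfigOptions` (the region, credential values, and model id are placeholders rather than recommended configuration, and the import path is illustrative):

import type { BedrockConfigOptions } from './types';

const bedrockOptions: BedrockConfigOptions = {
  region: 'us-east-1', // placeholder region
  credentials: { accessKeyId: '...', secretAccessKey: '...' }, // placeholder credentials, normally read from env
  // assumes `model` is a field of BedrockConverseInput
  modelOptions: { model: 'anthropic.claude-3-5-sonnet-20240620-v1:0' },
};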

View file

@ -1,3 +1,64 @@
import type { TConfig } from 'librechat-data-provider';
import type { ClientOptions, OpenAIClientOptions } from '@librechat/agents';
import type { TEndpoint } from 'librechat-data-provider';
import type { EndpointTokenConfig, ServerRequest } from '~/types';
export type TCustomEndpointsConfig = Partial<{ [key: string]: Omit<TConfig, 'order'> }>;
export type TCustomEndpointsConfig = Partial<{ [key: string]: Omit<TEndpoint, 'order'> }>;
/**
* Interface for user key values retrieved from the database
*/
export interface UserKeyValues {
apiKey?: string;
baseURL?: string;
}
/**
* Function type for getting user key (single decrypted value)
*/
export type GetUserKeyFunction = (params: { userId: string; name: string }) => Promise<string>;
/**
* Function type for getting user key values (parsed JSON object with apiKey/baseURL)
*/
export type GetUserKeyValuesFunction = (params: {
userId: string;
name: string;
}) => Promise<UserKeyValues>;
/**
* Database methods required for endpoint initialization
* These are passed in at invocation time to allow for dependency injection
*/
export interface EndpointDbMethods {
/** Get single decrypted key value (used for simple API keys) */
getUserKey: GetUserKeyFunction;
/** Get parsed key values object (used for apiKey + baseURL combinations) */
getUserKeyValues: GetUserKeyValuesFunction;
}
/**
* Base parameters for all endpoint initialization functions
*/
export interface BaseInitializeParams {
/** Request data containing user and body information (includes req.config) */
req: ServerRequest;
/** The endpoint name/identifier (e.g., 'openAI', 'anthropic', 'custom-endpoint-name') */
endpoint: string;
/** Model parameters from the request (includes model, temperature, topP, etc.) */
model_parameters?: Record<string, unknown>;
/** Database methods for user key operations */
db: EndpointDbMethods;
}
/**
* Base result type that all initialize functions return
* Using a more permissive type to accommodate different provider-specific results
*/
export interface InitializeResultBase {
llmConfig: ClientOptions;
configOptions?: OpenAIClientOptions['configuration'];
endpointTokenConfig?: EndpointTokenConfig;
useLegacyContent?: boolean;
provider?: string;
tools?: unknown[];
}
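A minimal sketch of satisfying `EndpointDbMethods` for dependency injection (`fetchDecryptedKey` is a hypothetical stand-in for the key methods that live in @librechat/data-schemas, and the import path is illustrative):

import type { EndpointDbMethods } from './types';

// Hypothetical helper standing in for the real decrypted-key lookup.
declare function fetchDecryptedKey(userId: string, name: string): Promise<string>;

const db: EndpointDbMethods = {
  getUserKey: ({ userId, name }) => fetchDecryptedKey(userId, name),
  getUserKeyValues: async ({ userId, name }) =>
    JSON.parse(await fetchDecryptedKey(userId, name)),
};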

View file

@ -4,7 +4,7 @@ import { AuthKeys, googleBaseSchema } from 'librechat-data-provider';
export type GoogleParameters = z.infer<typeof googleBaseSchema>;
export type GoogleCredentials = {
[AuthKeys.GOOGLE_SERVICE_KEY]?: string;
[AuthKeys.GOOGLE_SERVICE_KEY]?: string | Record<string, unknown>;
[AuthKeys.GOOGLE_API_KEY]?: string;
};
@ -23,4 +23,8 @@ export interface GoogleConfigOptions {
defaultParams?: Record<string, unknown>;
addParams?: Record<string, unknown>;
dropParams?: string[];
/** Stream rate delay for controlling token streaming speed */
streamRate?: number;
/** Model to use for title generation */
titleModel?: string;
}
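A brief sketch of the two new optional fields in use (the values and the import path are illustrative):

import type { GoogleConfigOptions } from './google';

const googleOptions: Partial<GoogleConfigOptions> = {
  streamRate: 25, // ms delay between streamed tokens (illustrative value)
  titleModel: 'gemini-1.5-flash', // title-generation model (illustrative)
};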

View file

@ -10,6 +10,9 @@ export type RequestBody = {
fileTokenLimit?: number;
conversationId?: string;
parentMessageId?: string;
endpoint?: string;
model?: string;
key?: string;
};
export type ServerRequest = Request<unknown, unknown, RequestBody> & {

View file

@ -1,13 +1,15 @@
export * from './anthropic';
export * from './azure';
export * from './balance';
export * from './bedrock';
export * from './endpoints';
export * from './events';
export * from './error';
export * from './events';
export * from './files';
export * from './google';
export * from './http';
export * from './mistral';
export * from './openai';
export type * from './openai';
export * from './prompts';
export * from './run';
export * from './anthropic';
export * from './tokens';

View file

@ -1,9 +1,8 @@
import { z } from 'zod';
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
import type { TEndpointOption, TAzureConfig, TEndpoint, TConfig } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import { openAISchema } from 'librechat-data-provider';
import type { TConfig } from 'librechat-data-provider';
import type { OpenAIClientOptions, Providers } from '@librechat/agents';
import type { AppConfig } from '@librechat/data-schemas';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { AzureOptions } from './azure';
export type OpenAIParameters = z.infer<typeof openAISchema>;
@ -46,58 +45,3 @@ export interface LLMConfigResult<T = OAIClientOptions> {
export type OpenAIConfigResult = LLMConfigResult<OAIClientOptions> & {
configOptions?: OpenAIConfiguration;
};
/**
* Interface for user values retrieved from the database
*/
export interface UserKeyValues {
apiKey?: string;
baseURL?: string;
}
/**
* Request interface with only the properties we need (avoids Express typing conflicts)
*/
export interface RequestData {
user: {
id: string;
};
body: {
model?: string;
endpoint?: string;
key?: string;
};
app: {
locals: {
[EModelEndpoint.azureOpenAI]?: TAzureConfig;
[EModelEndpoint.openAI]?: TEndpoint;
all?: TEndpoint;
};
};
}
/**
* Function type for getting user key values
*/
export type GetUserKeyValuesFunction = (params: {
userId: string;
name: string;
}) => Promise<UserKeyValues>;
/**
* Function type for checking user key expiry
*/
export type CheckUserKeyExpiryFunction = (expiresAt: string, endpoint: string) => void;
/**
* Parameters for the initializeOpenAI function
*/
export interface InitializeOpenAIOptionsParams {
req: RequestData;
appConfig: AppConfig;
overrideModel?: string;
overrideEndpoint?: string;
endpointOption: Partial<TEndpointOption>;
getUserKeyValues: GetUserKeyValuesFunction;
checkUserKeyExpiry: CheckUserKeyExpiryFunction;
}

View file

@ -0,0 +1,17 @@
/** Configuration object mapping model keys to their respective prompt, completion rates, and context limit
*
* Note: the [key: string]: unknown index signature is not in the original JSDoc typedef in /api/typedefs.js. It is included here because
* getModelMaxOutputTokens calls getModelTokenValue with a key of 'output', which the original typedef did not declare but which is read
* from a TokenConfig in the if (matchedPattern) branch of getModelTokenValue. The index signature preserves that behavior (and any other
* keys referenced elsewhere) until the interface can be typed more tightly.
*/
export interface TokenConfig {
prompt: number;
completion: number;
context: number;
[key: string]: unknown;
}
/** An endpoint's config object mapping model keys to their respective prompt, completion rates, and context limit */
export type EndpointTokenConfig = Record<string, TokenConfig>;
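A sketch of what one entry in an `EndpointTokenConfig` looks like (the model name and rates are illustrative, not real pricing; the import path is illustrative):

import type { EndpointTokenConfig } from './tokens';

const endpointTokenConfig: EndpointTokenConfig = {
  'gpt-4o-mini': {
    prompt: 0.15,     // prompt rate (illustrative)
    completion: 0.6,  // completion rate (illustrative)
    context: 128000,  // context window limit
    // extra keys are permitted by the index signature,
    // e.g. the 'output' key read by getModelMaxOutputTokens
    output: 16384,
  },
};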

View file

@ -21,4 +21,5 @@ export { default as Tokenizer, countTokens } from './tokenizer';
export * from './yaml';
export * from './http';
export * from './tokens';
export * from './url';
export * from './message';

View file

@ -1,5 +1,6 @@
import path from 'path';
import axios from 'axios';
import { ErrorTypes } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import { readFileAsString } from './files';
@ -114,3 +115,25 @@ export async function loadServiceKey(keyPath: string): Promise<GoogleServiceKey
return key;
}
/**
* Checks if a user key has expired based on the provided expiration date and endpoint.
* If the key has expired, it throws an Error with details including the type of error,
* the expiration date, and the endpoint.
*
* @param expiresAt - The expiration date of the user key in a format that can be parsed by the Date constructor
* @param endpoint - The endpoint associated with the user key to be checked
* @throws Error if the user key has expired. The error message is a stringified JSON object
* containing the type of error (`ErrorTypes.EXPIRED_USER_KEY`), the expiration date in the local string format, and the endpoint.
*/
export function checkUserKeyExpiry(expiresAt: string, endpoint: string): void {
const expiresAtDate = new Date(expiresAt);
if (expiresAtDate < new Date()) {
const errorMessage = JSON.stringify({
type: ErrorTypes.EXPIRED_USER_KEY,
expiredAt: expiresAtDate.toLocaleString(),
endpoint,
});
throw new Error(errorMessage);
}
}
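A usage sketch for the new helper (the import path is illustrative; the `expiresAt` value would come from the stored user key record):

import { checkUserKeyExpiry } from './key';

try {
  checkUserKeyExpiry('2023-01-01T00:00:00.000Z', 'openAI');
} catch (err) {
  // The message is a stringified JSON object:
  // { type: ErrorTypes.EXPIRED_USER_KEY, expiredAt, endpoint }
  const details = JSON.parse((err as Error).message);
  console.warn(`User key expired for ${details.endpoint} at ${details.expiredAt}`);
}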

View file

@ -1,23 +1,6 @@
import z from 'zod';
import { EModelEndpoint } from 'librechat-data-provider';
/** Configuration object mapping model keys to their respective prompt, completion rates, and context limit
*
* Note: the [key: string]: unknown is not in the original JSDoc typedef in /api/typedefs.js, but I've included it since
* getModelMaxOutputTokens calls getModelTokenValue with a key of 'output', which was not in the original JSDoc typedef,
* but would be referenced in a TokenConfig in the if(matchedPattern) portion of getModelTokenValue.
* So in order to preserve functionality for that case and any others which might reference an additional key I'm unaware of,
* I've included it here until the interface can be typed more tightly.
*/
export interface TokenConfig {
prompt: number;
completion: number;
context: number;
[key: string]: unknown;
}
/** An endpoint's config object mapping model keys to their respective prompt, completion rates, and context limit */
export type EndpointTokenConfig = Record<string, TokenConfig>;
import type { EndpointTokenConfig, TokenConfig } from '~/types';
const openAIModels = {
'o4-mini': 200000,

View file

@ -1,4 +1,4 @@
const extractBaseURL = require('./extractBaseURL');
import { extractBaseURL, deriveBaseURL } from './url';
describe('extractBaseURL', () => {
test('should extract base URL up to /v1 for standard endpoints', () => {
@ -33,7 +33,6 @@ describe('extractBaseURL', () => {
expect(extractBaseURL(url)).toBe(url);
});
// Test our JSDoc examples.
test('should extract base URL up to /v1 for open.ai standard endpoint', () => {
const url = 'https://open.ai/v1/chat';
expect(extractBaseURL(url)).toBe('https://open.ai/v1');
@ -108,4 +107,50 @@ describe('extractBaseURL', () => {
const url = 'https://${INSTANCE_NAME}.com/resources/deployments/${DEPLOYMENT_NAME}';
expect(extractBaseURL(url)).toBe(url);
});
test('should return undefined for null or empty input', () => {
expect(extractBaseURL('')).toBe(undefined);
// @ts-expect-error testing invalid input
expect(extractBaseURL(null)).toBe(undefined);
// @ts-expect-error testing invalid input
expect(extractBaseURL(undefined)).toBe(undefined);
});
});
describe('deriveBaseURL', () => {
test('should extract protocol, hostname and port from a URL', () => {
const fullURL = 'https://api.example.com:8080/v1/models';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.example.com:8080');
});
test('should handle URLs without port', () => {
const fullURL = 'https://api.example.com/v1/models';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.example.com');
});
test('should handle HTTP protocol', () => {
const fullURL = 'http://localhost:11434/api/tags';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('http://localhost:11434');
});
test('should handle URLs with paths', () => {
const fullURL = 'https://api.ollama.com/v1/chat/completions';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('https://api.ollama.com');
});
test('should return the original URL if parsing fails', () => {
const invalidURL = 'not-a-valid-url';
const result = deriveBaseURL(invalidURL);
expect(result).toBe(invalidURL);
});
test('should handle localhost URLs', () => {
const fullURL = 'http://localhost:11434';
const baseURL = deriveBaseURL(fullURL);
expect(baseURL).toBe('http://localhost:11434');
});
});

View file

@ -1,4 +1,5 @@
const { CohereConstants } = require('librechat-data-provider');
import { logger } from '@librechat/data-schemas';
import { CohereConstants } from 'librechat-data-provider';
/**
* Extracts a valid OpenAI baseURL from a given string, matching "url/v1," followed by an optional suffix.
@ -13,10 +14,10 @@ const { CohereConstants } = require('librechat-data-provider');
* - `https://open.ai/v1/hi/openai` -> `https://open.ai/v1/hi/openai`
* - `https://api.example.com/v1/replicate` -> `https://api.example.com/v1/replicate`
*
* @param {string} url - The URL to be processed.
* @returns {string | undefined} The matched pattern or input if no match is found.
* @param url - The URL to be processed.
* @returns The matched pattern or input if no match is found.
*/
function extractBaseURL(url) {
export function extractBaseURL(url: string): string | null | undefined {
if (!url || typeof url !== 'string') {
return undefined;
}
@ -29,14 +30,10 @@ function extractBaseURL(url) {
return url;
}
// Find the index of '/v1' to use it as a reference point.
const v1Index = url.indexOf('/v1');
// Extract the part of the URL up to and including '/v1'.
let baseUrl = url.substring(0, v1Index + 3);
const openai = 'openai';
// Find which suffix is present.
const suffixes = [
'azure-openai',
openai,
@ -62,29 +59,44 @@ function extractBaseURL(url) {
return url.split(/\/(chat|completion)/)[0];
}
// Check if the URL has '/openai' immediately after '/v1'.
const openaiIndex = url.indexOf(`/${openai}`, v1Index + 3);
// Find which suffix is present in the URL, if any.
const suffixIndex =
suffixUsed === openai ? openaiIndex : url.indexOf(`/${suffixUsed}`, v1Index + 3);
// If '/openai' is found right after '/v1', include it in the base URL.
if (openaiIndex === v1Index + 3) {
// Find the next slash or the end of the URL after '/openai'.
const nextSlashIndex = url.indexOf('/', openaiIndex + 7);
if (nextSlashIndex === -1) {
// If there is no next slash, the rest of the URL is the base URL.
baseUrl = url.substring(0, openaiIndex + 7);
} else {
// If there is a next slash, the base URL goes up to but not including the slash.
baseUrl = url.substring(0, nextSlashIndex);
}
} else if (suffixIndex > 0) {
// If a suffix is present but not immediately after '/v1', we need to include the reverse proxy pattern.
baseUrl = url.substring(0, suffixIndex + suffixUsed.length + 1);
baseUrl = url.substring(0, suffixIndex + (suffixUsed?.length ?? 0) + 1);
}
return baseUrl;
}
module.exports = extractBaseURL; // Export the function for use in your test file.
/**
* Extracts the base URL (protocol + hostname + port) from the provided URL.
* Used primarily for Ollama endpoints to derive the host.
* @param fullURL - The full URL.
* @returns The base URL (protocol://hostname:port).
*/
export function deriveBaseURL(fullURL: string): string {
try {
const parsedUrl = new URL(fullURL);
const protocol = parsedUrl.protocol;
const hostname = parsedUrl.hostname;
const port = parsedUrl.port;
if (!protocol || !hostname) {
return fullURL;
}
return `${protocol}//${hostname}${port ? `:${port}` : ''}`;
} catch (error) {
logger.error('Failed to derive base URL', error);
return fullURL;
}
}

View file

@ -1,7 +1,15 @@
import 'dotenv/config';
import jwt from 'jsonwebtoken';
import { webcrypto } from 'node:crypto';
import crypto from 'node:crypto';
import { SignPayloadParams } from '~/types';
const { webcrypto } = crypto;
/** Use hex decoding for both key and IV for legacy methods */
const key = Buffer.from(process.env.CREDS_KEY ?? '', 'hex');
const iv = Buffer.from(process.env.CREDS_IV ?? '', 'hex');
const algorithm = 'AES-CBC';
export async function signPayload({
payload,
secret,
@ -15,3 +23,153 @@ export async function hashToken(str: string): Promise<string> {
const hashBuffer = await webcrypto.subtle.digest('SHA-256', data);
return Buffer.from(hashBuffer).toString('hex');
}
/** --- Legacy v1/v2 Setup: AES-CBC with fixed key and IV --- */
/**
* Encrypts a value using AES-CBC
* @param value - The plaintext to encrypt
* @returns The encrypted string in hex format
*/
export async function encrypt(value: string): Promise<string> {
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'encrypt',
]);
const encoder = new TextEncoder();
const data = encoder.encode(value);
const encryptedBuffer = await webcrypto.subtle.encrypt(
{ name: algorithm, iv: iv },
cryptoKey,
data,
);
return Buffer.from(encryptedBuffer).toString('hex');
}
/**
* Decrypts an encrypted value using AES-CBC
* @param encryptedValue - The encrypted string in hex format
* @returns The decrypted plaintext
*/
export async function decrypt(encryptedValue: string): Promise<string> {
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'decrypt',
]);
const encryptedBuffer = Buffer.from(encryptedValue, 'hex');
const decryptedBuffer = await webcrypto.subtle.decrypt(
{ name: algorithm, iv: iv },
cryptoKey,
encryptedBuffer,
);
const decoder = new TextDecoder();
return decoder.decode(decryptedBuffer);
}
/** --- v2: AES-CBC with a random IV per encryption --- */
/**
* Encrypts a value using AES-CBC with a random IV per encryption
* @param value - The plaintext to encrypt
* @returns The encrypted string with IV prepended (iv:ciphertext format)
*/
export async function encryptV2(value: string): Promise<string> {
const gen_iv = webcrypto.getRandomValues(new Uint8Array(16));
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'encrypt',
]);
const encoder = new TextEncoder();
const data = encoder.encode(value);
const encryptedBuffer = await webcrypto.subtle.encrypt(
{ name: algorithm, iv: gen_iv },
cryptoKey,
data,
);
return Buffer.from(gen_iv).toString('hex') + ':' + Buffer.from(encryptedBuffer).toString('hex');
}
/**
* Decrypts an encrypted value using AES-CBC with random IV
* @param encryptedValue - The encrypted string in iv:ciphertext format
* @returns The decrypted plaintext
*/
export async function decryptV2(encryptedValue: string): Promise<string> {
const parts = encryptedValue.split(':');
if (parts.length === 1) {
return parts[0];
}
const gen_iv = Buffer.from(parts.shift() ?? '', 'hex');
const encrypted = parts.join(':');
const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [
'decrypt',
]);
const encryptedBuffer = Buffer.from(encrypted, 'hex');
const decryptedBuffer = await webcrypto.subtle.decrypt(
{ name: algorithm, iv: gen_iv },
cryptoKey,
encryptedBuffer,
);
const decoder = new TextDecoder();
return decoder.decode(decryptedBuffer);
}
/** --- v3: AES-256-CTR using Node's crypto functions --- */
const algorithm_v3 = 'aes-256-ctr';
/**
* Encrypts a value using AES-256-CTR.
* Note: AES-256 requires a 32-byte key. Ensure that process.env.CREDS_KEY is a 64-character hex string.
* @param value - The plaintext to encrypt.
* @returns The encrypted string with a "v3:" prefix.
*/
export function encryptV3(value: string): string {
if (key.length !== 32) {
throw new Error(`Invalid key length: expected 32 bytes, got ${key.length} bytes`);
}
const iv_v3 = crypto.randomBytes(16);
const cipher = crypto.createCipheriv(algorithm_v3, key, iv_v3);
const encrypted = Buffer.concat([cipher.update(value, 'utf8'), cipher.final()]);
return `v3:${iv_v3.toString('hex')}:${encrypted.toString('hex')}`;
}
/**
* Decrypts an encrypted value using AES-256-CTR.
* @param encryptedValue - The encrypted string with "v3:" prefix.
* @returns The decrypted plaintext.
*/
export function decryptV3(encryptedValue: string): string {
const parts = encryptedValue.split(':');
if (parts[0] !== 'v3') {
throw new Error('Not a v3 encrypted value');
}
const iv_v3 = Buffer.from(parts[1], 'hex');
const encryptedText = Buffer.from(parts.slice(2).join(':'), 'hex');
const decipher = crypto.createDecipheriv(algorithm_v3, key, iv_v3);
const decrypted = Buffer.concat([decipher.update(encryptedText), decipher.final()]);
return decrypted.toString('utf8');
}
/**
* Generates random values as a hex string
* @param length - The number of random bytes to generate
* @returns The random values as a hex string
*/
export async function getRandomValues(length: number): Promise<string> {
if (!Number.isInteger(length) || length <= 0) {
throw new Error('Length must be a positive integer');
}
const randomValues = new Uint8Array(length);
webcrypto.getRandomValues(randomValues);
return Buffer.from(randomValues).toString('hex');
}
/**
* Computes SHA-256 hash for the given input.
* @param input - The input to hash.
* @returns The SHA-256 hash of the input.
*/
export async function hashBackupCode(input: string): Promise<string> {
const encoder = new TextEncoder();
const data = encoder.encode(input);
const hashBuffer = await webcrypto.subtle.digest('SHA-256', data);
const hashArray = Array.from(new Uint8Array(hashBuffer));
return hashArray.map((b) => b.toString(16).padStart(2, '0')).join('');
}
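A round-trip sketch of the v2 and v3 helpers added above (assumes CREDS_KEY is set to a 64-character hex string, i.e. 32 bytes, as the v3 helpers require; the import path is illustrative):

import { encryptV2, decryptV2, encryptV3, decryptV3 } from './crypto';

async function demo() {
  // v2: AES-CBC with a random IV; output is "ivHex:cipherHex"
  const v2 = await encryptV2('my-api-key');
  console.log(await decryptV2(v2)); // "my-api-key"

  // v3: AES-256-CTR; output is "v3:ivHex:cipherHex" and encryptV3 throws if CREDS_KEY is not 32 bytes
  const v3 = encryptV3('my-api-key');
  console.log(decryptV3(v3)); // "my-api-key"
}

demo().catch(console.error);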

View file

@ -0,0 +1,530 @@
import mongoose from 'mongoose';
import { v4 as uuidv4 } from 'uuid';
import { MongoMemoryServer } from 'mongodb-memory-server';
import { EToolResources, FileContext } from 'librechat-data-provider';
import { createFileMethods } from './file';
import { createModels } from '~/models';
let File: mongoose.Model<unknown>;
let fileMethods: ReturnType<typeof createFileMethods>;
let mongoServer: MongoMemoryServer;
let modelsToCleanup: string[] = [];
describe('File Methods', () => {
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
const models = createModels(mongoose);
modelsToCleanup = Object.keys(models);
Object.assign(mongoose.models, models);
File = mongoose.models.File as mongoose.Model<unknown>;
fileMethods = createFileMethods(mongoose);
});
afterAll(async () => {
const collections = mongoose.connection.collections;
for (const key in collections) {
await collections[key].deleteMany({});
}
for (const modelName of modelsToCleanup) {
if (mongoose.models[modelName]) {
delete mongoose.models[modelName];
}
}
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await File.deleteMany({});
});
describe('createFile', () => {
it('should create a new file with TTL', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
const file = await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'test.txt',
filepath: '/uploads/test.txt',
type: 'text/plain',
bytes: 100,
});
expect(file).not.toBeNull();
expect(file?.file_id).toBe(fileId);
expect(file?.filename).toBe('test.txt');
expect(file?.expiresAt).toBeDefined();
});
it('should create a file without TTL when disableTTL is true', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
const file = await fileMethods.createFile(
{
file_id: fileId,
user: userId,
filename: 'permanent.txt',
filepath: '/uploads/permanent.txt',
type: 'text/plain',
bytes: 200,
},
true,
);
expect(file).not.toBeNull();
expect(file?.file_id).toBe(fileId);
expect(file?.expiresAt).toBeUndefined();
});
});
describe('findFileById', () => {
it('should find a file by file_id', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'find-me.txt',
filepath: '/uploads/find-me.txt',
type: 'text/plain',
bytes: 150,
});
const found = await fileMethods.findFileById(fileId);
expect(found).not.toBeNull();
expect(found?.file_id).toBe(fileId);
expect(found?.filename).toBe('find-me.txt');
});
it('should return null for non-existent file', async () => {
const found = await fileMethods.findFileById('non-existent');
expect(found).toBeNull();
});
});
describe('getFiles', () => {
it('should retrieve multiple files matching filter', async () => {
const userId = new mongoose.Types.ObjectId();
const fileIds = [uuidv4(), uuidv4(), uuidv4()];
for (const fileId of fileIds) {
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: `file-${fileId}.txt`,
filepath: `/uploads/${fileId}.txt`,
type: 'text/plain',
bytes: 100,
});
}
const files = await fileMethods.getFiles({ user: userId });
expect(files).toHaveLength(3);
expect(files.map((f) => f.file_id)).toEqual(expect.arrayContaining(fileIds));
});
it('should exclude text field by default', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'with-text.txt',
filepath: '/uploads/with-text.txt',
type: 'text/plain',
bytes: 100,
text: 'Some content here',
});
const files = await fileMethods.getFiles({ file_id: fileId });
expect(files).toHaveLength(1);
expect(files[0].text).toBeUndefined();
});
});
describe('getToolFilesByIds', () => {
it('should retrieve files for file_search tool (embedded files)', async () => {
const userId = new mongoose.Types.ObjectId();
const embeddedFileId = uuidv4();
const regularFileId = uuidv4();
await fileMethods.createFile({
file_id: embeddedFileId,
user: userId,
filename: 'embedded.txt',
filepath: '/uploads/embedded.txt',
type: 'text/plain',
bytes: 100,
embedded: true,
});
await fileMethods.createFile({
file_id: regularFileId,
user: userId,
filename: 'regular.txt',
filepath: '/uploads/regular.txt',
type: 'text/plain',
bytes: 100,
});
const toolSet = new Set([EToolResources.file_search]);
const files = await fileMethods.getToolFilesByIds([embeddedFileId, regularFileId], toolSet);
expect(files).toHaveLength(1);
expect(files[0].file_id).toBe(embeddedFileId);
});
it('should retrieve files for context tool', async () => {
const userId = new mongoose.Types.ObjectId();
const contextFileId = uuidv4();
await fileMethods.createFile({
file_id: contextFileId,
user: userId,
filename: 'context.txt',
filepath: '/uploads/context.txt',
type: 'text/plain',
bytes: 100,
text: 'Context content',
context: FileContext.agents,
});
const toolSet = new Set([EToolResources.context]);
const files = await fileMethods.getToolFilesByIds([contextFileId], toolSet);
expect(files).toHaveLength(1);
expect(files[0].file_id).toBe(contextFileId);
});
it('should retrieve files for execute_code tool', async () => {
const userId = new mongoose.Types.ObjectId();
const codeFileId = uuidv4();
await fileMethods.createFile({
file_id: codeFileId,
user: userId,
filename: 'code.py',
filepath: '/uploads/code.py',
type: 'text/x-python',
bytes: 100,
metadata: { fileIdentifier: 'some-identifier' },
});
const toolSet = new Set([EToolResources.execute_code]);
const files = await fileMethods.getToolFilesByIds([codeFileId], toolSet);
expect(files).toHaveLength(1);
expect(files[0].file_id).toBe(codeFileId);
});
});
describe('updateFile', () => {
it('should update file data and remove TTL', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'original.txt',
filepath: '/uploads/original.txt',
type: 'text/plain',
bytes: 100,
});
const updated = await fileMethods.updateFile({
file_id: fileId,
filename: 'updated.txt',
bytes: 200,
});
expect(updated).not.toBeNull();
expect(updated?.filename).toBe('updated.txt');
expect(updated?.bytes).toBe(200);
expect(updated?.expiresAt).toBeUndefined();
});
});
describe('updateFileUsage', () => {
it('should increment usage count', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'usage-test.txt',
filepath: '/uploads/usage-test.txt',
type: 'text/plain',
bytes: 100,
usage: 0,
});
const updated = await fileMethods.updateFileUsage({ file_id: fileId });
expect(updated?.usage).toBe(1);
const updated2 = await fileMethods.updateFileUsage({ file_id: fileId, inc: 5 });
expect(updated2?.usage).toBe(6);
});
});
describe('updateFilesUsage', () => {
it('should update usage for multiple files', async () => {
const userId = new mongoose.Types.ObjectId();
const fileIds = [uuidv4(), uuidv4()];
for (const fileId of fileIds) {
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: `file-${fileId}.txt`,
filepath: `/uploads/${fileId}.txt`,
type: 'text/plain',
bytes: 100,
usage: 0,
});
}
const files = fileIds.map((file_id) => ({ file_id }));
const updated = await fileMethods.updateFilesUsage(files);
expect(updated).toHaveLength(2);
for (const file of updated) {
expect((file as { usage: number }).usage).toBe(1);
}
});
it('should deduplicate files', async () => {
const userId = new mongoose.Types.ObjectId();
const fileId = uuidv4();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'duplicate-test.txt',
filepath: '/uploads/duplicate-test.txt',
type: 'text/plain',
bytes: 100,
usage: 0,
});
const files = [{ file_id: fileId }, { file_id: fileId }, { file_id: fileId }];
const updated = await fileMethods.updateFilesUsage(files);
expect(updated).toHaveLength(1);
expect((updated[0] as { usage: number }).usage).toBe(1);
});
it('should filter out null results when files do not exist', async () => {
const userId = new mongoose.Types.ObjectId();
const existingFileId = uuidv4();
await fileMethods.createFile({
file_id: existingFileId,
user: userId,
filename: 'existing.txt',
filepath: '/uploads/existing.txt',
type: 'text/plain',
bytes: 100,
usage: 0,
});
const files = [{ file_id: existingFileId }, { file_id: 'non-existent-file' }];
const updated = await fileMethods.updateFilesUsage(files);
expect(updated.length).toBeGreaterThan(0);
expect(updated).not.toContain(null);
expect(updated).not.toContain(undefined);
const existingFile = updated.find(
(f) => (f as { file_id: string }).file_id === existingFileId,
);
expect(existingFile).toBeDefined();
expect((existingFile as { usage: number }).usage).toBe(1);
});
it('should handle empty files array', async () => {
const result = await fileMethods.updateFilesUsage([]);
expect(result).toEqual([]);
});
it('should handle fileIds parameter', async () => {
const userId = new mongoose.Types.ObjectId();
const fileIds = [uuidv4(), uuidv4()];
for (const fileId of fileIds) {
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: `file-${fileId}.txt`,
filepath: `/uploads/${fileId}.txt`,
type: 'text/plain',
bytes: 100,
usage: 0,
});
}
const files = [{ file_id: fileIds[0] }];
const updated = await fileMethods.updateFilesUsage(files, [fileIds[1]]);
expect(updated).toHaveLength(2);
const file1 = updated.find((f) => (f as { file_id: string }).file_id === fileIds[0]);
const file2 = updated.find((f) => (f as { file_id: string }).file_id === fileIds[1]);
expect(file1).toBeDefined();
expect(file2).toBeDefined();
expect((file1 as { usage: number }).usage).toBe(1);
expect((file2 as { usage: number }).usage).toBe(1);
});
it('should deduplicate between files and fileIds parameters', async () => {
const userId = new mongoose.Types.ObjectId();
const fileId = uuidv4();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'test.txt',
filepath: '/uploads/test.txt',
type: 'text/plain',
bytes: 100,
usage: 0,
});
const files = [{ file_id: fileId }];
const updated = await fileMethods.updateFilesUsage(files, [fileId]);
expect(updated).toHaveLength(1);
expect((updated[0] as { usage: number }).usage).toBe(1);
});
});
describe('deleteFile', () => {
it('should delete a file by file_id', async () => {
const fileId = uuidv4();
const userId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: 'delete-me.txt',
filepath: '/uploads/delete-me.txt',
type: 'text/plain',
bytes: 100,
});
const deleted = await fileMethods.deleteFile(fileId);
expect(deleted).not.toBeNull();
expect(deleted?.file_id).toBe(fileId);
const found = await fileMethods.findFileById(fileId);
expect(found).toBeNull();
});
});
describe('deleteFiles', () => {
it('should delete multiple files by file_ids', async () => {
const userId = new mongoose.Types.ObjectId();
const fileIds = [uuidv4(), uuidv4(), uuidv4()];
for (const fileId of fileIds) {
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: `file-${fileId}.txt`,
filepath: `/uploads/${fileId}.txt`,
type: 'text/plain',
bytes: 100,
});
}
const result = await fileMethods.deleteFiles(fileIds);
expect(result.deletedCount).toBe(3);
const remaining = await fileMethods.getFiles({ file_id: { $in: fileIds } });
expect(remaining).toHaveLength(0);
});
it('should delete all files for a user', async () => {
const userId = new mongoose.Types.ObjectId();
const otherUserId = new mongoose.Types.ObjectId();
await fileMethods.createFile({
file_id: uuidv4(),
user: userId,
filename: 'user-file-1.txt',
filepath: '/uploads/user-file-1.txt',
type: 'text/plain',
bytes: 100,
});
await fileMethods.createFile({
file_id: uuidv4(),
user: userId,
filename: 'user-file-2.txt',
filepath: '/uploads/user-file-2.txt',
type: 'text/plain',
bytes: 100,
});
await fileMethods.createFile({
file_id: uuidv4(),
user: otherUserId,
filename: 'other-user-file.txt',
filepath: '/uploads/other-user-file.txt',
type: 'text/plain',
bytes: 100,
});
const result = await fileMethods.deleteFiles([], userId.toString());
expect(result.deletedCount).toBe(2);
const remaining = await fileMethods.getFiles({});
expect(remaining).toHaveLength(1);
expect(remaining[0].user?.toString()).toBe(otherUserId.toString());
});
});
describe('batchUpdateFiles', () => {
it('should update multiple file paths', async () => {
const userId = new mongoose.Types.ObjectId();
const fileIds = [uuidv4(), uuidv4()];
for (const fileId of fileIds) {
await fileMethods.createFile({
file_id: fileId,
user: userId,
filename: `file-${fileId}.txt`,
filepath: `/old-path/${fileId}.txt`,
type: 'text/plain',
bytes: 100,
});
}
const updates = fileIds.map((file_id) => ({
file_id,
filepath: `/new-path/${file_id}.txt`,
}));
await fileMethods.batchUpdateFiles(updates);
for (const fileId of fileIds) {
const file = await fileMethods.findFileById(fileId);
expect(file?.filepath).toBe(`/new-path/${fileId}.txt`);
}
});
it('should handle empty updates array gracefully', async () => {
await expect(fileMethods.batchUpdateFiles([])).resolves.toBeUndefined();
});
});
});
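These cases assume a live MongoDB connection and a `fileMethods` object built from the factory introduced in the new file below. A minimal setup sketch for such a suite, assuming `mongodb-memory-server` and a hypothetical import path for the factory (neither is shown in this diff):

import mongoose from 'mongoose';
import { MongoMemoryServer } from 'mongodb-memory-server';
import { createFileMethods } from '~/models/file'; // hypothetical import path

let mongoServer: MongoMemoryServer;
let fileMethods: ReturnType<typeof createFileMethods>;

beforeAll(async () => {
  mongoServer = await MongoMemoryServer.create();
  await mongoose.connect(mongoServer.getUri());
  // The factory reads mongoose.models.File, so the package's File schema
  // must already be registered on this mongoose instance.
  fileMethods = createFileMethods(mongoose);
});

afterAll(async () => {
  await mongoose.disconnect();
  await mongoServer.stop();
});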


@@ -0,0 +1,272 @@
import logger from '../config/winston';
import { EToolResources, FileContext } from 'librechat-data-provider';
import type { FilterQuery, SortOrder, Model } from 'mongoose';
import type { IMongoFile } from '~/types/file';
/** Factory function that takes a mongoose instance and returns the file methods */
export function createFileMethods(mongoose: typeof import('mongoose')) {
/**
* Finds a file by its file_id with additional query options.
* @param file_id - The unique identifier of the file
* @param options - Query options for filtering, projection, etc.
* @returns A promise that resolves to the file document or null
*/
async function findFileById(
file_id: string,
options: Record<string, unknown> = {},
): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
return File.findOne({ file_id, ...options }).lean();
}
/** Select fields for query projection - 0 to exclude, 1 to include */
type SelectProjection = Record<string, 0 | 1>;
/**
* Retrieves files matching a given filter, sorted by the most recently updated.
* @param filter - The filter criteria to apply
* @param _sortOptions - Optional sort parameters
* @param selectFields - Fields to include/exclude in the query results. Default excludes the 'text' field
* @returns A promise that resolves to an array of file documents
*/
async function getFiles(
filter: FilterQuery<IMongoFile>,
_sortOptions?: Record<string, SortOrder> | null,
selectFields?: SelectProjection | string | null,
): Promise<IMongoFile[] | null> {
const File = mongoose.models.File as Model<IMongoFile>;
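// Defaults to most recently updated first; caller-supplied sort keys are merged in and can override it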
const sortOptions = { updatedAt: -1 as SortOrder, ..._sortOptions };
const query = File.find(filter);
if (selectFields != null) {
query.select(selectFields);
} else {
query.select({ text: 0 });
}
return await query.sort(sortOptions).lean();
}
/**
* Retrieves tool files (files that are embedded or have a fileIdentifier) from an array of file IDs
* @param fileIds - Array of file_id strings to search for
* @param toolResourceSet - Optional filter for tool resources
* @returns Files that match the criteria
*/
async function getToolFilesByIds(
fileIds: string[],
toolResourceSet?: Set<EToolResources>,
): Promise<IMongoFile[]> {
if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
return [];
}
try {
const filter: FilterQuery<IMongoFile> = {
file_id: { $in: fileIds },
$or: [],
};
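// Each requested tool resource contributes one $or clause; files matching any clause are returned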
if (toolResourceSet.has(EToolResources.context)) {
filter.$or?.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
}
if (toolResourceSet.has(EToolResources.file_search)) {
filter.$or?.push({ embedded: true });
}
if (toolResourceSet.has(EToolResources.execute_code)) {
filter.$or?.push({ 'metadata.fileIdentifier': { $exists: true } });
}
const selectFields: SelectProjection = { text: 0 };
const sortOptions = { updatedAt: -1 as SortOrder };
const results = await getFiles(filter, sortOptions, selectFields);
return results ?? [];
} catch (error) {
logger.error('[getToolFilesByIds] Error retrieving tool files:', error);
throw new Error('Error retrieving tool files');
}
}
/**
* Creates or updates (upserts) a file record, applying a 1-hour TTL unless disabled.
* @param data - The file data to be created, must contain file_id
* @param disableTTL - Whether to disable the TTL
* @returns A promise that resolves to the created file document
*/
async function createFile(
data: Partial<IMongoFile>,
disableTTL?: boolean,
): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
const fileData: Partial<IMongoFile> = {
...data,
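// Default TTL of one hour; deleted below when disableTTL is set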
expiresAt: new Date(Date.now() + 3600 * 1000),
};
if (disableTTL) {
delete fileData.expiresAt;
}
return File.findOneAndUpdate({ file_id: data.file_id }, fileData, {
new: true,
upsert: true,
}).lean();
}
/**
* Updates a file identified by file_id with new data and removes the TTL.
* @param data - The data to update, must contain file_id
* @returns A promise that resolves to the updated file document
*/
async function updateFile(
data: Partial<IMongoFile> & { file_id: string },
): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
const { file_id, ...update } = data;
const updateOperation = {
$set: update,
$unset: { expiresAt: '' },
};
return File.findOneAndUpdate({ file_id }, updateOperation, {
new: true,
}).lean();
}
/**
* Increments the usage of a file identified by file_id.
* @param data - The data to update, must contain file_id and the increment value for usage
* @returns A promise that resolves to the updated file document
*/
async function updateFileUsage(data: {
file_id: string;
inc?: number;
}): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
const { file_id, inc = 1 } = data;
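// Incrementing usage also clears the TTL and any temporary file id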
const updateOperation = {
$inc: { usage: inc },
$unset: { expiresAt: '', temp_file_id: '' },
};
return File.findOneAndUpdate({ file_id }, updateOperation, {
new: true,
}).lean();
}
/**
* Deletes a file identified by file_id.
* @param file_id - The unique identifier of the file to delete
* @returns A promise that resolves to the deleted file document or null
*/
async function deleteFile(file_id: string): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
return File.findOneAndDelete({ file_id }).lean();
}
/**
* Deletes a file identified by a filter.
* @param filter - The filter criteria to apply
* @returns A promise that resolves to the deleted file document or null
*/
async function deleteFileByFilter(filter: FilterQuery<IMongoFile>): Promise<IMongoFile | null> {
const File = mongoose.models.File as Model<IMongoFile>;
return File.findOneAndDelete(filter).lean();
}
/**
* Deletes multiple files identified by an array of file_ids.
* @param file_ids - The unique identifiers of the files to delete
* @param user - Optional user ID; when provided, all files owned by that user are deleted and file_ids is ignored
* @returns A promise that resolves to the result of the deletion operation
*/
async function deleteFiles(
file_ids: string[],
user?: string,
): Promise<{ deletedCount?: number }> {
const File = mongoose.models.File as Model<IMongoFile>;
let deleteQuery: FilterQuery<IMongoFile> = { file_id: { $in: file_ids } };
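// A provided user id takes precedence: the file_id list is ignored and all of that user's files are targeted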
if (user) {
deleteQuery = { user: user };
}
return File.deleteMany(deleteQuery);
}
/**
* Batch updates files with new signed URLs in MongoDB
* @param updates - Array of updates in the format { file_id, filepath }
*/
async function batchUpdateFiles(
updates: Array<{ file_id: string; filepath: string }>,
): Promise<void> {
if (!updates || updates.length === 0) {
return;
}
const File = mongoose.models.File as Model<IMongoFile>;
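// One updateOne operation per file, sent as a single bulkWrite round trip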
const bulkOperations = updates.map((update) => ({
updateOne: {
filter: { file_id: update.file_id },
update: { $set: { filepath: update.filepath } },
},
}));
const result = await File.bulkWrite(bulkOperations);
logger.info(`Updated ${result.modifiedCount} files with new S3 URLs`);
}
/**
* Updates usage tracking for multiple files.
* Processes files and optional fileIds, updating their usage count in the database.
*
* @param files - Array of file objects to process
* @param fileIds - Optional array of file IDs to process
* @returns Array of updated file documents (with null results filtered out)
*/
async function updateFilesUsage(
files: Array<{ file_id: string }>,
fileIds?: string[],
): Promise<IMongoFile[]> {
const promises: Promise<IMongoFile | null>[] = [];
const seen = new Set<string>();
for (const file of files) {
const { file_id } = file;
if (seen.has(file_id)) {
continue;
}
seen.add(file_id);
promises.push(updateFileUsage({ file_id }));
}
if (!fileIds) {
const results = await Promise.all(promises);
return results.filter((result): result is IMongoFile => result != null);
}
for (const file_id of fileIds) {
if (seen.has(file_id)) {
continue;
}
seen.add(file_id);
promises.push(updateFileUsage({ file_id }));
}
const results = await Promise.all(promises);
return results.filter((result): result is IMongoFile => result != null);
}
return {
findFileById,
getFiles,
getToolFilesByIds,
createFile,
updateFile,
updateFileUsage,
deleteFile,
deleteFiles,
deleteFileByFilter,
batchUpdateFiles,
updateFilesUsage,
};
}
export type FileMethods = ReturnType<typeof createFileMethods>;
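For orientation, a consumer-side sketch of the factory; the import specifier is an assumption, while the method names and argument shapes come from the file above:

import mongoose from 'mongoose';
import { createFileMethods } from '@librechat/data-schemas'; // assumed export path

const fileMethods = createFileMethods(mongoose);

async function recordUpload(userId: mongoose.Types.ObjectId) {
  // Upserts the record and applies the default one-hour TTL
  await fileMethods.createFile({
    file_id: 'example-file-id',
    user: userId,
    filename: 'notes.txt',
    filepath: '/uploads/notes.txt',
    type: 'text/plain',
    bytes: 42,
  });

  // Marks the file as used: increments `usage` and clears the TTL
  await fileMethods.updateFileUsage({ file_id: 'example-file-id' });

  // Returns the user's files, excluding the heavy `text` field by default
  return fileMethods.getFiles({ user: userId });
}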

Some files were not shown because too many files have changed in this diff.