diff --git a/.github/workflows/unused-packages.yml b/.github/workflows/unused-packages.yml index 442925b69b..f67c1d23be 100644 --- a/.github/workflows/unused-packages.yml +++ b/.github/workflows/unused-packages.yml @@ -8,6 +8,7 @@ on: - 'client/**' - 'api/**' - 'packages/client/**' + - 'packages/api/**' jobs: detect-unused-packages: @@ -63,35 +64,45 @@ jobs: extract_deps_from_code() { local folder=$1 local output_file=$2 + + # Initialize empty output file + > "$output_file" + if [[ -d "$folder" ]]; then - # Extract require() statements - grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \ - sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file" + # Extract require() statements (use explicit includes for portability) + grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" >> "$output_file" || true - # Extract ES6 imports - various patterns - # import x from 'module' - grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \ - sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" + # Extract ES6 imports - import x from 'module' + grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true # import 'module' (side-effect imports) - grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \ - sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" + grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true # export { x } from 'module' or export * from 'module' - grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \ - sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" + grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.js' --include='*.ts' --include='*.tsx' --include='*.jsx' --include='*.mjs' --include='*.cjs' 2>/dev/null | \ + sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true # import type { x } from 'module' (TypeScript) - grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \ - sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" + grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" \ + --include='*.ts' --include='*.tsx' 2>/dev/null | \ + sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file" || true # Remove subpath imports but keep the base package - # e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query' - sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file" + # For scoped packages: '@scope/pkg/subpath' -> '@scope/pkg' + # For regular packages: 'pkg/subpath' -> 'pkg' + # Scoped packages (must keep @scope/package, strip anything after) + sed -i -E 
's|^(@[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true + # Non-scoped packages (keep package name, strip subpath) + sed -i -E 's|^([a-zA-Z0-9_-]+)/.*|\1|' "$output_file" 2>/dev/null || true sort -u "$output_file" -o "$output_file" - else - touch "$output_file" fi } @@ -99,8 +110,10 @@ jobs: extract_deps_from_code "client" client_used_code.txt extract_deps_from_code "api" api_used_code.txt - # Extract dependencies used by @librechat/client package + # Extract dependencies used by workspace packages + # These packages are used in the workspace but dependencies are provided by parent package.json extract_deps_from_code "packages/client" packages_client_used_code.txt + extract_deps_from_code "packages/api" packages_api_used_code.txt - name: Get @librechat/client dependencies id: get-librechat-client-deps @@ -126,6 +139,30 @@ jobs: touch librechat_client_deps.txt fi + - name: Get @librechat/api dependencies + id: get-librechat-api-deps + run: | + if [[ -f "packages/api/package.json" ]]; then + # Get all dependencies from @librechat/api (dependencies, devDependencies, and peerDependencies) + DEPS=$(jq -r '.dependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/api/package.json 2>/dev/null || echo "") + + # Combine all dependencies + echo "$DEPS" > librechat_api_deps.txt + echo "$DEV_DEPS" >> librechat_api_deps.txt + echo "$PEER_DEPS" >> librechat_api_deps.txt + + # Also include dependencies that are imported in packages/api + cat packages_api_used_code.txt >> librechat_api_deps.txt + + # Remove empty lines and sort + grep -v '^$' librechat_api_deps.txt | sort -u > temp_deps.txt + mv temp_deps.txt librechat_api_deps.txt + else + touch librechat_api_deps.txt + fi + - name: Extract Workspace Dependencies id: extract-workspace-deps run: | @@ -184,8 +221,8 @@ jobs: chmod -R 755 client cd client UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") - # Exclude dependencies used in scripts, code, and workspace packages - UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "") + # Exclude dependencies used in scripts, code, workspace packages, and @librechat/client imports + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt ../packages_client_used_code.txt ../librechat_client_deps.txt 2>/dev/null | sort -u) || echo "") # Filter out false positives UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "") echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV echo "$UNUSED" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV @@ -201,8 +238,8 @@ jobs: chmod -R 755 api cd api UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "") - # Exclude dependencies used in scripts, code, and workspace packages - UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "") + # Exclude dependencies used in scripts, code, workspace packages, and @librechat/api imports + UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt ../packages_api_used_code.txt ../librechat_api_deps.txt 2>/dev/null | sort -u) || echo "") echo "API_UNUSED<<EOF" >> $GITHUB_ENV echo "$UNUSED" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV @@ -241,4 +278,4 @@ jobs: - name: Fail 
workflow if unused dependencies found if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != '' - run: exit 1 \ No newline at end of file + run: exit 1 diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js index f4a69be229..126efcc385 100644 --- a/api/app/clients/BaseClient.js +++ b/api/app/clients/BaseClient.js @@ -20,11 +20,17 @@ const { isAgentsEndpoint, supportsBalanceCheck, } = require('librechat-data-provider'); -const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models'); +const { + updateMessage, + getMessages, + saveMessage, + saveConvo, + getConvo, + getFiles, +} = require('~/models'); const { getStrategyFunctions } = require('~/server/services/Files/strategies'); const { checkBalance } = require('~/models/balanceMethods'); const { truncateToolCallOutputs } = require('./prompts'); -const { getFiles } = require('~/models/File'); const TextStream = require('./TextStream'); class BaseClient { diff --git a/api/app/clients/OllamaClient.js b/api/app/clients/OllamaClient.js index b8bdacf13e..d0dda519fe 100644 --- a/api/app/clients/OllamaClient.js +++ b/api/app/clients/OllamaClient.js @@ -2,10 +2,9 @@ const { z } = require('zod'); const axios = require('axios'); const { Ollama } = require('ollama'); const { sleep } = require('@librechat/agents'); -const { resolveHeaders } = require('@librechat/api'); const { logger } = require('@librechat/data-schemas'); const { Constants } = require('librechat-data-provider'); -const { deriveBaseURL } = require('~/utils'); +const { resolveHeaders, deriveBaseURL } = require('@librechat/api'); const ollamaPayloadSchema = z.object({ mirostat: z.number().optional(), diff --git a/api/app/clients/tools/structured/DALLE3.js b/api/app/clients/tools/structured/DALLE3.js index d92388b320..c44b56f83d 100644 --- a/api/app/clients/tools/structured/DALLE3.js +++ b/api/app/clients/tools/structured/DALLE3.js @@ -5,9 +5,8 @@ const { v4: uuidv4 } = require('uuid'); const { ProxyAgent, fetch } = require('undici'); const { Tool } = require('@langchain/core/tools'); const { logger } = require('@librechat/data-schemas'); -const { getImageBasename } = require('@librechat/api'); +const { getImageBasename, extractBaseURL } = require('@librechat/api'); const { FileContext, ContentTypes } = require('librechat-data-provider'); -const extractBaseURL = require('~/utils/extractBaseURL'); const displayMessage = "DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. 
The user may download the images by clicking on them, but do not mention anything about downloading to the user."; diff --git a/api/app/clients/tools/structured/OpenAIImageTools.js b/api/app/clients/tools/structured/OpenAIImageTools.js index 35eeb32ffe..3771167c51 100644 --- a/api/app/clients/tools/structured/OpenAIImageTools.js +++ b/api/app/clients/tools/structured/OpenAIImageTools.js @@ -6,11 +6,10 @@ const { ProxyAgent } = require('undici'); const { tool } = require('@langchain/core/tools'); const { logger } = require('@librechat/data-schemas'); const { HttpsProxyAgent } = require('https-proxy-agent'); -const { logAxiosError, oaiToolkit } = require('@librechat/api'); const { ContentTypes, EImageOutputType } = require('librechat-data-provider'); +const { logAxiosError, oaiToolkit, extractBaseURL } = require('@librechat/api'); const { getStrategyFunctions } = require('~/server/services/Files/strategies'); -const extractBaseURL = require('~/utils/extractBaseURL'); -const { getFiles } = require('~/models/File'); +const { getFiles } = require('~/models'); const displayMessage = "The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user."; diff --git a/api/app/clients/tools/util/fileSearch.js b/api/app/clients/tools/util/fileSearch.js index 17b3dc3452..d48b9b986d 100644 --- a/api/app/clients/tools/util/fileSearch.js +++ b/api/app/clients/tools/util/fileSearch.js @@ -5,7 +5,7 @@ const { logger } = require('@librechat/data-schemas'); const { generateShortLivedToken } = require('@librechat/api'); const { Tools, EToolResources } = require('librechat-data-provider'); const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions'); -const { getFiles } = require('~/models/File'); +const { getFiles } = require('~/models'); /** * diff --git a/api/models/File.spec.js b/api/models/File.spec.js index c92224ea3e..2d4282cff7 100644 --- a/api/models/File.spec.js +++ b/api/models/File.spec.js @@ -1,7 +1,7 @@ const mongoose = require('mongoose'); const { v4: uuidv4 } = require('uuid'); -const { createModels } = require('@librechat/data-schemas'); const { MongoMemoryServer } = require('mongodb-memory-server'); +const { createModels, createMethods } = require('@librechat/data-schemas'); const { SystemRoles, ResourceType, @@ -9,8 +9,6 @@ const { PrincipalType, } = require('librechat-data-provider'); const { grantPermission } = require('~/server/services/PermissionService'); -const { getFiles, createFile } = require('./File'); -const { seedDefaultRoles } = require('~/models'); const { createAgent } = require('./Agent'); let File; @@ -18,6 +16,10 @@ let Agent; let AclEntry; let User; let modelsToCleanup = []; +let methods; +let getFiles; +let createFile; +let seedDefaultRoles; describe('File Access Control', () => { let mongoServer; @@ -42,6 +44,12 @@ describe('File Access Control', () => { AclEntry = dbModels.AclEntry; User = dbModels.User; + // Create methods from data-schemas (includes file methods) + methods = createMethods(mongoose); + getFiles = methods.getFiles; + createFile = methods.createFile; + seedDefaultRoles = methods.seedDefaultRoles; + // Seed default roles await seedDefaultRoles(); }); diff --git a/api/models/index.js b/api/models/index.js index 7f2c651941..d0b10be079 100644 --- a/api/models/index.js +++ b/api/models/index.js @@ -2,15 
+2,6 @@ const mongoose = require('mongoose'); const { createMethods } = require('@librechat/data-schemas'); const methods = createMethods(mongoose); const { comparePassword } = require('./userMethods'); -const { - findFileById, - createFile, - updateFile, - deleteFile, - deleteFiles, - getFiles, - updateFileUsage, -} = require('./File'); const { getMessage, getMessages, @@ -34,13 +25,6 @@ module.exports = { ...methods, seedDatabase, comparePassword, - findFileById, - createFile, - updateFile, - deleteFile, - deleteFiles, - getFiles, - updateFileUsage, getMessage, getMessages, diff --git a/api/models/inviteUser.js b/api/models/inviteUser.js index eeb42841bf..eda8394225 100644 --- a/api/models/inviteUser.js +++ b/api/models/inviteUser.js @@ -1,6 +1,5 @@ const mongoose = require('mongoose'); -const { getRandomValues } = require('@librechat/api'); -const { logger, hashToken } = require('@librechat/data-schemas'); +const { logger, hashToken, getRandomValues } = require('@librechat/data-schemas'); const { createToken, findToken } = require('~/models'); /** diff --git a/api/server/controllers/TwoFactorController.js b/api/server/controllers/TwoFactorController.js index 9ef2718108..fde5965261 100644 --- a/api/server/controllers/TwoFactorController.js +++ b/api/server/controllers/TwoFactorController.js @@ -1,11 +1,10 @@ -const { encryptV3 } = require('@librechat/api'); -const { logger } = require('@librechat/data-schemas'); +const { encryptV3, logger } = require('@librechat/data-schemas'); const { - verifyTOTP, - getTOTPSecret, - verifyBackupCode, - generateTOTPSecret, generateBackupCodes, + generateTOTPSecret, + verifyBackupCode, + getTOTPSecret, + verifyTOTP, } = require('~/server/services/twoFactorService'); const { getUserById, updateUser } = require('~/models'); diff --git a/api/server/controllers/UserController.js b/api/server/controllers/UserController.js index 75f3ab2ce3..e95cdc36a0 100644 --- a/api/server/controllers/UserController.js +++ b/api/server/controllers/UserController.js @@ -9,9 +9,11 @@ const { const { deleteAllUserSessions, deleteAllSharedLinks, + updateUserPlugins, deleteUserById, deleteMessages, deletePresets, + deleteUserKey, deleteConvos, deleteFiles, updateUser, @@ -31,7 +33,6 @@ const { User, } = require('~/db/models'); const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService'); -const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService'); const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService'); const { getMCPManager, getFlowStateManager, getMCPServersRegistry } = require('~/config'); const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud'); @@ -114,13 +115,7 @@ const updateUserPluginsController = async (req, res) => { const { pluginKey, action, auth, isEntityTool } = req.body; try { if (!isEntityTool) { - const userPluginsService = await updateUserPluginsService(user, pluginKey, action); - - if (userPluginsService instanceof Error) { - logger.error('[userPluginsService]', userPluginsService); - const { status, message } = normalizeHttpError(userPluginsService); - return res.status(status).send({ message }); - } + await updateUserPlugins(user._id, user.plugins, pluginKey, action); } if (auth == null) { diff --git a/api/server/controllers/agents/client.js b/api/server/controllers/agents/client.js index baa9b7a37a..faf3c58399 100644 --- a/api/server/controllers/agents/client.js +++ b/api/server/controllers/agents/client.js @@ -10,7 +10,9 @@ const 
{ sanitizeTitle, resolveHeaders, createSafeUser, + initializeAgent, getBalanceConfig, + getProviderConfig, memoryInstructions, getTransactionsConfig, createMemoryProcessor, @@ -38,17 +40,16 @@ const { bedrockInputSchema, removeNullishValues, } = require('librechat-data-provider'); -const { initializeAgent } = require('~/server/services/Endpoints/agents/agent'); const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens'); -const { getFormattedMemories, deleteMemory, setMemory } = require('~/models'); const { encodeAndFormat } = require('~/server/services/Files/images/encode'); -const { getProviderConfig } = require('~/server/services/Endpoints'); const { createContextHandlers } = require('~/app/clients/prompts'); const { checkCapability } = require('~/server/services/Config'); +const { getConvoFiles } = require('~/models/Conversation'); const BaseClient = require('~/app/clients/BaseClient'); const { getRoleByName } = require('~/models/Role'); const { loadAgent } = require('~/models/Agent'); const { getMCPManager } = require('~/config'); +const db = require('~/models'); const omitTitleOptions = new Set([ 'stream', @@ -542,18 +543,28 @@ class AgentClient extends BaseClient { ); } - const agent = await initializeAgent({ - req: this.options.req, - res: this.options.res, - agent: prelimAgent, - allowedProviders, - endpointOption: { - endpoint: - prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID - ? EModelEndpoint.agents - : memoryConfig.agent?.provider, + const agent = await initializeAgent( + { + req: this.options.req, + res: this.options.res, + agent: prelimAgent, + allowedProviders, + endpointOption: { + endpoint: + prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID + ? EModelEndpoint.agents + : memoryConfig.agent?.provider, + }, }, - }); + { + getConvoFiles, + getFiles: db.getFiles, + getUserKey: db.getUserKey, + updateFilesUsage: db.updateFilesUsage, + getUserKeyValues: db.getUserKeyValues, + getToolFilesByIds: db.getToolFilesByIds, + }, + ); if (!agent) { logger.warn( @@ -588,9 +599,9 @@ class AgentClient extends BaseClient { messageId, conversationId, memoryMethods: { - setMemory, - deleteMemory, - getFormattedMemories, + setMemory: db.setMemory, + deleteMemory: db.deleteMemory, + getFormattedMemories: db.getFormattedMemories, }, res: this.options.res, }); @@ -1040,7 +1051,7 @@ class AgentClient extends BaseClient { throw new Error('Run not initialized'); } const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator(); - const { req, res, agent } = this.options; + const { req, agent } = this.options; const appConfig = req.config; let endpoint = agent.endpoint; @@ -1097,11 +1108,12 @@ class AgentClient extends BaseClient { const options = await titleProviderConfig.getOptions({ req, - res, - optionsOnly: true, - overrideEndpoint: endpoint, - overrideModel: clientOptions.model, - endpointOption: { model_parameters: clientOptions }, + endpoint, + model_parameters: clientOptions, + db: { + getUserKey: db.getUserKey, + getUserKeyValues: db.getUserKeyValues, + }, }); let provider = options.provider ?? titleProviderConfig.overrideProvider ?? 
agent.provider; diff --git a/api/server/controllers/agents/v1.js b/api/server/controllers/agents/v1.js index b7b2dbf367..01e33f913b 100644 --- a/api/server/controllers/agents/v1.js +++ b/api/server/controllers/agents/v1.js @@ -38,14 +38,13 @@ const { grantPermission, } = require('~/server/services/PermissionService'); const { getStrategyFunctions } = require('~/server/services/Files/strategies'); +const { getCategoriesWithCounts, deleteFileByFilter } = require('~/models'); const { resizeAvatar } = require('~/server/services/Files/images/avatar'); const { getFileStrategy } = require('~/server/utils/getFileStrategy'); const { refreshS3Url } = require('~/server/services/Files/S3/crud'); const { filterFile } = require('~/server/services/Files/process'); const { updateAction, getActions } = require('~/models/Action'); const { getCachedTools } = require('~/server/services/Config'); -const { deleteFileByFilter } = require('~/models/File'); -const { getCategoriesWithCounts } = require('~/models'); const { getLogStores } = require('~/cache'); const systemTools = { diff --git a/api/server/controllers/assistants/v1.js b/api/server/controllers/assistants/v1.js index e2fbbe5b34..53566f2a24 100644 --- a/api/server/controllers/assistants/v1.js +++ b/api/server/controllers/assistants/v1.js @@ -9,7 +9,7 @@ const { updateAssistantDoc, getAssistants } = require('~/models/Assistant'); const { getOpenAIClient, fetchAssistants } = require('./helpers'); const { getCachedTools } = require('~/server/services/Config'); const { manifestToolMap } = require('~/app/clients/tools'); -const { deleteFileByFilter } = require('~/models/File'); +const { deleteFileByFilter } = require('~/models'); /** * Create an assistant. diff --git a/api/server/middleware/accessResources/fileAccess.js b/api/server/middleware/accessResources/fileAccess.js index b26a512f5f..25d41e7c02 100644 --- a/api/server/middleware/accessResources/fileAccess.js +++ b/api/server/middleware/accessResources/fileAccess.js @@ -2,7 +2,7 @@ const { logger } = require('@librechat/data-schemas'); const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider'); const { getEffectivePermissions } = require('~/server/services/PermissionService'); const { getAgents } = require('~/models/Agent'); -const { getFiles } = require('~/models/File'); +const { getFiles } = require('~/models'); /** * Checks if user has access to a file through agent permissions diff --git a/api/server/middleware/accessResources/fileAccess.spec.js b/api/server/middleware/accessResources/fileAccess.spec.js index 6e741ac34e..de7c7d50f6 100644 --- a/api/server/middleware/accessResources/fileAccess.spec.js +++ b/api/server/middleware/accessResources/fileAccess.spec.js @@ -4,7 +4,7 @@ const { MongoMemoryServer } = require('mongodb-memory-server'); const { fileAccess } = require('./fileAccess'); const { User, Role, AclEntry } = require('~/db/models'); const { createAgent } = require('~/models/Agent'); -const { createFile } = require('~/models/File'); +const { createFile } = require('~/models'); describe('fileAccess middleware', () => { let mongoServer; diff --git a/api/server/middleware/buildEndpointOption.js b/api/server/middleware/buildEndpointOption.js index e745c3b636..202bf7d921 100644 --- a/api/server/middleware/buildEndpointOption.js +++ b/api/server/middleware/buildEndpointOption.js @@ -8,22 +8,11 @@ const { } = require('librechat-data-provider'); const azureAssistants = require('~/server/services/Endpoints/azureAssistants'); const assistants = 
require('~/server/services/Endpoints/assistants'); -const { processFiles } = require('~/server/services/Files/process'); -const anthropic = require('~/server/services/Endpoints/anthropic'); -const bedrock = require('~/server/services/Endpoints/bedrock'); -const openAI = require('~/server/services/Endpoints/openAI'); const agents = require('~/server/services/Endpoints/agents'); -const custom = require('~/server/services/Endpoints/custom'); -const google = require('~/server/services/Endpoints/google'); +const { updateFilesUsage } = require('~/models'); const buildFunction = { - [EModelEndpoint.openAI]: openAI.buildOptions, - [EModelEndpoint.google]: google.buildOptions, - [EModelEndpoint.custom]: custom.buildOptions, [EModelEndpoint.agents]: agents.buildOptions, - [EModelEndpoint.bedrock]: bedrock.buildOptions, - [EModelEndpoint.azureOpenAI]: openAI.buildOptions, - [EModelEndpoint.anthropic]: anthropic.buildOptions, [EModelEndpoint.assistants]: assistants.buildOptions, [EModelEndpoint.azureAssistants]: azureAssistants.buildOptions, }; @@ -93,7 +82,7 @@ async function buildEndpointOption(req, res, next) { req.body.endpointOption = await builder(endpoint, parsedBody, endpointType); if (req.body.files && !isAgents) { - req.body.endpointOption.attachments = processFiles(req.body.files); + req.body.endpointOption.attachments = updateFilesUsage(req.body.files); } next(); diff --git a/api/server/routes/files/files.agents.test.js b/api/server/routes/files/files.agents.test.js index fbccf62a2b..8d0d640f2e 100644 --- a/api/server/routes/files/files.agents.test.js +++ b/api/server/routes/files/files.agents.test.js @@ -6,7 +6,7 @@ const { createMethods } = require('@librechat/data-schemas'); const { MongoMemoryServer } = require('mongodb-memory-server'); const { AccessRoleIds, ResourceType, PrincipalType } = require('librechat-data-provider'); const { createAgent } = require('~/models/Agent'); -const { createFile } = require('~/models/File'); +const { createFile } = require('~/models'); // Only mock the external dependencies that we don't want to test jest.mock('~/server/services/Files/process', () => ({ diff --git a/api/server/routes/files/files.js b/api/server/routes/files/files.js index 7237729c87..de149320bf 100644 --- a/api/server/routes/files/files.js +++ b/api/server/routes/files/files.js @@ -26,7 +26,7 @@ const { checkPermission } = require('~/server/services/PermissionService'); const { loadAuthValues } = require('~/server/services/Tools/credentials'); const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud'); const { hasAccessToFilesViaAgent } = require('~/server/services/Files'); -const { getFiles, batchUpdateFiles } = require('~/models/File'); +const { getFiles, batchUpdateFiles } = require('~/models'); const { cleanFileName } = require('~/server/utils/files'); const { getAssistant } = require('~/models/Assistant'); const { getAgent } = require('~/models/Agent'); diff --git a/api/server/routes/files/files.test.js b/api/server/routes/files/files.test.js index 896542b6f4..1d548b44be 100644 --- a/api/server/routes/files/files.test.js +++ b/api/server/routes/files/files.test.js @@ -11,7 +11,7 @@ const { PrincipalType, } = require('librechat-data-provider'); const { createAgent } = require('~/models/Agent'); -const { createFile } = require('~/models/File'); +const { createFile } = require('~/models'); // Only mock the external dependencies that we don't want to test jest.mock('~/server/services/Files/process', () => ({ diff --git a/api/server/routes/keys.js 
b/api/server/routes/keys.js index cb8a4a5d92..620e4d234b 100644 --- a/api/server/routes/keys.js +++ b/api/server/routes/keys.js @@ -1,7 +1,8 @@ const express = require('express'); +const { updateUserKey, deleteUserKey, getUserKeyExpiry } = require('~/models'); +const { requireJwtAuth } = require('~/server/middleware'); + const router = express.Router(); -const { updateUserKey, deleteUserKey, getUserKeyExpiry } = require('../services/UserService'); -const { requireJwtAuth } = require('../middleware/'); router.put('/', requireJwtAuth, async (req, res) => { await updateUserKey({ userId: req.user.id, ...req.body }); diff --git a/api/server/services/ActionService.js b/api/server/services/ActionService.js index b9555a752c..79586f0cf2 100644 --- a/api/server/services/ActionService.js +++ b/api/server/services/ActionService.js @@ -1,15 +1,9 @@ const jwt = require('jsonwebtoken'); const { nanoid } = require('nanoid'); const { tool } = require('@langchain/core/tools'); -const { logger } = require('@librechat/data-schemas'); const { GraphEvents, sleep } = require('@librechat/agents'); -const { - sendEvent, - encryptV2, - decryptV2, - logAxiosError, - refreshAccessToken, -} = require('@librechat/api'); +const { logger, encryptV2, decryptV2 } = require('@librechat/data-schemas'); +const { sendEvent, logAxiosError, refreshAccessToken } = require('@librechat/api'); const { Time, CacheKeys, diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js index 555bbcacf5..6354d10331 100644 --- a/api/server/services/Config/loadConfigModels.js +++ b/api/server/services/Config/loadConfigModels.js @@ -1,10 +1,9 @@ -const { isUserProvided } = require('@librechat/api'); +const { isUserProvided, fetchModels } = require('@librechat/api'); const { EModelEndpoint, extractEnvVariable, normalizeEndpointName, } = require('librechat-data-provider'); -const { fetchModels } = require('~/server/services/ModelService'); const { getAppConfig } = require('./app'); /** diff --git a/api/server/services/Config/loadConfigModels.spec.js b/api/server/services/Config/loadConfigModels.spec.js index 1e0e8780a7..6ffb8ba522 100644 --- a/api/server/services/Config/loadConfigModels.spec.js +++ b/api/server/services/Config/loadConfigModels.spec.js @@ -1,8 +1,11 @@ -const { fetchModels } = require('~/server/services/ModelService'); +const { fetchModels } = require('@librechat/api'); const loadConfigModels = require('./loadConfigModels'); const { getAppConfig } = require('./app'); -jest.mock('~/server/services/ModelService'); +jest.mock('@librechat/api', () => ({ + ...jest.requireActual('@librechat/api'), + fetchModels: jest.fn(), +})); jest.mock('./app'); const exampleConfig = { diff --git a/api/server/services/Config/loadDefaultModels.js b/api/server/services/Config/loadDefaultModels.js index a70fa58495..0a4b4a7fa7 100644 --- a/api/server/services/Config/loadDefaultModels.js +++ b/api/server/services/Config/loadDefaultModels.js @@ -5,7 +5,7 @@ const { getBedrockModels, getOpenAIModels, getGoogleModels, -} = require('~/server/services/ModelService'); +} = require('@librechat/api'); /** * Loads the default models for the application. 
diff --git a/api/server/services/Endpoints/agents/agent.js b/api/server/services/Endpoints/agents/agent.js deleted file mode 100644 index eebaa2cfc0..0000000000 --- a/api/server/services/Endpoints/agents/agent.js +++ /dev/null @@ -1,226 +0,0 @@ -const { Providers } = require('@librechat/agents'); -const { - primeResources, - getModelMaxTokens, - extractLibreChatParams, - filterFilesByEndpointConfig, - optionalChainWithEmptyCheck, -} = require('@librechat/api'); -const { - ErrorTypes, - EModelEndpoint, - EToolResources, - paramEndpoints, - isAgentsEndpoint, - replaceSpecialVars, - providerEndpointMap, -} = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); -const { getProviderConfig } = require('~/server/services/Endpoints'); -const { processFiles } = require('~/server/services/Files/process'); -const { getFiles, getToolFilesByIds } = require('~/models/File'); -const { getConvoFiles } = require('~/models/Conversation'); - -/** - * @param {object} params - * @param {ServerRequest} params.req - * @param {ServerResponse} params.res - * @param {Agent} params.agent - * @param {string | null} [params.conversationId] - * @param {Array} [params.requestFiles] - * @param {typeof import('~/server/services/ToolService').loadAgentTools | undefined} [params.loadTools] - * @param {TEndpointOption} [params.endpointOption] - * @param {Set} [params.allowedProviders] - * @param {boolean} [params.isInitialAgent] - * @returns {Promise, - * toolContextMap: Record, - * maxContextTokens: number, - * userMCPAuthMap?: Record> - * }>} - */ -const initializeAgent = async ({ - req, - res, - agent, - loadTools, - requestFiles, - conversationId, - endpointOption, - allowedProviders, - isInitialAgent = false, -}) => { - const appConfig = req.config; - if ( - isAgentsEndpoint(endpointOption?.endpoint) && - allowedProviders.size > 0 && - !allowedProviders.has(agent.provider) - ) { - throw new Error( - `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`, - ); - } - let currentFiles; - - const _modelOptions = structuredClone( - Object.assign( - { model: agent.model }, - agent.model_parameters ?? { model: agent.model }, - isInitialAgent === true ? endpointOption?.model_parameters : {}, - ), - ); - - const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams(_modelOptions); - - const provider = agent.provider; - agent.endpoint = provider; - - if (isInitialAgent && conversationId != null && resendFiles) { - const fileIds = (await getConvoFiles(conversationId)) ?? 
[]; - /** @type {Set} */ - const toolResourceSet = new Set(); - for (const tool of agent.tools) { - if (EToolResources[tool]) { - toolResourceSet.add(EToolResources[tool]); - } - } - const toolFiles = await getToolFilesByIds(fileIds, toolResourceSet); - if (requestFiles.length || toolFiles.length) { - currentFiles = await processFiles(requestFiles.concat(toolFiles)); - } - } else if (isInitialAgent && requestFiles.length) { - currentFiles = await processFiles(requestFiles); - } - - if (currentFiles && currentFiles.length) { - let endpointType; - if (!paramEndpoints.has(agent.endpoint)) { - endpointType = EModelEndpoint.custom; - } - - currentFiles = filterFilesByEndpointConfig(req, { - files: currentFiles, - endpoint: agent.endpoint, - endpointType, - }); - } - - const { attachments, tool_resources } = await primeResources({ - req, - getFiles, - appConfig, - agentId: agent.id, - attachments: currentFiles, - tool_resources: agent.tool_resources, - requestFileSet: new Set(requestFiles?.map((file) => file.file_id)), - }); - - const { - tools: structuredTools, - toolContextMap, - userMCPAuthMap, - } = (await loadTools?.({ - req, - res, - provider, - agentId: agent.id, - tools: agent.tools, - model: agent.model, - tool_resources, - })) ?? {}; - - const { getOptions, overrideProvider } = getProviderConfig({ provider, appConfig }); - if (overrideProvider !== agent.provider) { - agent.provider = overrideProvider; - } - - const _endpointOption = - isInitialAgent === true - ? Object.assign({}, endpointOption, { model_parameters: modelOptions }) - : { model_parameters: modelOptions }; - - const options = await getOptions({ - req, - res, - optionsOnly: true, - overrideEndpoint: provider, - overrideModel: agent.model, - endpointOption: _endpointOption, - }); - - const tokensModel = - agent.provider === EModelEndpoint.azureOpenAI ? agent.model : options.llmConfig?.model; - const maxOutputTokens = optionalChainWithEmptyCheck( - options.llmConfig?.maxOutputTokens, - options.llmConfig?.maxTokens, - 0, - ); - const agentMaxContextTokens = optionalChainWithEmptyCheck( - maxContextTokens, - getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig), - 18000, - ); - - if ( - agent.endpoint === EModelEndpoint.azureOpenAI && - options.llmConfig?.azureOpenAIApiInstanceName == null - ) { - agent.provider = Providers.OPENAI; - } - - if (options.provider != null) { - agent.provider = options.provider; - } - - /** @type {import('@librechat/agents').GenericTool[]} */ - let tools = options.tools?.length ? 
options.tools : structuredTools; - if ( - (agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) && - options.tools?.length && - structuredTools?.length - ) { - throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`); - } else if ( - (agent.provider === Providers.OPENAI || - agent.provider === Providers.AZURE || - agent.provider === Providers.ANTHROPIC) && - options.tools?.length && - structuredTools?.length - ) { - tools = structuredTools.concat(options.tools); - } - - /** @type {import('@librechat/agents').ClientOptions} */ - agent.model_parameters = { ...options.llmConfig }; - if (options.configOptions) { - agent.model_parameters.configuration = options.configOptions; - } - - if (agent.instructions && agent.instructions !== '') { - agent.instructions = replaceSpecialVars({ - text: agent.instructions, - user: req.user, - }); - } - - if (typeof agent.artifacts === 'string' && agent.artifacts !== '') { - agent.additional_instructions = generateArtifactsPrompt({ - endpoint: agent.provider, - artifacts: agent.artifacts, - }); - } - - return { - ...agent, - tools, - attachments, - resendFiles, - userMCPAuthMap, - toolContextMap, - useLegacyContent: !!options.useLegacyContent, - maxContextTokens: Math.round((agentMaxContextTokens - maxOutputTokens) * 0.9), - }; -}; - -module.exports = { initializeAgent }; diff --git a/api/server/services/Endpoints/agents/initialize.js b/api/server/services/Endpoints/agents/initialize.js index 3064a03662..8acf4c9292 100644 --- a/api/server/services/Endpoints/agents/initialize.js +++ b/api/server/services/Endpoints/agents/initialize.js @@ -1,6 +1,7 @@ const { logger } = require('@librechat/data-schemas'); const { createContentAggregator } = require('@librechat/agents'); const { + initializeAgent, validateAgentModel, getCustomEndpointConfig, createSequentialChainEdges, @@ -15,12 +16,13 @@ const { createToolEndCallback, getDefaultHandlers, } = require('~/server/controllers/agents/callbacks'); -const { initializeAgent } = require('~/server/services/Endpoints/agents/agent'); const { getModelsConfig } = require('~/server/controllers/ModelController'); const { loadAgentTools } = require('~/server/services/ToolService'); const AgentClient = require('~/server/controllers/agents/client'); +const { getConvoFiles } = require('~/models/Conversation'); const { getAgent } = require('~/models/Agent'); const { logViolation } = require('~/cache'); +const db = require('~/models'); /** * @param {AbortSignal} signal @@ -109,17 +111,27 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => { /** @type {string} */ const conversationId = req.body.conversationId; - const primaryConfig = await initializeAgent({ - req, - res, - loadTools, - requestFiles, - conversationId, - agent: primaryAgent, - endpointOption, - allowedProviders, - isInitialAgent: true, - }); + const primaryConfig = await initializeAgent( + { + req, + res, + loadTools, + requestFiles, + conversationId, + agent: primaryAgent, + endpointOption, + allowedProviders, + isInitialAgent: true, + }, + { + getConvoFiles, + getFiles: db.getFiles, + getUserKey: db.getUserKey, + updateFilesUsage: db.updateFilesUsage, + getUserKeyValues: db.getUserKeyValues, + getToolFilesByIds: db.getToolFilesByIds, + }, + ); const agent_ids = primaryConfig.agent_ids; let userMCPAuthMap = primaryConfig.userMCPAuthMap; @@ -142,16 +154,26 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => { throw new Error(validationResult.error?.message); } - const config = await 
initializeAgent({ - req, - res, - agent, - loadTools, - requestFiles, - conversationId, - endpointOption, - allowedProviders, - }); + const config = await initializeAgent( + { + req, + res, + agent, + loadTools, + requestFiles, + conversationId, + endpointOption, + allowedProviders, + }, + { + getConvoFiles, + getFiles: db.getFiles, + getUserKey: db.getUserKey, + updateFilesUsage: db.updateFilesUsage, + getUserKeyValues: db.getUserKeyValues, + getToolFilesByIds: db.getToolFilesByIds, + }, + ); if (userMCPAuthMap != null) { Object.assign(userMCPAuthMap, config.userMCPAuthMap ?? {}); } else { diff --git a/api/server/services/Endpoints/anthropic/build.js b/api/server/services/Endpoints/anthropic/build.js deleted file mode 100644 index 1d2c29d81e..0000000000 --- a/api/server/services/Endpoints/anthropic/build.js +++ /dev/null @@ -1,44 +0,0 @@ -const { removeNullishValues, anthropicSettings } = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); - -const buildOptions = (endpoint, parsedBody) => { - const { - modelLabel, - promptPrefix, - maxContextTokens, - fileTokenLimit, - resendFiles = anthropicSettings.resendFiles.default, - promptCache = anthropicSettings.promptCache.default, - thinking = anthropicSettings.thinking.default, - thinkingBudget = anthropicSettings.thinkingBudget.default, - iconURL, - greeting, - spec, - artifacts, - ...modelOptions - } = parsedBody; - - const endpointOption = removeNullishValues({ - endpoint, - modelLabel, - promptPrefix, - resendFiles, - promptCache, - thinking, - thinkingBudget, - iconURL, - greeting, - spec, - maxContextTokens, - fileTokenLimit, - modelOptions, - }); - - if (typeof artifacts === 'string') { - endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts }); - } - - return endpointOption; -}; - -module.exports = buildOptions; diff --git a/api/server/services/Endpoints/anthropic/index.js b/api/server/services/Endpoints/anthropic/index.js deleted file mode 100644 index c4e7533c5d..0000000000 --- a/api/server/services/Endpoints/anthropic/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const addTitle = require('./title'); -const buildOptions = require('./build'); -const initializeClient = require('./initialize'); - -module.exports = { - addTitle, - buildOptions, - initializeClient, -}; diff --git a/api/server/services/Endpoints/anthropic/initialize.js b/api/server/services/Endpoints/anthropic/initialize.js deleted file mode 100644 index 5944240379..0000000000 --- a/api/server/services/Endpoints/anthropic/initialize.js +++ /dev/null @@ -1,53 +0,0 @@ -const { getLLMConfig } = require('@librechat/api'); -const { EModelEndpoint } = require('librechat-data-provider'); -const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); - -const initializeClient = async ({ req, endpointOption, overrideModel }) => { - const appConfig = req.config; - const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env; - const expiresAt = req.body.key; - const isUserProvided = ANTHROPIC_API_KEY === 'user_provided'; - - const anthropicApiKey = isUserProvided - ? await getUserKey({ userId: req.user.id, name: EModelEndpoint.anthropic }) - : ANTHROPIC_API_KEY; - - if (!anthropicApiKey) { - throw new Error('Anthropic API key not provided. 
Please provide it again.'); - } - - if (expiresAt && isUserProvided) { - checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic); - } - - let clientOptions = {}; - - /** @type {undefined | TBaseEndpoint} */ - const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic]; - - if (anthropicConfig) { - clientOptions._lc_stream_delay = anthropicConfig.streamRate; - clientOptions.titleModel = anthropicConfig.titleModel; - } - - const allConfig = appConfig.endpoints?.all; - if (allConfig) { - clientOptions._lc_stream_delay = allConfig.streamRate; - } - - clientOptions = Object.assign( - { - proxy: PROXY ?? null, - reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null, - modelOptions: endpointOption?.model_parameters ?? {}, - }, - clientOptions, - ); - if (overrideModel) { - clientOptions.modelOptions.model = overrideModel; - } - clientOptions.modelOptions.user = req.user.id; - return getLLMConfig(anthropicApiKey, clientOptions); -}; - -module.exports = initializeClient; diff --git a/api/server/services/Endpoints/anthropic/title.js b/api/server/services/Endpoints/anthropic/title.js deleted file mode 100644 index cac39fa2be..0000000000 --- a/api/server/services/Endpoints/anthropic/title.js +++ /dev/null @@ -1,35 +0,0 @@ -const { isEnabled } = require('@librechat/api'); -const { CacheKeys } = require('librechat-data-provider'); -const getLogStores = require('~/cache/getLogStores'); -const { saveConvo } = require('~/models'); - -const addTitle = async (req, { text, response, client }) => { - const { TITLE_CONVO = 'true' } = process.env ?? {}; - if (!isEnabled(TITLE_CONVO)) { - return; - } - - if (client.options.titleConvo === false) { - return; - } - - const titleCache = getLogStores(CacheKeys.GEN_TITLE); - const key = `${req.user.id}-${response.conversationId}`; - - const title = await client.titleConvo({ - text, - responseText: response?.text ?? 
'', - conversationId: response.conversationId, - }); - await titleCache.set(key, title, 120000); - await saveConvo( - req, - { - conversationId: response.conversationId, - title, - }, - { context: 'api/server/services/Endpoints/anthropic/addTitle.js' }, - ); -}; - -module.exports = addTitle; diff --git a/api/server/services/Endpoints/assistants/initalize.js b/api/server/services/Endpoints/assistants/initalize.js index 56a00cfe3f..d5a246dff7 100644 --- a/api/server/services/Endpoints/assistants/initalize.js +++ b/api/server/services/Endpoints/assistants/initalize.js @@ -1,12 +1,8 @@ const OpenAI = require('openai'); const { ProxyAgent } = require('undici'); -const { isUserProvided } = require('@librechat/api'); +const { isUserProvided, checkUserKeyExpiry } = require('@librechat/api'); const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider'); -const { - getUserKeyValues, - getUserKeyExpiry, - checkUserKeyExpiry, -} = require('~/server/services/UserService'); +const { getUserKeyValues, getUserKeyExpiry } = require('~/models'); const initializeClient = async ({ req, res, version }) => { const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env; diff --git a/api/server/services/Endpoints/azureAssistants/initialize.js b/api/server/services/Endpoints/azureAssistants/initialize.js index 85f77b60de..6a9118ea8a 100644 --- a/api/server/services/Endpoints/azureAssistants/initialize.js +++ b/api/server/services/Endpoints/azureAssistants/initialize.js @@ -1,12 +1,13 @@ const OpenAI = require('openai'); const { ProxyAgent } = require('undici'); -const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api'); -const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider'); const { + isUserProvided, + resolveHeaders, + constructAzureURL, checkUserKeyExpiry, - getUserKeyValues, - getUserKeyExpiry, -} = require('~/server/services/UserService'); +} = require('@librechat/api'); +const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider'); +const { getUserKeyValues, getUserKeyExpiry } = require('~/models'); class Files { constructor(client) { diff --git a/api/server/services/Endpoints/bedrock/build.js b/api/server/services/Endpoints/bedrock/build.js deleted file mode 100644 index b9f281bd99..0000000000 --- a/api/server/services/Endpoints/bedrock/build.js +++ /dev/null @@ -1,39 +0,0 @@ -const { removeNullishValues } = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); - -const buildOptions = (endpoint, parsedBody) => { - const { - modelLabel: name, - promptPrefix, - maxContextTokens, - fileTokenLimit, - resendFiles = true, - imageDetail, - iconURL, - greeting, - spec, - artifacts, - ...model_parameters - } = parsedBody; - const endpointOption = removeNullishValues({ - endpoint, - name, - resendFiles, - imageDetail, - iconURL, - greeting, - spec, - promptPrefix, - maxContextTokens, - fileTokenLimit, - model_parameters, - }); - - if (typeof artifacts === 'string') { - endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts }); - } - - return endpointOption; -}; - -module.exports = { buildOptions }; diff --git a/api/server/services/Endpoints/bedrock/index.js b/api/server/services/Endpoints/bedrock/index.js deleted file mode 100644 index 8989f7df8c..0000000000 --- a/api/server/services/Endpoints/bedrock/index.js +++ /dev/null @@ -1,7 +0,0 @@ -const build = require('./build'); -const 
initialize = require('./initialize'); - -module.exports = { - ...build, - ...initialize, -}; diff --git a/api/server/services/Endpoints/bedrock/initialize.js b/api/server/services/Endpoints/bedrock/initialize.js deleted file mode 100644 index bbee7caf39..0000000000 --- a/api/server/services/Endpoints/bedrock/initialize.js +++ /dev/null @@ -1,79 +0,0 @@ -const { getModelMaxTokens } = require('@librechat/api'); -const { createContentAggregator } = require('@librechat/agents'); -const { - EModelEndpoint, - providerEndpointMap, - getResponseSender, -} = require('librechat-data-provider'); -const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks'); -const getOptions = require('~/server/services/Endpoints/bedrock/options'); -const AgentClient = require('~/server/controllers/agents/client'); - -const initializeClient = async ({ req, res, endpointOption }) => { - if (!endpointOption) { - throw new Error('Endpoint option not provided'); - } - - /** @type {Array} */ - const collectedUsage = []; - const { contentParts, aggregateContent } = createContentAggregator(); - const eventHandlers = getDefaultHandlers({ res, aggregateContent, collectedUsage }); - - /** @type {Agent} */ - const agent = { - id: EModelEndpoint.bedrock, - name: endpointOption.name, - provider: EModelEndpoint.bedrock, - endpoint: EModelEndpoint.bedrock, - instructions: endpointOption.promptPrefix, - model: endpointOption.model_parameters.model, - model_parameters: endpointOption.model_parameters, - }; - - if (typeof endpointOption.artifactsPrompt === 'string' && endpointOption.artifactsPrompt) { - agent.instructions = `${agent.instructions ?? ''}\n${endpointOption.artifactsPrompt}`.trim(); - } - - // TODO: pass-in override settings that are specific to current run - const options = await getOptions({ - req, - res, - endpointOption, - }); - - agent.model_parameters = Object.assign(agent.model_parameters, options.llmConfig); - if (options.configOptions) { - agent.model_parameters.configuration = options.configOptions; - } - - const sender = - agent.name ?? - getResponseSender({ - ...endpointOption, - model: endpointOption.model_parameters.model, - }); - - const client = new AgentClient({ - req, - res, - agent, - sender, - // tools, - contentParts, - eventHandlers, - collectedUsage, - spec: endpointOption.spec, - iconURL: endpointOption.iconURL, - endpoint: EModelEndpoint.bedrock, - resendFiles: endpointOption.resendFiles, - maxContextTokens: - endpointOption.maxContextTokens ?? - agent.max_context_tokens ?? - getModelMaxTokens(agent.model_parameters.model, providerEndpointMap[agent.provider]) ?? 
- 4000, - attachments: endpointOption.attachments, - }); - return { client }; -}; - -module.exports = { initializeClient }; diff --git a/api/server/services/Endpoints/custom/build.js b/api/server/services/Endpoints/custom/build.js deleted file mode 100644 index b1839ee035..0000000000 --- a/api/server/services/Endpoints/custom/build.js +++ /dev/null @@ -1,42 +0,0 @@ -const { removeNullishValues } = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); - -const buildOptions = (endpoint, parsedBody, endpointType) => { - const { - modelLabel, - chatGptLabel, - promptPrefix, - maxContextTokens, - fileTokenLimit, - resendFiles = true, - imageDetail, - iconURL, - greeting, - spec, - artifacts, - ...modelOptions - } = parsedBody; - const endpointOption = removeNullishValues({ - endpoint, - endpointType, - modelLabel, - chatGptLabel, - promptPrefix, - resendFiles, - imageDetail, - iconURL, - greeting, - spec, - maxContextTokens, - fileTokenLimit, - modelOptions, - }); - - if (typeof artifacts === 'string') { - endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts }); - } - - return endpointOption; -}; - -module.exports = buildOptions; diff --git a/api/server/services/Endpoints/custom/index.js b/api/server/services/Endpoints/custom/index.js deleted file mode 100644 index 5a70d78749..0000000000 --- a/api/server/services/Endpoints/custom/index.js +++ /dev/null @@ -1,7 +0,0 @@ -const initializeClient = require('./initialize'); -const buildOptions = require('./build'); - -module.exports = { - initializeClient, - buildOptions, -}; diff --git a/api/server/services/Endpoints/custom/initialize.js b/api/server/services/Endpoints/custom/initialize.js deleted file mode 100644 index e0527d7d8a..0000000000 --- a/api/server/services/Endpoints/custom/initialize.js +++ /dev/null @@ -1,145 +0,0 @@ -const { isUserProvided, getOpenAIConfig, getCustomEndpointConfig } = require('@librechat/api'); -const { - CacheKeys, - ErrorTypes, - envVarRegex, - FetchTokenConfig, - extractEnvVariable, -} = require('librechat-data-provider'); -const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); -const { fetchModels } = require('~/server/services/ModelService'); -const getLogStores = require('~/cache/getLogStores'); - -const { PROXY } = process.env; - -const initializeClient = async ({ req, endpointOption, overrideEndpoint }) => { - const appConfig = req.config; - const { key: expiresAt } = req.body; - const endpoint = overrideEndpoint ?? req.body.endpoint; - - const endpointConfig = getCustomEndpointConfig({ - endpoint, - appConfig, - }); - if (!endpointConfig) { - throw new Error(`Config not found for the ${endpoint} custom endpoint.`); - } - - const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey); - const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL); - - if (CUSTOM_API_KEY.match(envVarRegex)) { - throw new Error(`Missing API Key for ${endpoint}.`); - } - - if (CUSTOM_BASE_URL.match(envVarRegex)) { - throw new Error(`Missing Base URL for ${endpoint}.`); - } - - const userProvidesKey = isUserProvided(CUSTOM_API_KEY); - const userProvidesURL = isUserProvided(CUSTOM_BASE_URL); - - let userValues = null; - if (expiresAt && (userProvidesKey || userProvidesURL)) { - checkUserKeyExpiry(expiresAt, endpoint); - userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint }); - } - - let apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY; - let baseURL = userProvidesURL ? 
userValues?.baseURL : CUSTOM_BASE_URL; - - if (userProvidesKey & !apiKey) { - throw new Error( - JSON.stringify({ - type: ErrorTypes.NO_USER_KEY, - }), - ); - } - - if (userProvidesURL && !baseURL) { - throw new Error( - JSON.stringify({ - type: ErrorTypes.NO_BASE_URL, - }), - ); - } - - if (!apiKey) { - throw new Error(`${endpoint} API key not provided.`); - } - - if (!baseURL) { - throw new Error(`${endpoint} Base URL not provided.`); - } - - const cache = getLogStores(CacheKeys.TOKEN_CONFIG); - const tokenKey = - !endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL) - ? `${endpoint}:${req.user.id}` - : endpoint; - - let endpointTokenConfig = - !endpointConfig.tokenConfig && - FetchTokenConfig[endpoint.toLowerCase()] && - (await cache.get(tokenKey)); - - if ( - FetchTokenConfig[endpoint.toLowerCase()] && - endpointConfig && - endpointConfig.models.fetch && - !endpointTokenConfig - ) { - await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey }); - endpointTokenConfig = await cache.get(tokenKey); - } - - const customOptions = { - headers: endpointConfig.headers, - addParams: endpointConfig.addParams, - dropParams: endpointConfig.dropParams, - customParams: endpointConfig.customParams, - titleConvo: endpointConfig.titleConvo, - titleModel: endpointConfig.titleModel, - forcePrompt: endpointConfig.forcePrompt, - summaryModel: endpointConfig.summaryModel, - modelDisplayLabel: endpointConfig.modelDisplayLabel, - titleMethod: endpointConfig.titleMethod ?? 'completion', - contextStrategy: endpointConfig.summarize ? 'summarize' : null, - directEndpoint: endpointConfig.directEndpoint, - titleMessageRole: endpointConfig.titleMessageRole, - streamRate: endpointConfig.streamRate, - endpointTokenConfig, - }; - - const allConfig = appConfig.endpoints?.all; - if (allConfig) { - customOptions.streamRate = allConfig.streamRate; - } - - let clientOptions = { - reverseProxyUrl: baseURL ?? null, - proxy: PROXY ?? null, - ...customOptions, - ...endpointOption, - }; - - const modelOptions = endpointOption?.model_parameters ?? 
{}; - clientOptions = Object.assign( - { - modelOptions, - }, - clientOptions, - ); - clientOptions.modelOptions.user = req.user.id; - const options = getOpenAIConfig(apiKey, clientOptions, endpoint); - if (options != null) { - options.useLegacyContent = true; - options.endpointTokenConfig = endpointTokenConfig; - } - if (clientOptions.streamRate) { - options.llmConfig._lc_stream_delay = clientOptions.streamRate; - } - return options; -}; - -module.exports = initializeClient; diff --git a/api/server/services/Endpoints/google/build.js b/api/server/services/Endpoints/google/build.js deleted file mode 100644 index 3ac6b167c4..0000000000 --- a/api/server/services/Endpoints/google/build.js +++ /dev/null @@ -1,39 +0,0 @@ -const { removeNullishValues } = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); - -const buildOptions = (endpoint, parsedBody) => { - const { - examples, - modelLabel, - resendFiles = true, - promptPrefix, - iconURL, - greeting, - spec, - artifacts, - maxContextTokens, - fileTokenLimit, - ...modelOptions - } = parsedBody; - const endpointOption = removeNullishValues({ - examples, - endpoint, - modelLabel, - resendFiles, - promptPrefix, - iconURL, - greeting, - spec, - maxContextTokens, - fileTokenLimit, - modelOptions, - }); - - if (typeof artifacts === 'string') { - endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts }); - } - - return endpointOption; -}; - -module.exports = buildOptions; diff --git a/api/server/services/Endpoints/google/index.js b/api/server/services/Endpoints/google/index.js deleted file mode 100644 index c4e7533c5d..0000000000 --- a/api/server/services/Endpoints/google/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const addTitle = require('./title'); -const buildOptions = require('./build'); -const initializeClient = require('./initialize'); - -module.exports = { - addTitle, - buildOptions, - initializeClient, -}; diff --git a/api/server/services/Endpoints/google/initialize.js b/api/server/services/Endpoints/google/initialize.js deleted file mode 100644 index de4cf74ae2..0000000000 --- a/api/server/services/Endpoints/google/initialize.js +++ /dev/null @@ -1,83 +0,0 @@ -const path = require('path'); -const { EModelEndpoint, AuthKeys } = require('librechat-data-provider'); -const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api'); -const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); - -const initializeClient = async ({ req, endpointOption, overrideModel }) => { - const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env; - const isUserProvided = GOOGLE_KEY === 'user_provided'; - const { key: expiresAt } = req.body; - - let userKey = null; - if (expiresAt && isUserProvided) { - checkUserKeyExpiry(expiresAt, EModelEndpoint.google); - userKey = await getUserKey({ userId: req.user.id, name: EModelEndpoint.google }); - } - - let serviceKey = {}; - - /** Check if GOOGLE_KEY is provided at all (including 'user_provided') */ - const isGoogleKeyProvided = - (GOOGLE_KEY && GOOGLE_KEY.trim() !== '') || (isUserProvided && userKey != null); - - if (!isGoogleKeyProvided) { - /** Only attempt to load service key if GOOGLE_KEY is not provided */ - try { - const serviceKeyPath = - process.env.GOOGLE_SERVICE_KEY_FILE || - path.join(__dirname, '../../../..', 'data', 'auth.json'); - serviceKey = await loadServiceKey(serviceKeyPath); - if (!serviceKey) { - serviceKey = {}; - } - } catch (_e) { - // Service key 
loading failed, but that's okay if not required - serviceKey = {}; - } - } - - const credentials = isUserProvided - ? userKey - : { - [AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey, - [AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY, - }; - - let clientOptions = {}; - - const appConfig = req.config; - /** @type {undefined | TBaseEndpoint} */ - const allConfig = appConfig.endpoints?.all; - /** @type {undefined | TBaseEndpoint} */ - const googleConfig = appConfig.endpoints?.[EModelEndpoint.google]; - - if (googleConfig) { - clientOptions.streamRate = googleConfig.streamRate; - clientOptions.titleModel = googleConfig.titleModel; - } - - if (allConfig) { - clientOptions.streamRate = allConfig.streamRate; - } - - clientOptions = { - reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? null, - authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? null, - proxy: PROXY ?? null, - ...clientOptions, - ...endpointOption, - }; - - clientOptions = Object.assign( - { - modelOptions: endpointOption?.model_parameters ?? {}, - }, - clientOptions, - ); - if (overrideModel) { - clientOptions.modelOptions.model = overrideModel; - } - return getGoogleConfig(credentials, clientOptions); -}; - -module.exports = initializeClient; diff --git a/api/server/services/Endpoints/google/title.js b/api/server/services/Endpoints/google/title.js deleted file mode 100644 index 63ed8aae5f..0000000000 --- a/api/server/services/Endpoints/google/title.js +++ /dev/null @@ -1,60 +0,0 @@ -const { isEnabled } = require('@librechat/api'); -const { EModelEndpoint, CacheKeys, Constants, googleSettings } = require('librechat-data-provider'); -const getLogStores = require('~/cache/getLogStores'); -const initializeClient = require('./initialize'); -const { saveConvo } = require('~/models'); - -const addTitle = async (req, { text, response, client }) => { - const { TITLE_CONVO = 'true' } = process.env ?? {}; - if (!isEnabled(TITLE_CONVO)) { - return; - } - - if (client.options.titleConvo === false) { - return; - } - const { GOOGLE_TITLE_MODEL } = process.env ?? {}; - const appConfig = req.config; - const providerConfig = appConfig.endpoints?.[EModelEndpoint.google]; - let model = - providerConfig?.titleModel ?? - GOOGLE_TITLE_MODEL ?? - client.options?.modelOptions.model ?? - googleSettings.model.default; - - if (GOOGLE_TITLE_MODEL === Constants.CURRENT_MODEL) { - model = client.options?.modelOptions.model; - } - - const titleEndpointOptions = { - ...client.options, - modelOptions: { ...client.options?.modelOptions, model: model }, - attachments: undefined, // After a response, this is set to an empty array which results in an error during setOptions - }; - - const { client: titleClient } = await initializeClient({ - req, - res: response, - endpointOption: titleEndpointOptions, - }); - - const titleCache = getLogStores(CacheKeys.GEN_TITLE); - const key = `${req.user.id}-${response.conversationId}`; - - const title = await titleClient.titleConvo({ - text, - responseText: response?.text ?? 
'', - conversationId: response.conversationId, - }); - await titleCache.set(key, title, 120000); - await saveConvo( - req, - { - conversationId: response.conversationId, - title, - }, - { context: 'api/server/services/Endpoints/google/addTitle.js' }, - ); -}; - -module.exports = addTitle; diff --git a/api/server/services/Endpoints/openAI/build.js b/api/server/services/Endpoints/openAI/build.js deleted file mode 100644 index 611546a545..0000000000 --- a/api/server/services/Endpoints/openAI/build.js +++ /dev/null @@ -1,42 +0,0 @@ -const { removeNullishValues } = require('librechat-data-provider'); -const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); - -const buildOptions = (endpoint, parsedBody) => { - const { - modelLabel, - chatGptLabel, - promptPrefix, - maxContextTokens, - fileTokenLimit, - resendFiles = true, - imageDetail, - iconURL, - greeting, - spec, - artifacts, - ...modelOptions - } = parsedBody; - - const endpointOption = removeNullishValues({ - endpoint, - modelLabel, - chatGptLabel, - promptPrefix, - resendFiles, - imageDetail, - iconURL, - greeting, - spec, - maxContextTokens, - fileTokenLimit, - modelOptions, - }); - - if (typeof artifacts === 'string') { - endpointOption.artifactsPrompt = generateArtifactsPrompt({ endpoint, artifacts }); - } - - return endpointOption; -}; - -module.exports = buildOptions; diff --git a/api/server/services/Endpoints/openAI/index.js b/api/server/services/Endpoints/openAI/index.js deleted file mode 100644 index c4e7533c5d..0000000000 --- a/api/server/services/Endpoints/openAI/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const addTitle = require('./title'); -const buildOptions = require('./build'); -const initializeClient = require('./initialize'); - -module.exports = { - addTitle, - buildOptions, - initializeClient, -}; diff --git a/api/server/services/Endpoints/openAI/initialize.js b/api/server/services/Endpoints/openAI/initialize.js deleted file mode 100644 index c6eccd5716..0000000000 --- a/api/server/services/Endpoints/openAI/initialize.js +++ /dev/null @@ -1,147 +0,0 @@ -const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider'); -const { - isEnabled, - resolveHeaders, - isUserProvided, - getOpenAIConfig, - getAzureCredentials, -} = require('@librechat/api'); -const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); - -const initializeClient = async ({ req, endpointOption, overrideEndpoint, overrideModel }) => { - const appConfig = req.config; - const { - PROXY, - OPENAI_API_KEY, - AZURE_API_KEY, - OPENAI_REVERSE_PROXY, - AZURE_OPENAI_BASEURL, - OPENAI_SUMMARIZE, - DEBUG_OPENAI, - } = process.env; - const { key: expiresAt } = req.body; - const modelName = overrideModel ?? req.body.model; - const endpoint = overrideEndpoint ?? req.body.endpoint; - const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null; - - const credentials = { - [EModelEndpoint.openAI]: OPENAI_API_KEY, - [EModelEndpoint.azureOpenAI]: AZURE_API_KEY, - }; - - const baseURLOptions = { - [EModelEndpoint.openAI]: OPENAI_REVERSE_PROXY, - [EModelEndpoint.azureOpenAI]: AZURE_OPENAI_BASEURL, - }; - - const userProvidesKey = isUserProvided(credentials[endpoint]); - const userProvidesURL = isUserProvided(baseURLOptions[endpoint]); - - let userValues = null; - if (expiresAt && (userProvidesKey || userProvidesURL)) { - checkUserKeyExpiry(expiresAt, endpoint); - userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint }); - } - - let apiKey = userProvidesKey ? 
userValues?.apiKey : credentials[endpoint]; - let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint]; - - let clientOptions = { - contextStrategy, - proxy: PROXY ?? null, - debug: isEnabled(DEBUG_OPENAI), - reverseProxyUrl: baseURL ? baseURL : null, - ...endpointOption, - }; - - const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI; - /** @type {false | TAzureConfig} */ - const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI]; - let serverless = false; - if (isAzureOpenAI && azureConfig) { - const { modelGroupMap, groupMap } = azureConfig; - const { - azureOptions, - baseURL, - headers = {}, - serverless: _serverless, - } = mapModelToAzureConfig({ - modelName, - modelGroupMap, - groupMap, - }); - serverless = _serverless; - - clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl; - clientOptions.headers = resolveHeaders({ - headers: { ...headers, ...(clientOptions.headers ?? {}) }, - user: req.user, - }); - - clientOptions.titleConvo = azureConfig.titleConvo; - clientOptions.titleModel = azureConfig.titleModel; - - const azureRate = modelName.includes('gpt-4') ? 30 : 17; - clientOptions.streamRate = azureConfig.streamRate ?? azureRate; - - clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion'; - - const groupName = modelGroupMap[modelName].group; - clientOptions.addParams = azureConfig.groupMap[groupName].addParams; - clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams; - clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt; - - apiKey = azureOptions.azureOpenAIApiKey; - clientOptions.azure = !serverless && azureOptions; - if (serverless === true) { - clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion - ? { 'api-version': azureOptions.azureOpenAIApiVersion } - : undefined; - clientOptions.headers['api-key'] = apiKey; - } - } else if (isAzureOpenAI) { - clientOptions.azure = userProvidesKey ? JSON.parse(userValues.apiKey) : getAzureCredentials(); - apiKey = clientOptions.azure.azureOpenAIApiKey; - } - - /** @type {undefined | TBaseEndpoint} */ - const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI]; - - if (!isAzureOpenAI && openAIConfig) { - clientOptions.streamRate = openAIConfig.streamRate; - clientOptions.titleModel = openAIConfig.titleModel; - } - - const allConfig = appConfig.endpoints?.all; - if (allConfig) { - clientOptions.streamRate = allConfig.streamRate; - } - - if (userProvidesKey & !apiKey) { - throw new Error( - JSON.stringify({ - type: ErrorTypes.NO_USER_KEY, - }), - ); - } - - if (!apiKey) { - throw new Error(`${endpoint} API Key not provided.`); - } - - const modelOptions = endpointOption?.model_parameters ?? 
{}; - modelOptions.model = modelName; - clientOptions = Object.assign({ modelOptions }, clientOptions); - clientOptions.modelOptions.user = req.user.id; - const options = getOpenAIConfig(apiKey, clientOptions, endpoint); - if (options != null && serverless === true) { - options.useLegacyContent = true; - } - const streamRate = clientOptions.streamRate; - if (streamRate) { - options.llmConfig._lc_stream_delay = streamRate; - } - return options; -}; - -module.exports = initializeClient; diff --git a/api/server/services/Endpoints/openAI/title.js b/api/server/services/Endpoints/openAI/title.js deleted file mode 100644 index f8624ef657..0000000000 --- a/api/server/services/Endpoints/openAI/title.js +++ /dev/null @@ -1,35 +0,0 @@ -const { isEnabled } = require('@librechat/api'); -const { CacheKeys } = require('librechat-data-provider'); -const getLogStores = require('~/cache/getLogStores'); -const { saveConvo } = require('~/models'); - -const addTitle = async (req, { text, response, client }) => { - const { TITLE_CONVO = 'true' } = process.env ?? {}; - if (!isEnabled(TITLE_CONVO)) { - return; - } - - if (client.options.titleConvo === false) { - return; - } - - const titleCache = getLogStores(CacheKeys.GEN_TITLE); - const key = `${req.user.id}-${response.conversationId}`; - - const title = await client.titleConvo({ - text, - responseText: response?.text ?? '', - conversationId: response.conversationId, - }); - await titleCache.set(key, title, 120000); - await saveConvo( - req, - { - conversationId: response.conversationId, - title, - }, - { context: 'api/server/services/Endpoints/openAI/addTitle.js' }, - ); -}; - -module.exports = addTitle; diff --git a/api/server/services/Files/Code/process.js b/api/server/services/Files/Code/process.js index c38aad7087..15df6de0d6 100644 --- a/api/server/services/Files/Code/process.js +++ b/api/server/services/Files/Code/process.js @@ -14,7 +14,7 @@ const { const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions'); const { getStrategyFunctions } = require('~/server/services/Files/strategies'); const { convertImage } = require('~/server/services/Files/images/convert'); -const { createFile, getFiles, updateFile } = require('~/models/File'); +const { createFile, getFiles, updateFile } = require('~/models'); /** * Process OpenAI image files, convert to target format, save and return file metadata. 
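The hunks directly above and below this point swap the file-model imports from `~/models/File` to the `~/models` barrel. A minimal sketch of what that barrel re-export presumably looks like; the actual `api/models/index.js` is not part of this diff, so the shape and the other exports are assumptions:

```js
// Hypothetical api/models/index.js barrel (not shown in this diff): re-exporting the
// File helpers lets call sites require('~/models') instead of require('~/models/File').
const { createFile, getFiles, updateFile, updateFileUsage, deleteFiles } = require('./File');

module.exports = {
  // ...other model exports (Agent, Conversation, Key, etc.) are assumed to be re-exported here too
  createFile,
  getFiles,
  updateFile,
  updateFileUsage,
  deleteFiles,
};
```

With a barrel like this in place, only the require path changes at each call site; the destructured names stay the same.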
diff --git a/api/server/services/Files/process.js b/api/server/services/Files/process.js index f586554ae8..30b47f2e52 100644 --- a/api/server/services/Files/process.js +++ b/api/server/services/Files/process.js @@ -28,8 +28,8 @@ const { const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2'); const { addAgentResourceFile, removeAgentResourceFiles } = require('~/models/Agent'); const { getOpenAIClient } = require('~/server/controllers/assistants/helpers'); -const { createFile, updateFileUsage, deleteFiles } = require('~/models/File'); const { loadAuthValues } = require('~/server/services/Tools/credentials'); +const { createFile, updateFileUsage, deleteFiles } = require('~/models'); const { getFileStrategy } = require('~/server/utils/getFileStrategy'); const { checkCapability } = require('~/server/services/Config'); const { LB_QueueAsyncCall } = require('~/server/utils/queue'); @@ -60,45 +60,6 @@ const createSanitizedUploadWrapper = (uploadFunction) => { }; }; -/** - * - * @param {Array} files - * @param {Array} [fileIds] - * @returns - */ -const processFiles = async (files, fileIds) => { - const promises = []; - const seen = new Set(); - - for (let file of files) { - const { file_id } = file; - if (seen.has(file_id)) { - continue; - } - seen.add(file_id); - promises.push(updateFileUsage({ file_id })); - } - - if (!fileIds) { - const results = await Promise.all(promises); - // Filter out null results from failed updateFileUsage calls - return results.filter((result) => result != null); - } - - for (let file_id of fileIds) { - if (seen.has(file_id)) { - continue; - } - seen.add(file_id); - promises.push(updateFileUsage({ file_id })); - } - - // TODO: calculate token cost when image is first uploaded - const results = await Promise.all(promises); - // Filter out null results from failed updateFileUsage calls - return results.filter((result) => result != null); -}; - /** * Enqueues the delete operation to the leaky bucket queue if necessary, or adds it directly to promises. 
* @@ -1057,7 +1018,6 @@ function filterFile({ req, image, isAvatar }) { module.exports = { filterFile, - processFiles, processFileURL, saveBase64Image, processImageFile, diff --git a/api/server/services/Files/processFiles.test.js b/api/server/services/Files/processFiles.test.js deleted file mode 100644 index 8417f639e9..0000000000 --- a/api/server/services/Files/processFiles.test.js +++ /dev/null @@ -1,248 +0,0 @@ -// Mock the updateFileUsage function before importing the actual processFiles -jest.mock('~/models/File', () => ({ - updateFileUsage: jest.fn(), -})); - -// Mock winston and logger configuration to avoid dependency issues -jest.mock('~/config', () => ({ - logger: { - info: jest.fn(), - warn: jest.fn(), - debug: jest.fn(), - error: jest.fn(), - }, -})); - -// Mock all other dependencies that might cause issues -jest.mock('librechat-data-provider', () => ({ - isUUID: { parse: jest.fn() }, - megabyte: 1024 * 1024, - PrincipalType: { - USER: 'user', - GROUP: 'group', - PUBLIC: 'public', - }, - PrincipalModel: { - USER: 'User', - GROUP: 'Group', - }, - ResourceType: { - AGENT: 'agent', - PROJECT: 'project', - FILE: 'file', - PROMPTGROUP: 'promptGroup', - }, - FileContext: { message_attachment: 'message_attachment' }, - FileSources: { local: 'local' }, - EModelEndpoint: { assistants: 'assistants' }, - EToolResources: { file_search: 'file_search' }, - mergeFileConfig: jest.fn(), - removeNullishValues: jest.fn((obj) => obj), - isAssistantsEndpoint: jest.fn(), - Constants: { COMMANDS_MAX_LENGTH: 56 }, - PermissionTypes: { - BOOKMARKS: 'BOOKMARKS', - PROMPTS: 'PROMPTS', - MEMORIES: 'MEMORIES', - MULTI_CONVO: 'MULTI_CONVO', - AGENTS: 'AGENTS', - TEMPORARY_CHAT: 'TEMPORARY_CHAT', - RUN_CODE: 'RUN_CODE', - WEB_SEARCH: 'WEB_SEARCH', - FILE_CITATIONS: 'FILE_CITATIONS', - }, - Permissions: { - USE: 'USE', - OPT_OUT: 'OPT_OUT', - }, - SystemRoles: { - USER: 'USER', - ADMIN: 'ADMIN', - }, -})); - -jest.mock('~/server/services/Files/images', () => ({ - convertImage: jest.fn(), - resizeAndConvert: jest.fn(), - resizeImageBuffer: jest.fn(), -})); - -jest.mock('~/server/controllers/assistants/v2', () => ({ - addResourceFileId: jest.fn(), - deleteResourceFileId: jest.fn(), -})); - -jest.mock('~/models/Agent', () => ({ - addAgentResourceFile: jest.fn(), - removeAgentResourceFiles: jest.fn(), -})); - -jest.mock('~/server/controllers/assistants/helpers', () => ({ - getOpenAIClient: jest.fn(), -})); - -jest.mock('~/server/services/Tools/credentials', () => ({ - loadAuthValues: jest.fn(), -})); - -jest.mock('~/server/services/Config', () => ({ - checkCapability: jest.fn(), -})); - -jest.mock('~/server/utils/queue', () => ({ - LB_QueueAsyncCall: jest.fn(), -})); - -jest.mock('./strategies', () => ({ - getStrategyFunctions: jest.fn(), -})); - -jest.mock('~/server/utils', () => ({ - determineFileType: jest.fn(), -})); - -jest.mock('@librechat/api', () => ({ - parseText: jest.fn(), - parseTextNative: jest.fn(), -})); - -// Import the actual processFiles function after all mocks are set up -const { processFiles } = require('./process'); -const { updateFileUsage } = require('~/models/File'); - -describe('processFiles', () => { - beforeEach(() => { - jest.clearAllMocks(); - }); - - describe('null filtering functionality', () => { - it('should filter out null results from updateFileUsage when files do not exist', async () => { - const mockFiles = [ - { file_id: 'existing-file-1' }, - { file_id: 'non-existent-file' }, - { file_id: 'existing-file-2' }, - ]; - - // Mock updateFileUsage to return null for non-existent 
files - updateFileUsage.mockImplementation(({ file_id }) => { - if (file_id === 'non-existent-file') { - return Promise.resolve(null); // Simulate file not found in the database - } - return Promise.resolve({ file_id, usage: 1 }); - }); - - const result = await processFiles(mockFiles); - - expect(updateFileUsage).toHaveBeenCalledTimes(3); - expect(result).toEqual([ - { file_id: 'existing-file-1', usage: 1 }, - { file_id: 'existing-file-2', usage: 1 }, - ]); - - // Critical test - ensure no null values in result - expect(result).not.toContain(null); - expect(result).not.toContain(undefined); - expect(result.length).toBe(2); // Only valid files should be returned - }); - - it('should return empty array when all updateFileUsage calls return null', async () => { - const mockFiles = [{ file_id: 'non-existent-1' }, { file_id: 'non-existent-2' }]; - - // All updateFileUsage calls return null - updateFileUsage.mockResolvedValue(null); - - const result = await processFiles(mockFiles); - - expect(updateFileUsage).toHaveBeenCalledTimes(2); - expect(result).toEqual([]); - expect(result).not.toContain(null); - expect(result.length).toBe(0); - }); - - it('should work correctly when all files exist', async () => { - const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }]; - - updateFileUsage.mockImplementation(({ file_id }) => { - return Promise.resolve({ file_id, usage: 1 }); - }); - - const result = await processFiles(mockFiles); - - expect(result).toEqual([ - { file_id: 'file-1', usage: 1 }, - { file_id: 'file-2', usage: 1 }, - ]); - expect(result).not.toContain(null); - expect(result.length).toBe(2); - }); - - it('should handle fileIds parameter and filter nulls correctly', async () => { - const mockFiles = [{ file_id: 'file-1' }]; - const mockFileIds = ['file-2', 'non-existent-file']; - - updateFileUsage.mockImplementation(({ file_id }) => { - if (file_id === 'non-existent-file') { - return Promise.resolve(null); - } - return Promise.resolve({ file_id, usage: 1 }); - }); - - const result = await processFiles(mockFiles, mockFileIds); - - expect(result).toEqual([ - { file_id: 'file-1', usage: 1 }, - { file_id: 'file-2', usage: 1 }, - ]); - expect(result).not.toContain(null); - expect(result).not.toContain(undefined); - expect(result.length).toBe(2); - }); - - it('should handle duplicate file_ids correctly', async () => { - const mockFiles = [ - { file_id: 'duplicate-file' }, - { file_id: 'duplicate-file' }, // Duplicate should be ignored - { file_id: 'unique-file' }, - ]; - - updateFileUsage.mockImplementation(({ file_id }) => { - return Promise.resolve({ file_id, usage: 1 }); - }); - - const result = await processFiles(mockFiles); - - // Should only call updateFileUsage twice (duplicate ignored) - expect(updateFileUsage).toHaveBeenCalledTimes(2); - expect(result).toEqual([ - { file_id: 'duplicate-file', usage: 1 }, - { file_id: 'unique-file', usage: 1 }, - ]); - expect(result.length).toBe(2); - }); - }); - - describe('edge cases', () => { - it('should handle empty files array', async () => { - const result = await processFiles([]); - expect(result).toEqual([]); - expect(updateFileUsage).not.toHaveBeenCalled(); - }); - - it('should handle mixed null and undefined returns from updateFileUsage', async () => { - const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }, { file_id: 'file-3' }]; - - updateFileUsage.mockImplementation(({ file_id }) => { - if (file_id === 'file-1') return Promise.resolve(null); - if (file_id === 'file-2') return Promise.resolve(undefined); - return 
Promise.resolve({ file_id, usage: 1 }); - }); - - const result = await processFiles(mockFiles); - - expect(result).toEqual([{ file_id: 'file-3', usage: 1 }]); - expect(result).not.toContain(null); - expect(result).not.toContain(undefined); - expect(result.length).toBe(1); - }); - }); -}); diff --git a/api/server/services/GraphApiService.spec.js b/api/server/services/GraphApiService.spec.js index fa11190cc3..0a625e77e1 100644 --- a/api/server/services/GraphApiService.spec.js +++ b/api/server/services/GraphApiService.spec.js @@ -18,9 +18,6 @@ jest.mock('~/config', () => ({ defaults: {}, })), })); -jest.mock('~/utils', () => ({ - logAxiosError: jest.fn(), -})); jest.mock('~/server/services/Config', () => ({})); jest.mock('~/server/services/Files/strategies', () => ({ diff --git a/api/server/services/ModelService.js b/api/server/services/ModelService.js deleted file mode 100644 index 88a14f1c2c..0000000000 --- a/api/server/services/ModelService.js +++ /dev/null @@ -1,330 +0,0 @@ -const axios = require('axios'); -const { logger } = require('@librechat/data-schemas'); -const { HttpsProxyAgent } = require('https-proxy-agent'); -const { logAxiosError, inputSchema, processModelData, isUserProvided } = require('@librechat/api'); -const { - CacheKeys, - defaultModels, - KnownEndpoints, - EModelEndpoint, -} = require('librechat-data-provider'); -const { OllamaClient } = require('~/app/clients/OllamaClient'); -const { config } = require('./Config/EndpointService'); -const getLogStores = require('~/cache/getLogStores'); -const { extractBaseURL } = require('~/utils'); - -/** - * Splits a string by commas and trims each resulting value. - * @param {string} input - The input string to split. - * @returns {string[]} An array of trimmed values. - */ -const splitAndTrim = (input) => { - if (!input || typeof input !== 'string') { - return []; - } - return input - .split(',') - .map((item) => item.trim()) - .filter(Boolean); -}; - -/** - * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration. - * - * @param {Object} params - The parameters for fetching the models. - * @param {Object} params.user - The user ID to send to the API. - * @param {string} params.apiKey - The API key for authentication with the API. - * @param {string} params.baseURL - The base path URL for the API. - * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'. - * @param {boolean} [params.direct=false] - Whether `directEndpoint` was configured - * @param {boolean} [params.azure=false] - Whether to fetch models from Azure. - * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter. - * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response. - * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted. - * @param {Record} [params.headers] - Optional headers for the request. - * @param {Partial} [params.userObject] - Optional user object for header resolution. - * @returns {Promise} A promise that resolves to an array of model identifiers. - * @async - */ -const fetchModels = async ({ - user, - apiKey, - baseURL: _baseURL, - name = EModelEndpoint.openAI, - direct, - azure = false, - userIdQuery = false, - createTokenConfig = true, - tokenKey, - headers, - userObject, -}) => { - let models = []; - const baseURL = direct ? 
extractBaseURL(_baseURL) : _baseURL; - - if (!baseURL && !azure) { - return models; - } - - if (!apiKey) { - return models; - } - - if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) { - try { - return await OllamaClient.fetchModels(baseURL, { headers, user: userObject }); - } catch (ollamaError) { - const logMessage = - 'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.'; - logAxiosError({ message: logMessage, error: ollamaError }); - } - } - - try { - const options = { - headers: { - ...(headers ?? {}), - }, - timeout: 5000, - }; - - if (name === EModelEndpoint.anthropic) { - options.headers = { - 'x-api-key': apiKey, - 'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01', - }; - } else { - options.headers.Authorization = `Bearer ${apiKey}`; - } - - if (process.env.PROXY) { - options.httpsAgent = new HttpsProxyAgent(process.env.PROXY); - } - - if (process.env.OPENAI_ORGANIZATION && baseURL.includes('openai')) { - options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION; - } - - const url = new URL(`${baseURL.replace(/\/+$/, '')}${azure ? '' : '/models'}`); - if (user && userIdQuery) { - url.searchParams.append('user', user); - } - const res = await axios.get(url.toString(), options); - - /** @type {z.infer} */ - const input = res.data; - - const validationResult = inputSchema.safeParse(input); - if (validationResult.success && createTokenConfig) { - const endpointTokenConfig = processModelData(input); - const cache = getLogStores(CacheKeys.TOKEN_CONFIG); - await cache.set(tokenKey ?? name, endpointTokenConfig); - } - models = input.data.map((item) => item.id); - } catch (error) { - const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`; - logAxiosError({ message: logMessage, error }); - } - - return models; -}; - -/** - * Fetches models from the specified API path or Azure, based on the provided options. - * @async - * @function - * @param {object} opts - The options for fetching the models. - * @param {string} opts.user - The user ID to send to the API. - * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure. - * @param {boolean} [opts.assistants=false] - Whether to fetch models from Azure. - * @param {string[]} [_models=[]] - The models to use as a fallback. - */ -const fetchOpenAIModels = async (opts, _models = []) => { - let models = _models.slice() ?? 
[]; - const { openAIApiKey } = config; - let apiKey = openAIApiKey; - const openaiBaseURL = 'https://api.openai.com/v1'; - let baseURL = openaiBaseURL; - let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY; - - if (opts.assistants && process.env.ASSISTANTS_BASE_URL) { - reverseProxyUrl = process.env.ASSISTANTS_BASE_URL; - } else if (opts.azure) { - return models; - // const azure = getAzureCredentials(); - // baseURL = (genAzureChatCompletion(azure)) - // .split('/deployments')[0] - // .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`); - // apiKey = azureOpenAIApiKey; - } - - if (reverseProxyUrl) { - baseURL = extractBaseURL(reverseProxyUrl); - } - - const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES); - - const cachedModels = await modelsCache.get(baseURL); - if (cachedModels) { - return cachedModels; - } - - if (baseURL || opts.azure) { - models = await fetchModels({ - apiKey, - baseURL, - azure: opts.azure, - user: opts.user, - name: EModelEndpoint.openAI, - }); - } - - if (models.length === 0) { - return _models; - } - - if (baseURL === openaiBaseURL) { - const regex = /(text-davinci-003|gpt-|o\d+)/; - const excludeRegex = /audio|realtime/; - models = models.filter((model) => regex.test(model) && !excludeRegex.test(model)); - const instructModels = models.filter((model) => model.includes('instruct')); - const otherModels = models.filter((model) => !model.includes('instruct')); - models = otherModels.concat(instructModels); - } - - await modelsCache.set(baseURL, models); - return models; -}; - -/** - * Loads the default models for the application. - * @async - * @function - * @param {object} opts - The options for fetching the models. - * @param {string} opts.user - The user ID to send to the API. - * @param {boolean} [opts.azure=false] - Whether to fetch models from Azure. - * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants endpoint. - */ -const getOpenAIModels = async (opts) => { - let models = defaultModels[EModelEndpoint.openAI]; - - if (opts.assistants) { - models = defaultModels[EModelEndpoint.assistants]; - } else if (opts.azure) { - models = defaultModels[EModelEndpoint.azureAssistants]; - } - - let key; - if (opts.assistants) { - key = 'ASSISTANTS_MODELS'; - } else if (opts.azure) { - key = 'AZURE_OPENAI_MODELS'; - } else { - key = 'OPENAI_MODELS'; - } - - if (process.env[key]) { - models = splitAndTrim(process.env[key]); - return models; - } - - if (config.userProvidedOpenAI) { - return models; - } - - return await fetchOpenAIModels(opts, models); -}; - -/** - * Fetches models from the Anthropic API. - * @async - * @function - * @param {object} opts - The options for fetching the models. - * @param {string} opts.user - The user ID to send to the API. - * @param {string[]} [_models=[]] - The models to use as a fallback. - */ -const fetchAnthropicModels = async (opts, _models = []) => { - let models = _models.slice() ?? 
[]; - let apiKey = process.env.ANTHROPIC_API_KEY; - const anthropicBaseURL = 'https://api.anthropic.com/v1'; - let baseURL = anthropicBaseURL; - let reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY; - - if (reverseProxyUrl) { - baseURL = extractBaseURL(reverseProxyUrl); - } - - if (!apiKey) { - return models; - } - - const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES); - - const cachedModels = await modelsCache.get(baseURL); - if (cachedModels) { - return cachedModels; - } - - if (baseURL) { - models = await fetchModels({ - apiKey, - baseURL, - user: opts.user, - name: EModelEndpoint.anthropic, - tokenKey: EModelEndpoint.anthropic, - }); - } - - if (models.length === 0) { - return _models; - } - - await modelsCache.set(baseURL, models); - return models; -}; - -const getAnthropicModels = async (opts = {}) => { - let models = defaultModels[EModelEndpoint.anthropic]; - if (process.env.ANTHROPIC_MODELS) { - models = splitAndTrim(process.env.ANTHROPIC_MODELS); - return models; - } - - if (isUserProvided(process.env.ANTHROPIC_API_KEY)) { - return models; - } - - try { - return await fetchAnthropicModels(opts, models); - } catch (error) { - logger.error('Error fetching Anthropic models:', error); - return models; - } -}; - -const getGoogleModels = () => { - let models = defaultModels[EModelEndpoint.google]; - if (process.env.GOOGLE_MODELS) { - models = splitAndTrim(process.env.GOOGLE_MODELS); - } - - return models; -}; - -const getBedrockModels = () => { - let models = defaultModels[EModelEndpoint.bedrock]; - if (process.env.BEDROCK_AWS_MODELS) { - models = splitAndTrim(process.env.BEDROCK_AWS_MODELS); - } - - return models; -}; - -module.exports = { - fetchModels, - splitAndTrim, - getOpenAIModels, - getGoogleModels, - getBedrockModels, - getAnthropicModels, -}; diff --git a/api/server/services/UserService.js b/api/server/services/UserService.js deleted file mode 100644 index 7cf2f832a3..0000000000 --- a/api/server/services/UserService.js +++ /dev/null @@ -1,183 +0,0 @@ -const { logger } = require('@librechat/data-schemas'); -const { encrypt, decrypt } = require('@librechat/api'); -const { ErrorTypes } = require('librechat-data-provider'); -const { updateUser } = require('~/models'); -const { Key } = require('~/db/models'); - -/** - * Updates the plugins for a user based on the action specified (install/uninstall). - * @async - * @param {Object} user - The user whose plugins are to be updated. - * @param {string} pluginKey - The key of the plugin to install or uninstall. - * @param {'install' | 'uninstall'} action - The action to perform, 'install' or 'uninstall'. - * @returns {Promise} The result of the update operation. - * @throws Logs the error internally if the update operation fails. - * @description This function updates the plugin array of a user document based on the specified action. - * It adds a plugin key to the plugins array for an 'install' action, and removes it for an 'uninstall' action. - */ -const updateUserPluginsService = async (user, pluginKey, action) => { - try { - const userPlugins = user.plugins || []; - if (action === 'install') { - return await updateUser(user._id, { plugins: [...userPlugins, pluginKey] }); - } else if (action === 'uninstall') { - return await updateUser(user._id, { - plugins: userPlugins.filter((plugin) => plugin !== pluginKey), - }); - } - } catch (err) { - logger.error('[updateUserPluginsService]', err); - return err; - } -}; - -/** - * Retrieves and decrypts the key value for a given user identified by userId and identifier name. 
- * @param {Object} params - The parameters object. - * @param {string} params.userId - The unique identifier for the user. - * @param {string} params.name - The name associated with the key. - * @returns {Promise} The decrypted key value. - * @throws {Error} Throws an error if the key is not found or if there is a problem during key retrieval. - * @description This function searches for a user's key in the database using their userId and name. - * If found, it decrypts the value of the key and returns it. If no key is found, it throws - * an error indicating that there is no user key available. - */ -const getUserKey = async ({ userId, name }) => { - const keyValue = await Key.findOne({ userId, name }).lean(); - if (!keyValue) { - throw new Error( - JSON.stringify({ - type: ErrorTypes.NO_USER_KEY, - }), - ); - } - return await decrypt(keyValue.value); -}; - -/** - * Retrieves, decrypts, and parses the key values for a given user identified by userId and name. - * @param {Object} params - The parameters object. - * @param {string} params.userId - The unique identifier for the user. - * @param {string} params.name - The name associated with the key. - * @returns {Promise>} The decrypted and parsed key values. - * @throws {Error} Throws an error if the key is invalid or if there is a problem during key value parsing. - * @description This function retrieves a user's encrypted key using their userId and name, decrypts it, - * and then attempts to parse the decrypted string into a JSON object. If the parsing fails, - * it throws an error indicating that the user key is invalid. - */ -const getUserKeyValues = async ({ userId, name }) => { - let userValues = await getUserKey({ userId, name }); - try { - userValues = JSON.parse(userValues); - } catch (e) { - logger.error('[getUserKeyValues]', e); - throw new Error( - JSON.stringify({ - type: ErrorTypes.INVALID_USER_KEY, - }), - ); - } - return userValues; -}; - -/** - * Retrieves the expiry information of a user's key identified by userId and name. - * @async - * @param {Object} params - The parameters object. - * @param {string} params.userId - The unique identifier for the user. - * @param {string} params.name - The name associated with the key. - * @returns {Promise<{expiresAt: Date | null}>} The expiry date of the key or null if the key doesn't exist. - * @description This function fetches a user's key from the database using their userId and name and - * returns its expiry date. If the key is not found, it returns null for the expiry date. - */ -const getUserKeyExpiry = async ({ userId, name }) => { - const keyValue = await Key.findOne({ userId, name }).lean(); - if (!keyValue) { - return { expiresAt: null }; - } - return { expiresAt: keyValue.expiresAt || 'never' }; -}; - -/** - * Updates or inserts a new key for a given user identified by userId and name, with a specified value and expiry date. - * @async - * @param {Object} params - The parameters object. - * @param {string} params.userId - The unique identifier for the user. - * @param {string} params.name - The name associated with the key. - * @param {string} params.value - The value to be encrypted and stored as the key's value. - * @param {Date} params.expiresAt - The expiry date for the key [optional] - * @returns {Promise} The updated or newly inserted key document. - * @description This function either updates an existing user key or inserts a new one into the database, - * after encrypting the provided value. It sets the provided expiry date for the key (or unsets for no expiry). 
- */ -const updateUserKey = async ({ userId, name, value, expiresAt = null }) => { - const encryptedValue = await encrypt(value); - let updateObject = { - userId, - name, - value: encryptedValue, - }; - const updateQuery = { $set: updateObject }; - // add expiresAt to the update object if it's not null - if (expiresAt) { - updateObject.expiresAt = new Date(expiresAt); - } else { - // make sure to remove if already present - updateQuery.$unset = { expiresAt }; - } - return await Key.findOneAndUpdate({ userId, name }, updateQuery, { - upsert: true, - new: true, - }).lean(); -}; - -/** - * Deletes a key or all keys for a given user identified by userId, optionally based on a specified name. - * @async - * @param {Object} params - The parameters object. - * @param {string} params.userId - The unique identifier for the user. - * @param {string} [params.name] - The name associated with the key to delete. If not provided and all is true, deletes all keys. - * @param {boolean} [params.all=false] - Whether to delete all keys for the user. - * @returns {Promise} The result of the deletion operation. - * @description This function deletes a specific key or all keys for a user from the database. - * If a name is provided and all is false, it deletes only the key with that name. - * If all is true, it ignores the name and deletes all keys for the user. - */ -const deleteUserKey = async ({ userId, name, all = false }) => { - if (all) { - return await Key.deleteMany({ userId }); - } - - await Key.findOneAndDelete({ userId, name }).lean(); -}; - -/** - * Checks if a user key has expired based on the provided expiration date and endpoint. - * If the key has expired, it throws an Error with details including the type of error, the expiration date, and the endpoint. - * - * @param {string} expiresAt - The expiration date of the user key in a format that can be parsed by the Date constructor. - * @param {string} endpoint - The endpoint associated with the user key to be checked. - * @throws {Error} Throws an error if the user key has expired. The error message is a stringified JSON object - * containing the type of error (`ErrorTypes.EXPIRED_USER_KEY`), the expiration date in the local string format, and the endpoint. - */ -const checkUserKeyExpiry = (expiresAt, endpoint) => { - const expiresAtDate = new Date(expiresAt); - if (expiresAtDate < new Date()) { - const errorMessage = JSON.stringify({ - type: ErrorTypes.EXPIRED_USER_KEY, - expiredAt: expiresAtDate.toLocaleString(), - endpoint, - }); - throw new Error(errorMessage); - } -}; - -module.exports = { - getUserKey, - updateUserKey, - deleteUserKey, - getUserKeyValues, - getUserKeyExpiry, - checkUserKeyExpiry, - updateUserPluginsService, -}; diff --git a/api/server/services/twoFactorService.js b/api/server/services/twoFactorService.js index 4ac86a5549..cce24e2322 100644 --- a/api/server/services/twoFactorService.js +++ b/api/server/services/twoFactorService.js @@ -1,5 +1,5 @@ const { webcrypto } = require('node:crypto'); -const { hashBackupCode, decryptV3, decryptV2 } = require('@librechat/api'); +const { hashBackupCode, decryptV3, decryptV2 } = require('@librechat/data-schemas'); const { updateUser } = require('~/models'); // Base32 alphabet for TOTP secret encoding. 
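The `twoFactorService.js` hunk above now sources `hashBackupCode`, `decryptV3`, and `decryptV2` from `@librechat/data-schemas` instead of `@librechat/api`. A short usage sketch, assuming the helper signatures are unchanged by the move and that backup codes are stored as SHA-256 hex digests (which is what `hashBackupCode` produces); `verifyBackupCode` is a hypothetical caller, not code from this PR:

```js
// Crypto helpers are now consumed from @librechat/data-schemas (see twoFactorService.js above).
const { hashBackupCode } = require('@librechat/data-schemas');

// Hypothetical helper: hash the submitted code and compare it to the stored digest.
async function verifyBackupCode(storedCodeHash, submittedCode) {
  const submittedHash = await hashBackupCode(submittedCode.trim());
  return submittedHash === storedCodeHash;
}
```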
diff --git a/api/test/app/clients/tools/util/fileSearch.test.js b/api/test/app/clients/tools/util/fileSearch.test.js index 72353bd296..782e48f720 100644 --- a/api/test/app/clients/tools/util/fileSearch.test.js +++ b/api/test/app/clients/tools/util/fileSearch.test.js @@ -13,7 +13,7 @@ jest.mock('@librechat/data-schemas', () => ({ }, })); -jest.mock('~/models/File', () => ({ +jest.mock('~/models', () => ({ getFiles: jest.fn().mockResolvedValue([]), })); diff --git a/api/utils/deriveBaseURL.js b/api/utils/deriveBaseURL.js deleted file mode 100644 index 982c2c8c2e..0000000000 --- a/api/utils/deriveBaseURL.js +++ /dev/null @@ -1,28 +0,0 @@ -const { logger } = require('@librechat/data-schemas'); - -/** - * Extracts the base URL from the provided URL. - * @param {string} fullURL - The full URL. - * @returns {string} The base URL. - */ -function deriveBaseURL(fullURL) { - try { - const parsedUrl = new URL(fullURL); - const protocol = parsedUrl.protocol; - const hostname = parsedUrl.hostname; - const port = parsedUrl.port; - - // Check if the parsed URL components are meaningful - if (!protocol || !hostname) { - return fullURL; - } - - // Reconstruct the base URL - return `${protocol}//${hostname}${port ? `:${port}` : ''}`; - } catch (error) { - logger.error('Failed to derive base URL', error); - return fullURL; // Return the original URL in case of any exception - } -} - -module.exports = deriveBaseURL; diff --git a/api/utils/deriveBaseURL.spec.js b/api/utils/deriveBaseURL.spec.js deleted file mode 100644 index 50f64257fe..0000000000 --- a/api/utils/deriveBaseURL.spec.js +++ /dev/null @@ -1,74 +0,0 @@ -const axios = require('axios'); -const deriveBaseURL = require('./deriveBaseURL'); -jest.mock('@librechat/api', () => { - const originalUtils = jest.requireActual('@librechat/api'); - return { - ...originalUtils, - processModelData: jest.fn((...args) => { - return originalUtils.processModelData(...args); - }), - }; -}); - -jest.mock('axios'); -jest.mock('~/cache/getLogStores', () => - jest.fn().mockImplementation(() => ({ - get: jest.fn().mockResolvedValue(undefined), - set: jest.fn().mockResolvedValue(true), - })), -); -jest.mock('~/config', () => ({ - logger: { - error: jest.fn(), - }, -})); - -axios.get.mockResolvedValue({ - data: { - data: [{ id: 'model-1' }, { id: 'model-2' }], - }, -}); - -describe('deriveBaseURL', () => { - it('should extract the base URL correctly from a full URL with a port', () => { - const fullURL = 'https://example.com:8080/path?query=123'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('https://example.com:8080'); - }); - - it('should extract the base URL correctly from a full URL without a port', () => { - const fullURL = 'https://example.com/path?query=123'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('https://example.com'); - }); - - it('should handle URLs using the HTTP protocol', () => { - const fullURL = 'http://example.com:3000/path?query=123'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('http://example.com:3000'); - }); - - it('should return only the protocol and hostname if no port is specified', () => { - const fullURL = 'http://example.com/path?query=123'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('http://example.com'); - }); - - it('should handle URLs with uncommon protocols', () => { - const fullURL = 'ftp://example.com:2121/path?query=123'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('ftp://example.com:2121'); - }); - - it('should handle 
edge case where URL ends with a slash', () => { - const fullURL = 'https://example.com/'; - const baseURL = deriveBaseURL(fullURL); - expect(baseURL).toEqual('https://example.com'); - }); - - it('should return the original URL if the URL is invalid', () => { - const invalidURL = 'htp:/example.com:8080'; - const result = deriveBaseURL(invalidURL); - expect(result).toBe(invalidURL); - }); -}); diff --git a/api/utils/findMessageContent.js b/api/utils/findMessageContent.js deleted file mode 100644 index 6aeed1a395..0000000000 --- a/api/utils/findMessageContent.js +++ /dev/null @@ -1,35 +0,0 @@ -const { logger } = require('@librechat/data-schemas'); - -function findContent(obj) { - if (obj && typeof obj === 'object') { - if ('kwargs' in obj && 'content' in obj.kwargs) { - return obj.kwargs.content; - } - for (let key in obj) { - let content = findContent(obj[key]); - if (content) { - return content; - } - } - } - return null; -} - -function findMessageContent(message) { - let startIndex = Math.min(message.indexOf('{'), message.indexOf('[')); - let jsonString = message.substring(startIndex); - - let jsonObjectOrArray; - try { - jsonObjectOrArray = JSON.parse(jsonString); - } catch (error) { - logger.error('[findMessageContent] Failed to parse JSON:', error); - return null; - } - - let content = findContent(jsonObjectOrArray); - - return content; -} - -module.exports = findMessageContent; diff --git a/api/utils/index.js b/api/utils/index.js deleted file mode 100644 index dc5f3a6737..0000000000 --- a/api/utils/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const deriveBaseURL = require('./deriveBaseURL'); -const extractBaseURL = require('./extractBaseURL'); -const findMessageContent = require('./findMessageContent'); - -module.exports = { - deriveBaseURL, - extractBaseURL, - findMessageContent, -}; diff --git a/packages/api/src/agents/auth.ts b/packages/api/src/agents/auth.ts index a5fc882660..5783de7b98 100644 --- a/packages/api/src/agents/auth.ts +++ b/packages/api/src/agents/auth.ts @@ -1,6 +1,5 @@ -import { logger } from '@librechat/data-schemas'; +import { logger, decrypt } from '@librechat/data-schemas'; import type { IPluginAuth, PluginAuthMethods } from '@librechat/data-schemas'; -import { decrypt } from '../crypto/encryption'; export interface GetPluginAuthMapParams { userId: string; diff --git a/packages/api/src/agents/index.ts b/packages/api/src/agents/index.ts index 4ad5fbab79..44ef3e9de8 100644 --- a/packages/api/src/agents/index.ts +++ b/packages/api/src/agents/index.ts @@ -1,7 +1,8 @@ export * from './chain'; +export * from './initialize'; +export * from './legacy'; export * from './memory'; export * from './migration'; -export * from './legacy'; export * from './resources'; export * from './run'; export * from './validation'; diff --git a/packages/api/src/agents/initialize.ts b/packages/api/src/agents/initialize.ts new file mode 100644 index 0000000000..a37ddf4848 --- /dev/null +++ b/packages/api/src/agents/initialize.ts @@ -0,0 +1,315 @@ +import { Providers } from '@librechat/agents'; +import { + ErrorTypes, + EModelEndpoint, + EToolResources, + paramEndpoints, + isAgentsEndpoint, + replaceSpecialVars, + providerEndpointMap, +} from 'librechat-data-provider'; +import type { + AgentToolResources, + TEndpointOption, + TFile, + Agent, + TUser, +} from 'librechat-data-provider'; +import type { Response as ServerResponse } from 'express'; +import type { IMongoFile } from '@librechat/data-schemas'; +import type { GenericTool } from '@librechat/agents'; +import type { InitializeResultBase, 
ServerRequest, EndpointDbMethods } from '~/types'; +import { getModelMaxTokens, extractLibreChatParams, optionalChainWithEmptyCheck } from '~/utils'; +import { filterFilesByEndpointConfig } from '~/files'; +import { generateArtifactsPrompt } from '~/prompts'; +import { getProviderConfig } from '~/endpoints'; +import { primeResources } from './resources'; + +/** + * Extended agent type with additional fields needed after initialization + */ +export type InitializedAgent = Agent & { + tools: GenericTool[]; + attachments: IMongoFile[]; + toolContextMap: Record; + maxContextTokens: number; + useLegacyContent: boolean; + resendFiles: boolean; + userMCPAuthMap?: Record>; +}; + +/** + * Parameters for initializing an agent + * Matches the CJS signature from api/server/services/Endpoints/agents/agent.js + */ +export interface InitializeAgentParams { + /** Request object */ + req: ServerRequest; + /** Response object */ + res: ServerResponse; + /** Agent to initialize */ + agent: Agent; + /** Conversation ID (optional) */ + conversationId?: string | null; + /** Request files */ + requestFiles?: IMongoFile[]; + /** Function to load agent tools */ + loadTools?: (params: { + req: ServerRequest; + res: ServerResponse; + provider: string; + agentId: string; + tools: string[]; + model: string | null; + tool_resources: AgentToolResources | undefined; + }) => Promise<{ + tools: GenericTool[]; + toolContextMap: Record; + userMCPAuthMap?: Record>; + } | null>; + /** Endpoint option (contains model_parameters and endpoint info) */ + endpointOption?: Partial; + /** Set of allowed providers */ + allowedProviders: Set; + /** Whether this is the initial agent */ + isInitialAgent?: boolean; +} + +/** + * Database methods required for agent initialization + * Most methods come from data-schemas via createMethods() + * getConvoFiles not yet in data-schemas but included here for consistency + */ +export interface InitializeAgentDbMethods extends EndpointDbMethods { + /** Update usage tracking for multiple files */ + updateFilesUsage: (files: Array<{ file_id: string }>, fileIds?: string[]) => Promise; + /** Get files from database */ + getFiles: (filter: unknown, sort: unknown, select: unknown, opts?: unknown) => Promise; + /** Get tool files by IDs */ + getToolFilesByIds: (fileIds: string[], toolSet: Set) => Promise; + /** Get conversation file IDs */ + getConvoFiles: (conversationId: string) => Promise; +} + +/** + * Initializes an agent for use in requests. + * Handles file processing, tool loading, provider configuration, and context token calculations. 
+ * + * This function is exported from @librechat/api and replaces the CJS version from + * api/server/services/Endpoints/agents/agent.js + * + * @param params - Initialization parameters + * @param deps - Optional dependency injection for testing + * @returns Promise resolving to initialized agent with tools and configuration + * @throws Error if agent provider is not allowed or if required dependencies are missing + */ +export async function initializeAgent( + params: InitializeAgentParams, + db?: InitializeAgentDbMethods, +): Promise { + const { + req, + res, + agent, + loadTools, + requestFiles = [], + conversationId, + endpointOption, + allowedProviders, + isInitialAgent = false, + } = params; + + if (!db) { + throw new Error('initializeAgent requires db methods to be passed'); + } + + if ( + isAgentsEndpoint(endpointOption?.endpoint) && + allowedProviders.size > 0 && + !allowedProviders.has(agent.provider) + ) { + throw new Error( + `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`, + ); + } + + let currentFiles: IMongoFile[] | undefined; + + const _modelOptions = structuredClone( + Object.assign( + { model: agent.model }, + agent.model_parameters ?? { model: agent.model }, + isInitialAgent === true ? endpointOption?.model_parameters : {}, + ), + ); + + const { resendFiles, maxContextTokens, modelOptions } = extractLibreChatParams( + _modelOptions as Record, + ); + + const provider = agent.provider; + agent.endpoint = provider; + + if (isInitialAgent && conversationId != null && resendFiles) { + const fileIds = (await db.getConvoFiles(conversationId)) ?? []; + const toolResourceSet = new Set(); + for (const tool of agent.tools ?? []) { + if (EToolResources[tool as keyof typeof EToolResources]) { + toolResourceSet.add(EToolResources[tool as keyof typeof EToolResources]); + } + } + const toolFiles = (await db.getToolFilesByIds(fileIds, toolResourceSet)) as IMongoFile[]; + if (requestFiles.length || toolFiles.length) { + currentFiles = (await db.updateFilesUsage(requestFiles.concat(toolFiles))) as IMongoFile[]; + } + } else if (isInitialAgent && requestFiles.length) { + currentFiles = (await db.updateFilesUsage(requestFiles)) as IMongoFile[]; + } + + if (currentFiles && currentFiles.length) { + let endpointType: EModelEndpoint | undefined; + if (!paramEndpoints.has(agent.endpoint ?? '')) { + endpointType = EModelEndpoint.custom; + } + + currentFiles = filterFilesByEndpointConfig(req, { + files: currentFiles, + endpoint: agent.endpoint ?? '', + endpointType, + }); + } + + const { attachments: primedAttachments, tool_resources } = await primeResources({ + req: req as never, + getFiles: db.getFiles as never, + appConfig: req.config, + agentId: agent.id, + attachments: currentFiles + ? (Promise.resolve(currentFiles) as unknown as Promise) + : undefined, + tool_resources: agent.tool_resources, + requestFileSet: new Set(requestFiles?.map((file) => file.file_id)), + }); + + const { + tools: structuredTools, + toolContextMap, + userMCPAuthMap, + } = (await loadTools?.({ + req, + res, + provider, + agentId: agent.id, + tools: agent.tools ?? [], + model: agent.model, + tool_resources, + })) ?? 
{ tools: [], toolContextMap: {}, userMCPAuthMap: undefined }; + + const { getOptions, overrideProvider } = getProviderConfig({ + provider, + appConfig: req.config, + }); + if (overrideProvider !== agent.provider) { + agent.provider = overrideProvider; + } + + const finalModelOptions = { + ...modelOptions, + model: agent.model, + }; + + const options: InitializeResultBase = await getOptions({ + req, + endpoint: provider, + model_parameters: finalModelOptions, + db, + }); + + const llmConfig = options.llmConfig as Record; + const tokensModel = + agent.provider === EModelEndpoint.azureOpenAI ? agent.model : (llmConfig?.model as string); + const maxOutputTokens = optionalChainWithEmptyCheck( + llmConfig?.maxOutputTokens as number | undefined, + llmConfig?.maxTokens as number | undefined, + 0, + ); + const agentMaxContextTokens = optionalChainWithEmptyCheck( + maxContextTokens, + getModelMaxTokens( + tokensModel ?? '', + providerEndpointMap[provider as keyof typeof providerEndpointMap], + options.endpointTokenConfig, + ), + 18000, + ); + + if ( + agent.endpoint === EModelEndpoint.azureOpenAI && + (llmConfig?.azureOpenAIApiInstanceName as string | undefined) == null + ) { + agent.provider = Providers.OPENAI; + } + + if (options.provider != null) { + agent.provider = options.provider; + } + + let tools: GenericTool[] = options.tools?.length + ? (options.tools as GenericTool[]) + : structuredTools; + if ( + (agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) && + options.tools?.length && + structuredTools?.length + ) { + throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`); + } else if ( + (agent.provider === Providers.OPENAI || + agent.provider === Providers.AZURE || + agent.provider === Providers.ANTHROPIC) && + options.tools?.length && + structuredTools?.length + ) { + tools = structuredTools.concat(options.tools as GenericTool[]); + } + + agent.model_parameters = { ...options.llmConfig } as Agent['model_parameters']; + if (options.configOptions) { + (agent.model_parameters as Record).configuration = options.configOptions; + } + + if (agent.instructions && agent.instructions !== '') { + agent.instructions = replaceSpecialVars({ + text: agent.instructions, + user: req.user ? (req.user as unknown as TUser) : null, + }); + } + + if (typeof agent.artifacts === 'string' && agent.artifacts !== '') { + const artifactsPromptResult = generateArtifactsPrompt({ + endpoint: agent.provider, + artifacts: agent.artifacts as never, + }); + agent.additional_instructions = artifactsPromptResult ?? undefined; + } + + const agentMaxContextNum = Number(agentMaxContextTokens) || 18000; + const maxOutputTokensNum = Number(maxOutputTokens) || 0; + + const finalAttachments: IMongoFile[] = (primedAttachments ?? []) + .filter((a): a is TFile => a != null) + .map((a) => a as unknown as IMongoFile); + + const initializedAgent: InitializedAgent = { + ...agent, + tools: (tools ?? []) as GenericTool[] & string[], + attachments: finalAttachments, + resendFiles, + userMCPAuthMap, + toolContextMap: toolContextMap ?? 
{}, + useLegacyContent: !!options.useLegacyContent, + maxContextTokens: Math.round((agentMaxContextNum - maxOutputTokensNum) * 0.9), + }; + + return initializedAgent; +} diff --git a/packages/api/src/agents/resources.ts b/packages/api/src/agents/resources.ts index 9c32638a9c..4655453847 100644 --- a/packages/api/src/agents/resources.ts +++ b/packages/api/src/agents/resources.ts @@ -152,7 +152,7 @@ export const primeResources = async ({ agentId, }: { req: ServerRequest & { user?: IUser }; - appConfig: AppConfig; + appConfig?: AppConfig; requestFileSet: Set; attachments: Promise> | undefined; tool_resources: AgentToolResources | undefined; diff --git a/packages/api/src/crypto/encryption.ts b/packages/api/src/crypto/encryption.ts deleted file mode 100644 index aedf3c9c92..0000000000 --- a/packages/api/src/crypto/encryption.ts +++ /dev/null @@ -1,129 +0,0 @@ -import 'dotenv/config'; -import crypto from 'node:crypto'; -const { webcrypto } = crypto; - -// Use hex decoding for both key and IV for legacy methods. -const key = Buffer.from(process.env.CREDS_KEY ?? '', 'hex'); -const iv = Buffer.from(process.env.CREDS_IV ?? '', 'hex'); -const algorithm = 'AES-CBC'; - -// --- Legacy v1/v2 Setup: AES-CBC with fixed key and IV --- - -export async function encrypt(value: string) { - const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [ - 'encrypt', - ]); - const encoder = new TextEncoder(); - const data = encoder.encode(value); - const encryptedBuffer = await webcrypto.subtle.encrypt( - { name: algorithm, iv: iv }, - cryptoKey, - data, - ); - return Buffer.from(encryptedBuffer).toString('hex'); -} - -export async function decrypt(encryptedValue: string) { - const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [ - 'decrypt', - ]); - const encryptedBuffer = Buffer.from(encryptedValue, 'hex'); - const decryptedBuffer = await webcrypto.subtle.decrypt( - { name: algorithm, iv: iv }, - cryptoKey, - encryptedBuffer, - ); - const decoder = new TextDecoder(); - return decoder.decode(decryptedBuffer); -} - -// --- v2: AES-CBC with a random IV per encryption --- - -export async function encryptV2(value: string) { - const gen_iv = webcrypto.getRandomValues(new Uint8Array(16)); - const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [ - 'encrypt', - ]); - const encoder = new TextEncoder(); - const data = encoder.encode(value); - const encryptedBuffer = await webcrypto.subtle.encrypt( - { name: algorithm, iv: gen_iv }, - cryptoKey, - data, - ); - return Buffer.from(gen_iv).toString('hex') + ':' + Buffer.from(encryptedBuffer).toString('hex'); -} - -export async function decryptV2(encryptedValue: string) { - const parts = encryptedValue.split(':'); - if (parts.length === 1) { - return parts[0]; - } - const gen_iv = Buffer.from(parts.shift() ?? '', 'hex'); - const encrypted = parts.join(':'); - const cryptoKey = await webcrypto.subtle.importKey('raw', key, { name: algorithm }, false, [ - 'decrypt', - ]); - const encryptedBuffer = Buffer.from(encrypted, 'hex'); - const decryptedBuffer = await webcrypto.subtle.decrypt( - { name: algorithm, iv: gen_iv }, - cryptoKey, - encryptedBuffer, - ); - const decoder = new TextDecoder(); - return decoder.decode(decryptedBuffer); -} - -// --- v3: AES-256-CTR using Node's crypto functions --- -const algorithm_v3 = 'aes-256-ctr'; - -/** - * Encrypts a value using AES-256-CTR. - * Note: AES-256 requires a 32-byte key. 
Ensure that process.env.CREDS_KEY is a 64-character hex string. - * - * @param value - The plaintext to encrypt. - * @returns The encrypted string with a "v3:" prefix. - */ -export function encryptV3(value: string) { - if (key.length !== 32) { - throw new Error(`Invalid key length: expected 32 bytes, got ${key.length} bytes`); - } - const iv_v3 = crypto.randomBytes(16); - const cipher = crypto.createCipheriv(algorithm_v3, key, iv_v3); - const encrypted = Buffer.concat([cipher.update(value, 'utf8'), cipher.final()]); - return `v3:${iv_v3.toString('hex')}:${encrypted.toString('hex')}`; -} - -export function decryptV3(encryptedValue: string) { - const parts = encryptedValue.split(':'); - if (parts[0] !== 'v3') { - throw new Error('Not a v3 encrypted value'); - } - const iv_v3 = Buffer.from(parts[1], 'hex'); - const encryptedText = Buffer.from(parts.slice(2).join(':'), 'hex'); - const decipher = crypto.createDecipheriv(algorithm_v3, key, iv_v3); - const decrypted = Buffer.concat([decipher.update(encryptedText), decipher.final()]); - return decrypted.toString('utf8'); -} - -export async function getRandomValues(length: number) { - if (!Number.isInteger(length) || length <= 0) { - throw new Error('Length must be a positive integer'); - } - const randomValues = new Uint8Array(length); - webcrypto.getRandomValues(randomValues); - return Buffer.from(randomValues).toString('hex'); -} - -/** - * Computes SHA-256 hash for the given input. - * @param input - The input to hash. - * @returns The SHA-256 hash of the input. - */ -export async function hashBackupCode(input: string) { - const encoder = new TextEncoder(); - const data = encoder.encode(input); - const hashBuffer = await webcrypto.subtle.digest('SHA-256', data); - const hashArray = Array.from(new Uint8Array(hashBuffer)); - return hashArray.map((b) => b.toString(16).padStart(2, '0')).join(''); -} diff --git a/packages/api/src/crypto/index.ts b/packages/api/src/crypto/index.ts index 0821d6d8b6..ac78a47b29 100644 --- a/packages/api/src/crypto/index.ts +++ b/packages/api/src/crypto/index.ts @@ -1,2 +1,11 @@ -export * from './encryption'; +export { + encrypt, + decrypt, + encryptV2, + decryptV2, + encryptV3, + decryptV3, + hashBackupCode, + getRandomValues, +} from '@librechat/data-schemas'; export * from './jwt'; diff --git a/packages/api/src/endpoints/anthropic/index.ts b/packages/api/src/endpoints/anthropic/index.ts index 724cfda752..e90546e461 100644 --- a/packages/api/src/endpoints/anthropic/index.ts +++ b/packages/api/src/endpoints/anthropic/index.ts @@ -1,2 +1,3 @@ export * from './helpers'; export * from './llm'; +export * from './initialize'; diff --git a/packages/api/src/endpoints/anthropic/initialize.ts b/packages/api/src/endpoints/anthropic/initialize.ts new file mode 100644 index 0000000000..9a8984b5c7 --- /dev/null +++ b/packages/api/src/endpoints/anthropic/initialize.ts @@ -0,0 +1,73 @@ +import { EModelEndpoint } from 'librechat-data-provider'; +import type { BaseInitializeParams, InitializeResultBase, AnthropicConfigOptions } from '~/types'; +import { checkUserKeyExpiry } from '~/utils'; +import { getLLMConfig } from './llm'; + +/** + * Initializes Anthropic endpoint configuration. 
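+ *
+ * @example
+ * // Illustrative sketch only: `req` is assumed to be an Express request with
+ * // `config`, `user`, and `body` populated, and `db` a data-access adapter
+ * // exposing `getUserKey`/`getUserKeyValues`.
+ * const { llmConfig } = await initializeAnthropic({
+ *   req,
+ *   endpoint: EModelEndpoint.anthropic,
+ *   model_parameters: { model: 'claude-3-5-sonnet-latest' },
+ *   db,
+ * });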
+ * + * @param params - Configuration parameters + * @returns Promise resolving to Anthropic configuration options + * @throws Error if API key is not provided + */ +export async function initializeAnthropic({ + req, + endpoint, + model_parameters, + db, +}: BaseInitializeParams): Promise { + void endpoint; + const appConfig = req.config; + const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env; + const { key: expiresAt } = req.body; + const isUserProvided = ANTHROPIC_API_KEY === 'user_provided'; + + const anthropicApiKey = isUserProvided + ? await db.getUserKey({ userId: req.user?.id ?? '', name: EModelEndpoint.anthropic }) + : ANTHROPIC_API_KEY; + + if (!anthropicApiKey) { + throw new Error('Anthropic API key not provided. Please provide it again.'); + } + + if (expiresAt && isUserProvided) { + checkUserKeyExpiry(expiresAt, EModelEndpoint.anthropic); + } + + let clientOptions: AnthropicConfigOptions = {}; + + /** @type {undefined | TBaseEndpoint} */ + const anthropicConfig = appConfig?.endpoints?.[EModelEndpoint.anthropic]; + + if (anthropicConfig) { + clientOptions = { + ...clientOptions, + // Note: _lc_stream_delay is set on modelOptions in the result + }; + } + + const allConfig = appConfig?.endpoints?.all; + + clientOptions = { + proxy: PROXY ?? undefined, + reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? undefined, + modelOptions: { + ...(model_parameters ?? {}), + user: req.user?.id, + }, + ...clientOptions, + }; + + const result = getLLMConfig(anthropicApiKey, clientOptions); + + // Apply stream rate delay + if (anthropicConfig?.streamRate) { + (result.llmConfig as Record)._lc_stream_delay = anthropicConfig.streamRate; + } + + if (allConfig?.streamRate) { + (result.llmConfig as Record)._lc_stream_delay = allConfig.streamRate; + } + + return result; +} diff --git a/packages/api/src/endpoints/bedrock/index.ts b/packages/api/src/endpoints/bedrock/index.ts new file mode 100644 index 0000000000..c2de1cea0b --- /dev/null +++ b/packages/api/src/endpoints/bedrock/index.ts @@ -0,0 +1 @@ +export * from './initialize'; diff --git a/api/server/services/Endpoints/bedrock/options.js b/packages/api/src/endpoints/bedrock/initialize.ts similarity index 61% rename from api/server/services/Endpoints/bedrock/options.js rename to packages/api/src/endpoints/bedrock/initialize.ts index 392d0cee08..500285e02f 100644 --- a/api/server/services/Endpoints/bedrock/options.js +++ b/packages/api/src/endpoints/bedrock/initialize.ts @@ -1,5 +1,18 @@ +import { HttpsProxyAgent } from 'https-proxy-agent'; +import { NodeHttpHandler } from '@smithy/node-http-handler'; +import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime'; +import { + AuthType, + EModelEndpoint, + bedrockInputParser, + bedrockOutputParser, + removeNullishValues, +} from 'librechat-data-provider'; +import type { BaseInitializeParams, InitializeResultBase, BedrockCredentials } from '~/types'; +import { checkUserKeyExpiry } from '~/utils'; + /** - * Bedrock endpoint options configuration + * Initializes Bedrock endpoint configuration. * * This module handles configuration for AWS Bedrock endpoints, including support for * HTTP/HTTPS proxies and reverse proxies. 
@@ -18,28 +31,17 @@ * - Credentials and endpoint configuration are passed separately to ChatBedrockConverse, * which creates its own BedrockRuntimeClient internally * - * Environment Variables: - * - PROXY: HTTP/HTTPS proxy URL (e.g., http://proxy.example.com:8080) - * - BEDROCK_REVERSE_PROXY: Custom Bedrock API endpoint host - * - BEDROCK_AWS_DEFAULT_REGION: AWS region for Bedrock service - * - BEDROCK_AWS_ACCESS_KEY_ID: AWS access key (or set to 'user_provided') - * - BEDROCK_AWS_SECRET_ACCESS_KEY: AWS secret key (or set to 'user_provided') - * - BEDROCK_AWS_SESSION_TOKEN: Optional AWS session token + * @param params - Configuration parameters + * @returns Promise resolving to Bedrock configuration options + * @throws Error if credentials are not provided when required */ - -const { HttpsProxyAgent } = require('https-proxy-agent'); -const { NodeHttpHandler } = require('@smithy/node-http-handler'); -const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime'); -const { - AuthType, - EModelEndpoint, - bedrockInputParser, - bedrockOutputParser, - removeNullishValues, -} = require('librechat-data-provider'); -const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); - -const getOptions = async ({ req, overrideModel, endpointOption }) => { +export async function initializeBedrock({ + req, + endpoint, + model_parameters, + db, +}: BaseInitializeParams): Promise { + void endpoint; const { BEDROCK_AWS_SECRET_ACCESS_KEY, BEDROCK_AWS_ACCESS_KEY_ID, @@ -48,11 +50,14 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => { BEDROCK_AWS_DEFAULT_REGION, PROXY, } = process.env; - const expiresAt = req.body.key; + + const { key: expiresAt } = req.body; const isUserProvided = BEDROCK_AWS_SECRET_ACCESS_KEY === AuthType.USER_PROVIDED; - let credentials = isUserProvided - ? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock }) + let credentials: BedrockCredentials | undefined = isUserProvided + ? await db + .getUserKey({ userId: req.user?.id ?? '', name: EModelEndpoint.bedrock }) + .then((key) => JSON.parse(key) as BedrockCredentials) : { accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID, secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY, @@ -75,37 +80,31 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => { checkUserKeyExpiry(expiresAt, EModelEndpoint.bedrock); } - /* - Callback for stream rate no longer awaits and may end the stream prematurely - /** @type {number} - let streamRate = Constants.DEFAULT_STREAM_RATE; - - /** @type {undefined | TBaseEndpoint} - const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock]; - - if (bedrockConfig && bedrockConfig.streamRate) { - streamRate = bedrockConfig.streamRate; - } - - const allConfig = appConfig.endpoints?.all; - if (allConfig && allConfig.streamRate) { - streamRate = allConfig.streamRate; - } - */ - - /** @type {BedrockClientOptions} */ - const requestOptions = { - model: overrideModel ?? endpointOption?.model, + const requestOptions: Record = { + model: model_parameters?.model as string | undefined, region: BEDROCK_AWS_DEFAULT_REGION, }; - const configOptions = {}; + const configOptions: Record = {}; const llmConfig = bedrockOutputParser( bedrockInputParser.parse( - removeNullishValues(Object.assign(requestOptions, endpointOption?.model_parameters ?? {})), + removeNullishValues({ ...requestOptions, ...(model_parameters ?? 
{}) }), ), - ); + ) as InitializeResultBase['llmConfig'] & { + region?: string; + client?: BedrockRuntimeClient; + credentials?: BedrockCredentials; + endpointHost?: string; + }; + + /** Only include credentials if they're complete (accessKeyId and secretAccessKey are both set) */ + const hasCompleteCredentials = + credentials && + typeof credentials.accessKeyId === 'string' && + credentials.accessKeyId !== '' && + typeof credentials.secretAccessKey === 'string' && + credentials.secretAccessKey !== ''; if (PROXY) { const proxyAgent = new HttpsProxyAgent(PROXY); @@ -116,8 +115,10 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => { // the AWS SDK's default credential provider chain is used (instance profiles, // AWS profiles, environment variables, etc.) const customClient = new BedrockRuntimeClient({ - region: llmConfig.region ?? BEDROCK_AWS_DEFAULT_REGION, - ...(credentials && { credentials }), + region: (llmConfig.region as string) ?? BEDROCK_AWS_DEFAULT_REGION, + ...(hasCompleteCredentials && { + credentials: credentials as { accessKeyId: string; secretAccessKey: string }, + }), requestHandler: new NodeHttpHandler({ httpAgent: proxyAgent, httpsAgent: proxyAgent, @@ -141,10 +142,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => { } return { - /** @type {BedrockClientOptions} */ llmConfig, configOptions, }; -}; - -module.exports = getOptions; +} diff --git a/packages/api/src/endpoints/config.ts b/packages/api/src/endpoints/config.ts new file mode 100644 index 0000000000..041f8ca73d --- /dev/null +++ b/packages/api/src/endpoints/config.ts @@ -0,0 +1,99 @@ +import { Providers } from '@librechat/agents'; +import { EModelEndpoint } from 'librechat-data-provider'; +import type { TEndpoint } from 'librechat-data-provider'; +import type { AppConfig } from '@librechat/data-schemas'; +import type { BaseInitializeParams, InitializeResultBase } from '~/types'; +import { initializeAnthropic } from './anthropic/initialize'; +import { initializeBedrock } from './bedrock/initialize'; +import { initializeCustom } from './custom/initialize'; +import { initializeGoogle } from './google/initialize'; +import { initializeOpenAI } from './openai/initialize'; +import { getCustomEndpointConfig } from '~/app/config'; + +/** + * Type for initialize functions + */ +export type InitializeFn = (params: BaseInitializeParams) => Promise; + +/** + * Check if the provider is a known custom provider + * @param provider - The provider string + * @returns True if the provider is a known custom provider, false otherwise + */ +export function isKnownCustomProvider(provider?: string): boolean { + return [Providers.XAI, Providers.DEEPSEEK, Providers.OPENROUTER].includes( + (provider?.toLowerCase() ?? 
'') as Providers, + ); +} + +/** + * Provider configuration map mapping providers to their initialization functions + */ +export const providerConfigMap: Record = { + [Providers.XAI]: initializeCustom, + [Providers.DEEPSEEK]: initializeCustom, + [Providers.OPENROUTER]: initializeCustom, + [EModelEndpoint.openAI]: initializeOpenAI, + [EModelEndpoint.google]: initializeGoogle, + [EModelEndpoint.bedrock]: initializeBedrock, + [EModelEndpoint.azureOpenAI]: initializeOpenAI, + [EModelEndpoint.anthropic]: initializeAnthropic, +}; + +/** + * Result from getProviderConfig + */ +export interface ProviderConfigResult { + /** The initialization function for this provider */ + getOptions: InitializeFn; + /** The resolved provider name (may be different from input if normalized) */ + overrideProvider: string; + /** Custom endpoint configuration (if applicable) */ + customEndpointConfig?: Partial; +} + +/** + * Get the provider configuration and override endpoint based on the provider string + * + * @param params - Configuration parameters + * @param params.provider - The provider string + * @param params.appConfig - The application configuration + * @returns Provider configuration including getOptions function, override provider, and custom config + * @throws Error if provider is not supported + */ +export function getProviderConfig({ + provider, + appConfig, +}: { + provider: string; + appConfig?: AppConfig; +}): ProviderConfigResult { + let getOptions = providerConfigMap[provider]; + let overrideProvider = provider; + let customEndpointConfig: Partial | undefined; + + if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) { + overrideProvider = provider.toLowerCase(); + getOptions = providerConfigMap[overrideProvider]; + } else if (!getOptions) { + customEndpointConfig = getCustomEndpointConfig({ endpoint: provider, appConfig }); + if (!customEndpointConfig) { + throw new Error(`Provider ${provider} not supported`); + } + getOptions = initializeCustom; + overrideProvider = Providers.OPENAI; + } + + if (isKnownCustomProvider(overrideProvider) && !customEndpointConfig) { + customEndpointConfig = getCustomEndpointConfig({ endpoint: provider, appConfig }); + if (!customEndpointConfig) { + throw new Error(`Provider ${provider} not supported`); + } + } + + return { + getOptions, + overrideProvider, + customEndpointConfig, + }; +} diff --git a/packages/api/src/endpoints/custom/index.ts b/packages/api/src/endpoints/custom/index.ts index f03c2281a9..d9819c2802 100644 --- a/packages/api/src/endpoints/custom/index.ts +++ b/packages/api/src/endpoints/custom/index.ts @@ -1 +1,2 @@ export * from './config'; +export * from './initialize'; diff --git a/packages/api/src/endpoints/custom/initialize.ts b/packages/api/src/endpoints/custom/initialize.ts new file mode 100644 index 0000000000..4550fa9f5b --- /dev/null +++ b/packages/api/src/endpoints/custom/initialize.ts @@ -0,0 +1,180 @@ +import { + CacheKeys, + ErrorTypes, + envVarRegex, + FetchTokenConfig, + extractEnvVariable, +} from 'librechat-data-provider'; +import type { TEndpoint } from 'librechat-data-provider'; +import type { AppConfig } from '@librechat/data-schemas'; +import type { BaseInitializeParams, InitializeResultBase, EndpointTokenConfig } from '~/types'; +import { getOpenAIConfig } from '~/endpoints/openai/config'; +import { getCustomEndpointConfig } from '~/app/config'; +import { fetchModels } from '~/endpoints/models'; +import { isUserProvided, checkUserKeyExpiry } from '~/utils'; +import { standardCache } from '~/cache'; + +const { 
PROXY } = process.env; + +/** + * Builds custom options from endpoint configuration + */ +function buildCustomOptions( + endpointConfig: Partial, + appConfig?: AppConfig, + endpointTokenConfig?: Record, +) { + const customOptions: Record = { + headers: endpointConfig.headers, + addParams: endpointConfig.addParams, + dropParams: endpointConfig.dropParams, + customParams: endpointConfig.customParams, + titleConvo: endpointConfig.titleConvo, + titleModel: endpointConfig.titleModel, + forcePrompt: endpointConfig.forcePrompt, + summaryModel: endpointConfig.summaryModel, + modelDisplayLabel: endpointConfig.modelDisplayLabel, + titleMethod: endpointConfig.titleMethod ?? 'completion', + contextStrategy: endpointConfig.summarize ? 'summarize' : null, + directEndpoint: endpointConfig.directEndpoint, + titleMessageRole: endpointConfig.titleMessageRole, + streamRate: endpointConfig.streamRate, + endpointTokenConfig, + }; + + const allConfig = appConfig?.endpoints?.all; + if (allConfig) { + customOptions.streamRate = allConfig.streamRate; + } + + return customOptions; +} + +/** + * Initializes a custom endpoint client configuration. + * This function handles custom endpoints defined in librechat.yaml, including + * user-provided API keys and URLs. + * + * @param params - Configuration parameters + * @returns Promise resolving to endpoint configuration options + * @throws Error if config is missing, API key is not provided, or base URL is missing + */ +export async function initializeCustom({ + req, + endpoint, + model_parameters, + db, +}: BaseInitializeParams): Promise { + const appConfig = req.config; + const { key: expiresAt } = req.body; + + const endpointConfig = getCustomEndpointConfig({ + endpoint, + appConfig, + }); + + if (!endpointConfig) { + throw new Error(`Config not found for the ${endpoint} custom endpoint.`); + } + + const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey ?? ''); + const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL ?? ''); + + if (CUSTOM_API_KEY.match(envVarRegex)) { + throw new Error(`Missing API Key for ${endpoint}.`); + } + + if (CUSTOM_BASE_URL.match(envVarRegex)) { + throw new Error(`Missing Base URL for ${endpoint}.`); + } + + const userProvidesKey = isUserProvided(CUSTOM_API_KEY); + const userProvidesURL = isUserProvided(CUSTOM_BASE_URL); + + let userValues = null; + if (expiresAt && (userProvidesKey || userProvidesURL)) { + checkUserKeyExpiry(expiresAt, endpoint); + userValues = await db.getUserKeyValues({ userId: req.user?.id ?? '', name: endpoint }); + } + + const apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY; + const baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL; + + if (userProvidesKey && !apiKey) { + throw new Error( + JSON.stringify({ + type: ErrorTypes.NO_USER_KEY, + }), + ); + } + + if (userProvidesURL && !baseURL) { + throw new Error( + JSON.stringify({ + type: ErrorTypes.NO_BASE_URL, + }), + ); + } + + if (!apiKey) { + throw new Error(`${endpoint} API key not provided.`); + } + + if (!baseURL) { + throw new Error(`${endpoint} Base URL not provided.`); + } + + let endpointTokenConfig: EndpointTokenConfig | undefined; + + const userId = req.user?.id ?? ''; + + const cache = standardCache(CacheKeys.TOKEN_CONFIG); + /** tokenConfig is an optional extended property on custom endpoints */ + const hasTokenConfig = (endpointConfig as Record).tokenConfig != null; + const tokenKey = + !hasTokenConfig && (userProvidesKey || userProvidesURL) ? 
`${endpoint}:${userId}` : endpoint; + + const cachedConfig = + !hasTokenConfig && + FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] && + (await cache.get(tokenKey)); + + endpointTokenConfig = (cachedConfig as EndpointTokenConfig) || undefined; + + if ( + FetchTokenConfig[endpoint.toLowerCase() as keyof typeof FetchTokenConfig] && + endpointConfig && + endpointConfig.models?.fetch && + !endpointTokenConfig + ) { + await fetchModels({ apiKey, baseURL, name: endpoint, user: userId, tokenKey }); + endpointTokenConfig = (await cache.get(tokenKey)) as EndpointTokenConfig | undefined; + } + + const customOptions = buildCustomOptions(endpointConfig, appConfig, endpointTokenConfig); + + const clientOptions: Record = { + reverseProxyUrl: baseURL ?? null, + proxy: PROXY ?? null, + ...customOptions, + }; + + const modelOptions = { ...(model_parameters ?? {}), user: userId }; + const finalClientOptions = { + modelOptions, + ...clientOptions, + }; + + const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint); + if (options != null) { + (options as InitializeResultBase).useLegacyContent = true; + (options as InitializeResultBase).endpointTokenConfig = endpointTokenConfig; + } + + const streamRate = clientOptions.streamRate as number | undefined; + if (streamRate) { + (options.llmConfig as Record)._lc_stream_delay = streamRate; + } + + return options; +} diff --git a/packages/api/src/endpoints/google/index.ts b/packages/api/src/endpoints/google/index.ts index 4045e8de0c..46ec106869 100644 --- a/packages/api/src/endpoints/google/index.ts +++ b/packages/api/src/endpoints/google/index.ts @@ -1 +1,2 @@ export * from './llm'; +export * from './initialize'; diff --git a/packages/api/src/endpoints/google/initialize.ts b/packages/api/src/endpoints/google/initialize.ts new file mode 100644 index 0000000000..8881c9ff24 --- /dev/null +++ b/packages/api/src/endpoints/google/initialize.ts @@ -0,0 +1,91 @@ +import path from 'path'; +import { EModelEndpoint, AuthKeys } from 'librechat-data-provider'; +import type { + BaseInitializeParams, + InitializeResultBase, + GoogleConfigOptions, + GoogleCredentials, +} from '~/types'; +import { isEnabled, loadServiceKey, checkUserKeyExpiry } from '~/utils'; +import { getGoogleConfig } from './llm'; + +/** + * Initializes Google/Vertex AI endpoint configuration. + * Supports both API key authentication and service account credentials. 
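+ *
+ * @example
+ * // Illustrative sketch only: `req` is assumed to be an Express request with
+ * // `config`, `user`, and `body` populated, and `db` a data-access adapter
+ * // exposing `getUserKey`/`getUserKeyValues`.
+ * const options = await initializeGoogle({
+ *   req,
+ *   endpoint: EModelEndpoint.google,
+ *   model_parameters: { model: 'gemini-1.5-pro' },
+ *   db,
+ * });
+ * // `options` is produced by getGoogleConfig(), built from GOOGLE_KEY or a
+ * // loaded service-account key.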
+ * + * @param params - Configuration parameters + * @returns Promise resolving to Google configuration options + * @throws Error if no valid credentials are provided + */ +export async function initializeGoogle({ + req, + endpoint, + model_parameters, + db, +}: BaseInitializeParams): Promise { + void endpoint; + const appConfig = req.config; + const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env; + const isUserProvided = GOOGLE_KEY === 'user_provided'; + const { key: expiresAt } = req.body; + + let userKey = null; + if (expiresAt && isUserProvided) { + checkUserKeyExpiry(expiresAt, EModelEndpoint.google); + userKey = await db.getUserKey({ userId: req.user?.id, name: EModelEndpoint.google }); + } + + let serviceKey: Record = {}; + + /** Check if GOOGLE_KEY is provided at all (including 'user_provided') */ + const isGoogleKeyProvided = + (GOOGLE_KEY && GOOGLE_KEY.trim() !== '') || (isUserProvided && userKey != null); + + if (!isGoogleKeyProvided && loadServiceKey) { + /** Only attempt to load service key if GOOGLE_KEY is not provided */ + try { + const serviceKeyPath = + process.env.GOOGLE_SERVICE_KEY_FILE || path.join(process.cwd(), 'data', 'auth.json'); + const loadedKey = await loadServiceKey(serviceKeyPath); + if (loadedKey) { + serviceKey = loadedKey; + } + } catch { + // Service key loading failed, but that's okay if not required + serviceKey = {}; + } + } + + const credentials: GoogleCredentials = isUserProvided + ? (userKey as GoogleCredentials) + : { + [AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey, + [AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY, + }; + + let clientOptions: GoogleConfigOptions = {}; + + /** @type {undefined | TBaseEndpoint} */ + const allConfig = appConfig?.endpoints?.all; + /** @type {undefined | TBaseEndpoint} */ + const googleConfig = appConfig?.endpoints?.[EModelEndpoint.google]; + + if (googleConfig) { + clientOptions.streamRate = googleConfig.streamRate; + clientOptions.titleModel = googleConfig.titleModel; + } + + if (allConfig) { + clientOptions.streamRate = allConfig.streamRate; + } + + clientOptions = { + reverseProxyUrl: GOOGLE_REVERSE_PROXY ?? undefined, + authHeader: isEnabled(GOOGLE_AUTH_HEADER) ?? undefined, + proxy: PROXY ?? undefined, + modelOptions: model_parameters ?? 
{}, + ...clientOptions, + }; + + return getGoogleConfig(credentials, clientOptions); +} diff --git a/packages/api/src/endpoints/index.ts b/packages/api/src/endpoints/index.ts index 56b95bf52e..208181168e 100644 --- a/packages/api/src/endpoints/index.ts +++ b/packages/api/src/endpoints/index.ts @@ -1,4 +1,7 @@ +export * from './anthropic'; +export * from './bedrock'; +export * from './config'; export * from './custom'; export * from './google'; +export * from './models'; export * from './openai'; -export * from './anthropic'; diff --git a/api/server/services/ModelService.spec.js b/packages/api/src/endpoints/models.spec.ts similarity index 80% rename from api/server/services/ModelService.spec.js rename to packages/api/src/endpoints/models.spec.ts index 8880768c91..575cc5fef8 100644 --- a/api/server/services/ModelService.spec.js +++ b/packages/api/src/endpoints/models.spec.ts @@ -1,49 +1,46 @@ -const axios = require('axios'); -const { logAxiosError, resolveHeaders } = require('@librechat/api'); -const { EModelEndpoint, defaultModels } = require('librechat-data-provider'); - -const { +import axios from 'axios'; +import { EModelEndpoint, defaultModels } from 'librechat-data-provider'; +import { fetchModels, splitAndTrim, getOpenAIModels, getGoogleModels, getBedrockModels, getAnthropicModels, -} = require('./ModelService'); +} from './models'; -jest.mock('@librechat/api', () => { - const originalUtils = jest.requireActual('@librechat/api'); +jest.mock('axios'); + +jest.mock('~/cache', () => ({ + standardCache: jest.fn().mockImplementation(() => ({ + get: jest.fn().mockResolvedValue(undefined), + set: jest.fn().mockResolvedValue(true), + })), +})); + +jest.mock('~/utils', () => { + const originalUtils = jest.requireActual('~/utils'); return { ...originalUtils, - processModelData: jest.fn((...args) => { - return originalUtils.processModelData(...args); - }), + processModelData: jest.fn((...args) => originalUtils.processModelData(...args)), logAxiosError: jest.fn(), resolveHeaders: jest.fn((options) => options?.headers || {}), }; }); -jest.mock('axios'); -jest.mock('~/cache/getLogStores', () => - jest.fn().mockImplementation(() => ({ - get: jest.fn().mockResolvedValue(undefined), - set: jest.fn().mockResolvedValue(true), - })), -); jest.mock('@librechat/data-schemas', () => ({ ...jest.requireActual('@librechat/data-schemas'), logger: { error: jest.fn(), - }, -})); -jest.mock('./Config/EndpointService', () => ({ - config: { - openAIApiKey: 'mockedApiKey', - userProvidedOpenAI: false, + warn: jest.fn(), + debug: jest.fn(), }, })); -axios.get.mockResolvedValue({ +const mockedAxios = axios as jest.Mocked; +const { logAxiosError, resolveHeaders } = jest.requireMock('~/utils'); + +mockedAxios.get.mockResolvedValue({ data: { data: [{ id: 'model-1' }, { id: 'model-2' }], }, @@ -59,7 +56,7 @@ describe('fetchModels', () => { }); expect(models).toEqual(['model-1', 'model-2']); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.stringContaining('https://api.test.com/models'), expect.any(Object), ); @@ -75,7 +72,7 @@ describe('fetchModels', () => { }); expect(models).toEqual(['model-1', 'model-2']); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.stringContaining('https://api.test.com/models?user=user123'), expect.any(Object), ); @@ -95,7 +92,7 @@ describe('fetchModels', () => { headers: customHeaders, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( 
expect.stringContaining('https://api.test.com/models'), expect.objectContaining({ headers: expect.objectContaining({ @@ -116,7 +113,7 @@ describe('fetchModels', () => { headers: null, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.stringContaining('https://api.test.com/models'), expect.objectContaining({ headers: expect.objectContaining({ @@ -135,7 +132,7 @@ describe('fetchModels', () => { headers: undefined, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.stringContaining('https://api.test.com/models'), expect.objectContaining({ headers: expect.objectContaining({ @@ -173,9 +170,7 @@ describe('fetchModels with createTokenConfig true', () => { }; beforeEach(() => { - // Clears the mock's history before each test - const _utils = require('@librechat/api'); - axios.get.mockResolvedValue({ data }); + mockedAxios.get.mockResolvedValue({ data }); }); it('creates and stores token configuration if createTokenConfig is true', async () => { @@ -186,23 +181,23 @@ describe('fetchModels with createTokenConfig true', () => { createTokenConfig: true, }); - const { processModelData } = require('@librechat/api'); + const { processModelData } = jest.requireMock('~/utils'); expect(processModelData).toHaveBeenCalled(); expect(processModelData).toHaveBeenCalledWith(data); }); }); describe('getOpenAIModels', () => { - let originalEnv; + let originalEnv: NodeJS.ProcessEnv; beforeEach(() => { originalEnv = { ...process.env }; - axios.get.mockRejectedValue(new Error('Network error')); + mockedAxios.get.mockRejectedValue(new Error('Network error')); }); afterEach(() => { process.env = originalEnv; - axios.get.mockReset(); + mockedAxios.get.mockReset(); }); it('returns default models when no environment configurations are provided (and fetch fails)', async () => { @@ -223,15 +218,16 @@ describe('getOpenAIModels', () => { }); it('utilizes proxy configuration when PROXY is set', async () => { - axios.get.mockResolvedValue({ + mockedAxios.get.mockResolvedValue({ data: { data: [], }, }); process.env.PROXY = 'http://localhost:8888'; + process.env.OPENAI_API_KEY = 'mockedApiKey'; await getOpenAIModels({ user: 'user456' }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.any(String), expect.objectContaining({ httpsAgent: expect.anything(), @@ -240,35 +236,13 @@ describe('getOpenAIModels', () => { }); }); -describe('getOpenAIModels with mocked config', () => { - it('uses alternative behavior when userProvidedOpenAI is true', async () => { - jest.mock('./Config/EndpointService', () => ({ - config: { - openAIApiKey: 'mockedApiKey', - userProvidedOpenAI: true, - }, - })); - jest.mock('librechat-data-provider', () => { - const original = jest.requireActual('librechat-data-provider'); - return { - ...original, - defaultModels: { - [original.EModelEndpoint.openAI]: ['some-default-model'], - }, - }; - }); - - jest.resetModules(); - const { getOpenAIModels } = require('./ModelService'); - - const models = await getOpenAIModels({ user: 'user456' }); - expect(models).toContain('some-default-model'); - }); -}); - describe('getOpenAIModels sorting behavior', () => { + let originalEnv: NodeJS.ProcessEnv; + beforeEach(() => { - axios.get.mockResolvedValue({ + originalEnv = { ...process.env }; + process.env.OPENAI_API_KEY = 'mockedApiKey'; + mockedAxios.get.mockResolvedValue({ data: { data: [ { id: 'gpt-3.5-turbo-instruct-0914' }, @@ -281,13 +255,16 @@ describe('getOpenAIModels sorting 
behavior', () => { }); }); + afterEach(() => { + process.env = originalEnv; + jest.clearAllMocks(); + }); + it('ensures instruct models are listed last', async () => { const models = await getOpenAIModels({ user: 'user456' }); - // Check if the last model is an "instruct" model expect(models[models.length - 1]).toMatch(/instruct/); - // Check if the "instruct" models are placed at the end const instructIndexes = models .map((model, index) => (model.includes('instruct') ? index : -1)) .filter((index) => index !== -1); @@ -306,10 +283,6 @@ describe('getOpenAIModels sorting behavior', () => { ]; expect(models).toEqual(expectedOrder); }); - - afterEach(() => { - jest.clearAllMocks(); - }); }); describe('fetchModels with Ollama specific logic', () => { @@ -320,7 +293,7 @@ describe('fetchModels with Ollama specific logic', () => { }; beforeEach(() => { - axios.get.mockResolvedValue(mockOllamaData); + mockedAxios.get.mockResolvedValue(mockOllamaData); }); afterEach(() => { @@ -336,7 +309,7 @@ describe('fetchModels with Ollama specific logic', () => { }); expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']); - expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', { + expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', { headers: {}, timeout: 5000, }); @@ -352,7 +325,7 @@ describe('fetchModels with Ollama specific logic', () => { email: 'test@example.com', }; - resolveHeaders.mockReturnValueOnce(customHeaders); + (resolveHeaders as jest.Mock).mockReturnValueOnce(customHeaders); const models = await fetchModels({ user: 'user789', @@ -368,15 +341,15 @@ describe('fetchModels with Ollama specific logic', () => { headers: customHeaders, user: userObject, }); - expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', { + expect(mockedAxios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', { headers: customHeaders, timeout: 5000, }); }); it('should handle errors gracefully when fetching Ollama models fails and fallback to OpenAI-compatible fetch', async () => { - axios.get.mockRejectedValueOnce(new Error('Ollama API error')); - axios.get.mockResolvedValueOnce({ + mockedAxios.get.mockRejectedValueOnce(new Error('Ollama API error')); + mockedAxios.get.mockResolvedValueOnce({ data: { data: [{ id: 'fallback-model-1' }, { id: 'fallback-model-2' }], }, @@ -395,7 +368,7 @@ describe('fetchModels with Ollama specific logic', () => { 'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.', error: expect.any(Error), }); - expect(axios.get).toHaveBeenCalledTimes(2); + expect(mockedAxios.get).toHaveBeenCalledTimes(2); }); it('should return an empty array if no baseURL is provided', async () => { @@ -408,8 +381,7 @@ describe('fetchModels with Ollama specific logic', () => { }); it('should not fetch Ollama models if the name does not start with "ollama"', async () => { - // Mock axios to return a different set of models for non-Ollama API calls - axios.get.mockResolvedValue({ + mockedAxios.get.mockResolvedValue({ data: { data: [{ id: 'model-1' }, { id: 'model-2' }], }, @@ -423,16 +395,13 @@ describe('fetchModels with Ollama specific logic', () => { }); expect(models).toEqual(['model-1', 'model-2']); - expect(axios.get).toHaveBeenCalledWith( - 'https://api.test.com/models', // Ensure the correct API endpoint is called - expect.any(Object), // Ensuring some object (headers, etc.) 
is passed - ); + expect(mockedAxios.get).toHaveBeenCalledWith('https://api.test.com/models', expect.any(Object)); }); }); describe('fetchModels URL construction with trailing slashes', () => { beforeEach(() => { - axios.get.mockResolvedValue({ + mockedAxios.get.mockResolvedValue({ data: { data: [{ id: 'model-1' }, { id: 'model-2' }], }, @@ -451,7 +420,10 @@ describe('fetchModels URL construction with trailing slashes', () => { name: 'TestAPI', }); - expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object)); + expect(mockedAxios.get).toHaveBeenCalledWith( + 'https://api.test.com/v1/models', + expect.any(Object), + ); }); it('should handle baseURL without trailing slash normally', async () => { @@ -462,7 +434,10 @@ describe('fetchModels URL construction with trailing slashes', () => { name: 'TestAPI', }); - expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object)); + expect(mockedAxios.get).toHaveBeenCalledWith( + 'https://api.test.com/v1/models', + expect.any(Object), + ); }); it('should handle baseURL with multiple trailing slashes', async () => { @@ -473,7 +448,10 @@ describe('fetchModels URL construction with trailing slashes', () => { name: 'TestAPI', }); - expect(axios.get).toHaveBeenCalledWith('https://api.test.com/v1/models', expect.any(Object)); + expect(mockedAxios.get).toHaveBeenCalledWith( + 'https://api.test.com/v1/models', + expect.any(Object), + ); }); it('should correctly append query params after stripping trailing slashes', async () => { @@ -485,7 +463,7 @@ describe('fetchModels URL construction with trailing slashes', () => { userIdQuery: true, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( 'https://api.test.com/v1/models?user=user123', expect.any(Object), ); @@ -519,6 +497,17 @@ describe('splitAndTrim', () => { }); describe('getAnthropicModels', () => { + let originalEnv: NodeJS.ProcessEnv; + + beforeEach(() => { + originalEnv = { ...process.env }; + }); + + afterEach(() => { + process.env = originalEnv; + jest.clearAllMocks(); + }); + it('returns default models when ANTHROPIC_MODELS is not set', async () => { delete process.env.ANTHROPIC_MODELS; const models = await getAnthropicModels(); @@ -535,7 +524,7 @@ describe('getAnthropicModels', () => { delete process.env.ANTHROPIC_MODELS; process.env.ANTHROPIC_API_KEY = 'test-anthropic-key'; - axios.get.mockResolvedValue({ + mockedAxios.get.mockResolvedValue({ data: { data: [{ id: 'claude-3' }, { id: 'claude-4' }], }, @@ -548,7 +537,7 @@ describe('getAnthropicModels', () => { name: EModelEndpoint.anthropic, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.any(String), expect.objectContaining({ headers: { @@ -564,7 +553,7 @@ describe('getAnthropicModels', () => { 'X-Custom-Header': 'custom-value', }; - axios.get.mockResolvedValue({ + mockedAxios.get.mockResolvedValue({ data: { data: [{ id: 'claude-3' }], }, @@ -578,7 +567,7 @@ describe('getAnthropicModels', () => { headers: customHeaders, }); - expect(axios.get).toHaveBeenCalledWith( + expect(mockedAxios.get).toHaveBeenCalledWith( expect.any(String), expect.objectContaining({ headers: { @@ -591,6 +580,16 @@ describe('getAnthropicModels', () => { }); describe('getGoogleModels', () => { + let originalEnv: NodeJS.ProcessEnv; + + beforeEach(() => { + originalEnv = { ...process.env }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + it('returns default models when GOOGLE_MODELS is not set', () => { 
delete process.env.GOOGLE_MODELS; const models = getGoogleModels(); @@ -605,6 +604,16 @@ describe('getGoogleModels', () => { }); describe('getBedrockModels', () => { + let originalEnv: NodeJS.ProcessEnv; + + beforeEach(() => { + originalEnv = { ...process.env }; + }); + + afterEach(() => { + process.env = originalEnv; + }); + it('returns default models when BEDROCK_AWS_MODELS is not set', () => { delete process.env.BEDROCK_AWS_MODELS; const models = getBedrockModels(); diff --git a/packages/api/src/endpoints/models.ts b/packages/api/src/endpoints/models.ts new file mode 100644 index 0000000000..45e0f89d38 --- /dev/null +++ b/packages/api/src/endpoints/models.ts @@ -0,0 +1,383 @@ +import axios from 'axios'; +import { logger } from '@librechat/data-schemas'; +import { HttpsProxyAgent } from 'https-proxy-agent'; +import { CacheKeys, KnownEndpoints, EModelEndpoint, defaultModels } from 'librechat-data-provider'; +import type { IUser } from '@librechat/data-schemas'; +import { + processModelData, + extractBaseURL, + isUserProvided, + resolveHeaders, + deriveBaseURL, + logAxiosError, + inputSchema, +} from '~/utils'; +import { standardCache } from '~/cache'; + +export interface FetchModelsParams { + /** User ID for API requests */ + user?: string; + /** API key for authentication */ + apiKey: string; + /** Base URL for the API */ + baseURL?: string; + /** Endpoint name (defaults to 'openAI') */ + name?: string; + /** Whether directEndpoint was configured */ + direct?: boolean; + /** Whether to fetch from Azure */ + azure?: boolean; + /** Whether to send user ID as query parameter */ + userIdQuery?: boolean; + /** Whether to create token configuration from API response */ + createTokenConfig?: boolean; + /** Cache key for token configuration (uses name if omitted) */ + tokenKey?: string; + /** Optional headers for the request */ + headers?: Record | null; + /** Optional user object for header resolution */ + userObject?: Partial; +} + +/** + * Fetches Ollama models from the specified base API path. + * @param baseURL - The Ollama server URL + * @param options - Optional configuration + * @returns Promise resolving to array of model names + */ +async function fetchOllamaModels( + baseURL: string, + options: { headers?: Record | null; user?: Partial } = {}, +): Promise { + if (!baseURL) { + return []; + } + + const ollamaEndpoint = deriveBaseURL(baseURL); + + const resolvedHeaders = resolveHeaders({ + headers: options.headers ?? undefined, + user: options.user, + }); + + const response = await axios.get<{ models: Array<{ name: string }> }>( + `${ollamaEndpoint}/api/tags`, + { + headers: resolvedHeaders, + timeout: 5000, + }, + ); + + return response.data.models.map((tag) => tag.name); +} + +/** + * Splits a string by commas and trims each resulting value. + * @param input - The input string to split. + * @returns An array of trimmed values. + */ +export function splitAndTrim(input: string | null | undefined): string[] { + if (!input || typeof input !== 'string') { + return []; + } + return input + .split(',') + .map((item) => item.trim()) + .filter(Boolean); +} + +/** + * Fetches models from the specified base API path or Azure, based on the provided configuration. + * + * @param params - The parameters for fetching the models. + * @returns A promise that resolves to an array of model identifiers. 
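+ *
+ * @example
+ * // Illustrative sketch only; the base URL and user ID are placeholder values.
+ * const models = await fetchModels({
+ *   apiKey: process.env.OPENAI_API_KEY ?? '',
+ *   baseURL: 'https://api.openai.com/v1',
+ *   name: EModelEndpoint.openAI,
+ *   user: 'user-123',
+ * });
+ * // Resolves to the model IDs returned by the endpoint's /models route,
+ * // e.g. ['gpt-4o', 'gpt-4o-mini'].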
+ */ +export async function fetchModels({ + user, + apiKey, + baseURL: _baseURL, + name = EModelEndpoint.openAI, + direct = false, + azure = false, + userIdQuery = false, + createTokenConfig = true, + tokenKey, + headers, + userObject, +}: FetchModelsParams): Promise { + let models: string[] = []; + const baseURL = direct ? extractBaseURL(_baseURL ?? '') : _baseURL; + + if (!baseURL && !azure) { + return models; + } + + if (!apiKey) { + return models; + } + + if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) { + try { + return await fetchOllamaModels(baseURL ?? '', { headers, user: userObject }); + } catch (ollamaError) { + const logMessage = + 'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.'; + logAxiosError({ message: logMessage, error: ollamaError as Error }); + } + } + + try { + const options: { + headers: Record; + timeout: number; + httpsAgent?: HttpsProxyAgent; + } = { + headers: { + ...(headers ?? {}), + }, + timeout: 5000, + }; + + if (name === EModelEndpoint.anthropic) { + options.headers = { + 'x-api-key': apiKey, + 'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01', + }; + } else { + options.headers.Authorization = `Bearer ${apiKey}`; + } + + if (process.env.PROXY) { + options.httpsAgent = new HttpsProxyAgent(process.env.PROXY); + } + + if (process.env.OPENAI_ORGANIZATION && baseURL?.includes('openai')) { + options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION; + } + + const url = new URL(`${(baseURL ?? '').replace(/\/+$/, '')}${azure ? '' : '/models'}`); + if (user && userIdQuery) { + url.searchParams.append('user', user); + } + const res = await axios.get(url.toString(), options); + + const input = res.data; + + const validationResult = inputSchema.safeParse(input); + if (validationResult.success && createTokenConfig) { + const endpointTokenConfig = processModelData(input); + const cache = standardCache(CacheKeys.TOKEN_CONFIG); + await cache.set(tokenKey ?? name, endpointTokenConfig); + } + models = input.data.map((item: { id: string }) => item.id); + } catch (error) { + const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`; + logAxiosError({ message: logMessage, error: error as Error }); + } + + return models; +} + +/** Options for fetching OpenAI models */ +export interface GetOpenAIModelsOptions { + /** User ID for API requests */ + user?: string; + /** Whether to fetch from Azure */ + azure?: boolean; + /** Whether to fetch models for the Assistants endpoint */ + assistants?: boolean; + /** OpenAI API key (if not using environment variable) */ + openAIApiKey?: string; + /** Whether user provides their own API key */ + userProvidedOpenAI?: boolean; +} + +/** + * Fetches models from OpenAI or Azure based on the provided options. + * @param opts - Options for fetching models + * @param _models - Fallback models array + * @returns Promise resolving to array of model IDs + */ +export async function fetchOpenAIModels( + opts: GetOpenAIModelsOptions, + _models: string[] = [], +): Promise { + let models = _models.slice() ?? []; + const apiKey = opts.openAIApiKey ?? 
process.env.OPENAI_API_KEY; + const openaiBaseURL = 'https://api.openai.com/v1'; + let baseURL = openaiBaseURL; + let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY; + + if (opts.assistants && process.env.ASSISTANTS_BASE_URL) { + reverseProxyUrl = process.env.ASSISTANTS_BASE_URL; + } else if (opts.azure) { + return models; + } + + if (reverseProxyUrl) { + baseURL = extractBaseURL(reverseProxyUrl) ?? openaiBaseURL; + } + + const modelsCache = standardCache(CacheKeys.MODEL_QUERIES); + + const cachedModels = await modelsCache.get(baseURL); + if (cachedModels) { + return cachedModels as string[]; + } + + if (baseURL || opts.azure) { + models = await fetchModels({ + apiKey: apiKey ?? '', + baseURL, + azure: opts.azure, + user: opts.user, + name: EModelEndpoint.openAI, + }); + } + + if (models.length === 0) { + return _models; + } + + if (baseURL === openaiBaseURL) { + const regex = /(text-davinci-003|gpt-|o\d+)/; + const excludeRegex = /audio|realtime/; + models = models.filter((model) => regex.test(model) && !excludeRegex.test(model)); + const instructModels = models.filter((model) => model.includes('instruct')); + const otherModels = models.filter((model) => !model.includes('instruct')); + models = otherModels.concat(instructModels); + } + + await modelsCache.set(baseURL, models); + return models; +} + +/** + * Loads the default models for OpenAI or Azure. + * @param opts - Options for getting models + * @returns Promise resolving to array of model IDs + */ +export async function getOpenAIModels(opts: GetOpenAIModelsOptions = {}): Promise { + let models = defaultModels[EModelEndpoint.openAI]; + + if (opts.assistants) { + models = defaultModels[EModelEndpoint.assistants]; + } else if (opts.azure) { + models = defaultModels[EModelEndpoint.azureAssistants]; + } + + let key: string; + if (opts.assistants) { + key = 'ASSISTANTS_MODELS'; + } else if (opts.azure) { + key = 'AZURE_OPENAI_MODELS'; + } else { + key = 'OPENAI_MODELS'; + } + + if (process.env[key]) { + return splitAndTrim(process.env[key]); + } + + if (opts.userProvidedOpenAI) { + return models; + } + + return await fetchOpenAIModels(opts, models); +} + +/** + * Fetches models from the Anthropic API. + * @param opts - Options for fetching models + * @param _models - Fallback models array + * @returns Promise resolving to array of model IDs + */ +export async function fetchAnthropicModels( + opts: { user?: string } = {}, + _models: string[] = [], +): Promise { + let models = _models.slice() ?? []; + const apiKey = process.env.ANTHROPIC_API_KEY; + const anthropicBaseURL = 'https://api.anthropic.com/v1'; + let baseURL = anthropicBaseURL; + const reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY; + + if (reverseProxyUrl) { + baseURL = extractBaseURL(reverseProxyUrl) ?? anthropicBaseURL; + } + + if (!apiKey) { + return models; + } + + const modelsCache = standardCache(CacheKeys.MODEL_QUERIES); + + const cachedModels = await modelsCache.get(baseURL); + if (cachedModels) { + return cachedModels as string[]; + } + + if (baseURL) { + models = await fetchModels({ + apiKey, + baseURL, + user: opts.user, + name: EModelEndpoint.anthropic, + tokenKey: EModelEndpoint.anthropic, + }); + } + + if (models.length === 0) { + return _models; + } + + await modelsCache.set(baseURL, models); + return models; +} + +/** + * Gets Anthropic models from environment or API. 
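+ *
+ * @example
+ * // Illustrative sketch only. Resolution order mirrors the implementation below:
+ * // ANTHROPIC_MODELS env var, then the defaults when ANTHROPIC_API_KEY is
+ * // user-provided, then a live fetch that falls back to defaults on error.
+ * const models = await getAnthropicModels({ user: 'user-123' });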
+ * @param opts - Options for fetching models + * @returns Promise resolving to array of model IDs + */ +export async function getAnthropicModels(opts: { user?: string } = {}): Promise { + const models = defaultModels[EModelEndpoint.anthropic]; + if (process.env.ANTHROPIC_MODELS) { + return splitAndTrim(process.env.ANTHROPIC_MODELS); + } + + if (isUserProvided(process.env.ANTHROPIC_API_KEY)) { + return models; + } + + try { + return await fetchAnthropicModels(opts, models); + } catch (error) { + logger.error('Error fetching Anthropic models:', error); + return models; + } +} + +/** + * Gets Google models from environment or defaults. + * @returns Array of model IDs + */ +export function getGoogleModels(): string[] { + let models = defaultModels[EModelEndpoint.google]; + if (process.env.GOOGLE_MODELS) { + models = splitAndTrim(process.env.GOOGLE_MODELS); + } + return models; +} + +/** + * Gets Bedrock models from environment or defaults. + * @returns Array of model IDs + */ +export function getBedrockModels(): string[] { + let models = defaultModels[EModelEndpoint.bedrock]; + if (process.env.BEDROCK_AWS_MODELS) { + models = splitAndTrim(process.env.BEDROCK_AWS_MODELS); + } + return models; +} diff --git a/packages/api/src/endpoints/openai/initialize.ts b/packages/api/src/endpoints/openai/initialize.ts index 9b1c5dd131..33ce233d34 100644 --- a/packages/api/src/endpoints/openai/initialize.ts +++ b/packages/api/src/endpoints/openai/initialize.ts @@ -1,13 +1,11 @@ import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider'; import type { - InitializeOpenAIOptionsParams, + BaseInitializeParams, + InitializeResultBase, OpenAIConfigOptions, - LLMConfigResult, UserKeyValues, } from '~/types'; -import { getAzureCredentials } from '~/utils/azure'; -import { isUserProvided } from '~/utils/common'; -import { resolveHeaders } from '~/utils/env'; +import { getAzureCredentials, resolveHeaders, isUserProvided, checkUserKeyExpiry } from '~/utils'; import { getOpenAIConfig } from './config'; /** @@ -18,25 +16,18 @@ import { getOpenAIConfig } from './config'; * @returns Promise resolving to OpenAI configuration options * @throws Error if API key is missing or user key has expired */ -export const initializeOpenAI = async ({ +export async function initializeOpenAI({ req, - appConfig, - overrideModel, - endpointOption, - overrideEndpoint, - getUserKeyValues, - checkUserKeyExpiry, -}: InitializeOpenAIOptionsParams): Promise => { + endpoint, + model_parameters, + db, +}: BaseInitializeParams): Promise { + const appConfig = req.config; const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } = process.env; const { key: expiresAt } = req.body; - const modelName = overrideModel ?? req.body.model; - const endpoint = overrideEndpoint ?? req.body.endpoint; - - if (!endpoint) { - throw new Error('Endpoint is required'); - } + const modelName = model_parameters?.model as string | undefined; const credentials = { [EModelEndpoint.openAI]: OPENAI_API_KEY, @@ -54,7 +45,7 @@ export const initializeOpenAI = async ({ let userValues: UserKeyValues | null = null; if (expiresAt && (userProvidesKey || userProvidesURL)) { checkUserKeyExpiry(expiresAt, endpoint); - userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint }); + userValues = await db.getUserKeyValues({ userId: req.user?.id ?? 
'', name: endpoint }); } let apiKey = userProvidesKey @@ -71,7 +62,8 @@ export const initializeOpenAI = async ({ }; const isAzureOpenAI = endpoint === EModelEndpoint.azureOpenAI; - const azureConfig = isAzureOpenAI && appConfig.endpoints?.[EModelEndpoint.azureOpenAI]; + const azureConfig = isAzureOpenAI && appConfig?.endpoints?.[EModelEndpoint.azureOpenAI]; + let isServerless = false; if (isAzureOpenAI && azureConfig) { const { modelGroupMap, groupMap } = azureConfig; @@ -85,6 +77,7 @@ export const initializeOpenAI = async ({ modelGroupMap, groupMap, }); + isServerless = serverless === true; clientOptions.reverseProxyUrl = configBaseURL ?? clientOptions.reverseProxyUrl; clientOptions.headers = resolveHeaders({ @@ -99,9 +92,9 @@ export const initializeOpenAI = async ({ } apiKey = azureOptions.azureOpenAIApiKey; - clientOptions.azure = !serverless ? azureOptions : undefined; + clientOptions.azure = !isServerless ? azureOptions : undefined; - if (serverless === true) { + if (isServerless) { clientOptions.defaultQuery = azureOptions.azureOpenAIApiVersion ? { 'api-version': azureOptions.azureOpenAIApiVersion } : undefined; @@ -130,9 +123,9 @@ export const initializeOpenAI = async ({ } const modelOptions = { - ...endpointOption.model_parameters, + ...(model_parameters ?? {}), model: modelName, - user: req.user.id, + user: req.user?.id, }; const finalClientOptions: OpenAIConfigOptions = { @@ -142,8 +135,13 @@ export const initializeOpenAI = async ({ const options = getOpenAIConfig(apiKey, finalClientOptions, endpoint); - const openAIConfig = appConfig.endpoints?.[EModelEndpoint.openAI]; - const allConfig = appConfig.endpoints?.all; + /** Set useLegacyContent for Azure serverless deployments */ + if (isServerless) { + (options as InitializeResultBase).useLegacyContent = true; + } + + const openAIConfig = appConfig?.endpoints?.[EModelEndpoint.openAI]; + const allConfig = appConfig?.endpoints?.all; const azureRate = modelName?.includes('gpt-4') ? 
30 : 17; let streamRate: number | undefined; @@ -163,4 +161,4 @@ export const initializeOpenAI = async ({ } return options; -}; +} diff --git a/packages/api/src/mcp/__tests__/tokens.test.ts b/packages/api/src/mcp/__tests__/tokens.test.ts index f3f91afd1e..dcdcacc316 100644 --- a/packages/api/src/mcp/__tests__/tokens.test.ts +++ b/packages/api/src/mcp/__tests__/tokens.test.ts @@ -1,9 +1,10 @@ -import { MCPTokenStorage } from '~/mcp/oauth/tokens'; -import { decryptV2 } from '~/crypto'; -import type { TokenMethods, IToken } from '@librechat/data-schemas'; import { Types } from 'mongoose'; +import { decryptV2 } from '@librechat/data-schemas'; +import type { TokenMethods, IToken } from '@librechat/data-schemas'; +import { MCPTokenStorage } from '~/mcp/oauth/tokens'; -jest.mock('~/crypto', () => ({ +jest.mock('@librechat/data-schemas', () => ({ + ...jest.requireActual('@librechat/data-schemas'), decryptV2: jest.fn(), })); diff --git a/packages/api/src/mcp/oauth/tokens.ts b/packages/api/src/mcp/oauth/tokens.ts index 1f615ee48b..fb560893df 100644 --- a/packages/api/src/mcp/oauth/tokens.ts +++ b/packages/api/src/mcp/oauth/tokens.ts @@ -1,8 +1,7 @@ -import { logger } from '@librechat/data-schemas'; +import { logger, encryptV2, decryptV2 } from '@librechat/data-schemas'; import type { OAuthTokens, OAuthClientInformation } from '@modelcontextprotocol/sdk/shared/auth.js'; import type { TokenMethods, IToken } from '@librechat/data-schemas'; import type { MCPOAuthTokens, ExtendedOAuthTokens, OAuthMetadata } from './types'; -import { encryptV2, decryptV2 } from '~/crypto'; import { isSystemUserId } from '~/mcp/enum'; interface StoreTokensParams { diff --git a/packages/api/src/oauth/tokens.ts b/packages/api/src/oauth/tokens.ts index 8489bdcb97..e51e91b842 100644 --- a/packages/api/src/oauth/tokens.ts +++ b/packages/api/src/oauth/tokens.ts @@ -1,9 +1,8 @@ import axios from 'axios'; -import { logger } from '@librechat/data-schemas'; +import { logger, encryptV2, decryptV2 } from '@librechat/data-schemas'; import { TokenExchangeMethodEnum } from 'librechat-data-provider'; import type { TokenMethods } from '@librechat/data-schemas'; import type { AxiosError } from 'axios'; -import { encryptV2, decryptV2 } from '~/crypto'; import { logAxiosError } from '~/utils'; export function createHandleOAuthToken({ diff --git a/packages/api/src/prompts/artifacts/components.ts b/packages/api/src/prompts/artifacts/components.ts new file mode 100644 index 0000000000..ca463002c9 --- /dev/null +++ b/packages/api/src/prompts/artifacts/components.ts @@ -0,0 +1,711 @@ +export interface ShadcnComponent { + componentName: string; + importDocs: string; + usageDocs: string; +} + +/** Essential Components */ +const essentialComponents: Record = { + avatar: { + componentName: 'Avatar', + importDocs: 'import { Avatar, AvatarFallback, AvatarImage } from "/components/ui/avatar"', + usageDocs: ` + + + CN +`, + }, + button: { + componentName: 'Button', + importDocs: 'import { Button } from "/components/ui/button"', + usageDocs: ` +`, + }, + card: { + componentName: 'Card', + importDocs: ` +import { + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, +} from "/components/ui/card"`, + usageDocs: ` + + + Card Title + Card Description + + +

+    <CardContent>
+      <p>Card Content</p>
+    </CardContent>
+    <CardFooter>
+      <p>Card Footer</p>
+    </CardFooter>
+  </Card>
`, + }, + checkbox: { + componentName: 'Checkbox', + importDocs: 'import { Checkbox } from "/components/ui/checkbox"', + usageDocs: '', + }, + input: { + componentName: 'Input', + importDocs: 'import { Input } from "/components/ui/input"', + usageDocs: '', + }, + label: { + componentName: 'Label', + importDocs: 'import { Label } from "/components/ui/label"', + usageDocs: '', + }, + radioGroup: { + componentName: 'RadioGroup', + importDocs: ` +import { Label } from "/components/ui/label" +import { RadioGroup, RadioGroupItem } from "/components/ui/radio-group"`, + usageDocs: ` + +
+ + +
+
+ + +
+
`, + }, + select: { + componentName: 'Select', + importDocs: ` +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "/components/ui/select"`, + usageDocs: ` +`, + }, + textarea: { + componentName: 'Textarea', + importDocs: 'import { Textarea } from "/components/ui/textarea"', + usageDocs: '