LibreChat/api/server/services/MCP.spec.js

// Mock all dependencies - define mocks before imports
jest.mock('@librechat/data-schemas', () => ({
logger: {
debug: jest.fn(),
error: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
},
}));
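// Mocking the logger keeps test output quiet and lets tests assert on
// logger.warn/logger.error calls made by the code under test.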
// Create mock registry instance
const mockRegistryInstance = {
getOAuthServers: jest.fn(() => Promise.resolve(new Set())),
getAllServerConfigs: jest.fn(() => Promise.resolve({})),
getServerConfig: jest.fn(() => Promise.resolve(null)),
ensureConfigServers: jest.fn(() => Promise.resolve({})),
};
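// The hoisted jest.mock factories below close over this instance, so tests
// can reconfigure its behavior in place, e.g.
// mockRegistryInstance.getServerConfig.mockResolvedValue(someConfig).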
// Create isMCPDomainAllowed mock that can be configured per-test
const mockIsMCPDomainAllowed = jest.fn(() => Promise.resolve(true));
const mockGetAppConfig = jest.fn(() => Promise.resolve({}));
jest.mock('@librechat/api', () => {
const actual = jest.requireActual('@librechat/api');
return {
...actual,
sendEvent: jest.fn(),
get isMCPDomainAllowed() {
return mockIsMCPDomainAllowed;
},
GenerationJobManager: {
emitChunk: jest.fn(),
},
};
});
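// The getter defers the lookup of mockIsMCPDomainAllowed to access time,
// so the hoisted factory never captures a stale reference; tests flip the
// mock per scenario, e.g. mockIsMCPDomainAllowed.mockResolvedValue(false).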
const { logger } = require('@librechat/data-schemas');
const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys, Constants } = require('librechat-data-provider');
const D = Constants.mcp_delimiter;
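// D is the delimiter used to build namespaced MCP tool keys (`toolName${D}serverName`).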
const {
createMCPTool,
createMCPTools,
getMCPSetupData,
checkOAuthFlowStatus,
getServerConnectionStatus,
createUnavailableToolStub,
} = require('./MCP');
jest.mock('./Config', () => ({
loadCustomConfig: jest.fn(),
get getAppConfig() {
return mockGetAppConfig;
},
}));
jest.mock('~/config', () => ({
getMCPManager: jest.fn(),
getFlowStateManager: jest.fn(),
getOAuthReconnectionManager: jest.fn(),
getMCPServersRegistry: jest.fn(() => mockRegistryInstance),
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(),
}));
jest.mock('~/models', () => ({
findToken: jest.fn(),
createToken: jest.fn(),
updateToken: jest.fn(),
}));
jest.mock('./Tools/mcp', () => ({
reinitMCPServer: jest.fn(),
}));
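// reinitMCPServer is mocked so reconnect-path tests can assert it was called
// without establishing real MCP server connections.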
jest.mock('./GraphTokenService', () => ({
getGraphApiToken: jest.fn(),
}));
describe('tests for the new helper functions used by the MCP connection status endpoints', () => {
let mockGetMCPManager;
let mockGetFlowStateManager;
let mockGetLogStores;
let mockGetOAuthReconnectionManager;
beforeEach(() => {
jest.clearAllMocks();
jest.spyOn(MCPOAuthHandler, 'generateFlowId');
mockGetMCPManager = require('~/config').getMCPManager;
mockGetFlowStateManager = require('~/config').getFlowStateManager;
mockGetLogStores = require('~/cache').getLogStores;
mockGetOAuthReconnectionManager = require('~/config').getOAuthReconnectionManager;
});
describe('getMCPSetupData', () => {
const mockUserId = 'user-123';
const mockConfig = {
server1: { type: 'stdio' },
server2: { type: 'http' },
};
beforeEach(() => {
mockGetMCPManager.mockReturnValue({
appConnections: { getLoaded: jest.fn(() => new Map()) },
getUserConnections: jest.fn(() => new Map()),
});
mockRegistryInstance.getOAuthServers.mockResolvedValue(new Set());
mockRegistryInstance.getAllServerConfigs.mockResolvedValue(mockConfig);
});
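// Baseline fixture: no loaded app connections, no user connections, and no
// OAuth servers; individual tests override these mocks per scenario.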
it('should successfully return MCP setup data', async () => {
This fixes two issues: - Cross-tenant contamination: the readThrough cache key was unscoped (just serverName), so concurrent multi-tenant requests for same-named servers would overwrite each other's entries - TTL expiry: tool calls happening >5s after config resolution would fail with "Configuration not found" because the readThrough entry had expired Changes: - Add optional serverConfig param to MCPManager.callTool — uses provided config directly, falling back to getServerConfig lookup for YAML/user servers - Thread serverConfig from createMCPTool through createToolInstance closure to callTool - Remove readThrough write from ensureSingleConfigServer — config-source servers are only accessible via configServers param (tenant-scoped) - Remove server-only readThrough fallback from getServerConfig - Increase config cache hash from 8 to 16 hex chars (64-bit) - Add isUserSourced boundary tests for all source/dbId combinations - Fix double Object.keys call in getMCPTools controller - Update test assertions for new getServerConfig behavior * fix: cache base configs for config-server users; narrow upsertConfigCache error handling - Refactor getAllServerConfigs to separate base config fetch (YAML + DB) from config-server layering. Base configs are cached via readThroughCacheAll regardless of whether configServers is provided, eliminating uncached MongoDB queries per request for config-server users - Narrow upsertConfigCache catch to duplicate-key errors only; infrastructure errors (Redis timeouts, network failures) now propagate instead of being silently swallowed, preventing inspection storms during outages * fix: restore correct merge order and document upsert error matching - Restore YAML → Config → User DB precedence in getAllServerConfigs (user DB servers have highest precedence, matching the JSDoc contract) - Add source comment on upsertConfigCache duplicate-key detection linking to the two cache implementations that define the error message * feat: complete config-source server support across all execution paths Wire configServers through the entire agent execution pipeline so config-source MCP servers are fully functional — not just visible in listings but executable in agent sessions. - Thread configServers into handleTools.js agent tool pipeline: resolve config servers from tenant context before MCP tool iteration, pass to getServerConfig, createMCPTools, and createMCPTool - Thread configServers into agent instructions pipeline: applyContextToAgent → getMCPInstructionsForServers → formatInstructionsForContext, resolved in client.js before agent context application - Add configServers param to createMCPTool and createMCPTools for reconnect path fallback - Add source field to redactServerSecrets allowlist for client UI differentiation of server tiers - Narrow invalidateConfigCache to only clear readThroughCacheAll (merged results), preserving YAML individual-server readThrough entries - Update context.spec.ts assertions for new configServers parameter * fix: add missing mocks for config-source server dependencies in client.test.js Mock getMCPServersRegistry, getAppConfig, and getTenantId that were added to client.js but not reflected in the test file's jest.mock declarations. * fix: update formatInstructionsForContext assertions for configServers param The test assertions expected formatInstructionsForContext to be called with only the server names array, but it now receives configServers as a second argument after the config-source server feature wiring. 
* fix: move configServers resolution before MCP tool loop to avoid TDZ configServers was declared with `let` after the first tool loop but referenced inside it via getServerConfig(), causing a ReferenceError temporal dead zone. Move declaration and resolution before the loop, using tools.some(mcpToolPattern) to gate the async resolution. * fix: address review findings — cache bypass, discoverServerTools gap, DRY - #2: getAllServerConfigs now always uses getBaseServerConfigs (cached via readThroughCacheAll) instead of bypassing it when configServers is present. Extracts user-DB entries from cached base by diffing against YAML keys to maintain YAML → Config → User DB merge order without extra MongoDB calls. - #3: Add configServers param to ToolDiscoveryOptions and thread it through discoverServerTools → getServerConfig so config-source servers are discoverable during OAuth reconnection flows. - #6: Replace inline import() type annotations in context.ts with proper import type { ParsedServerConfig } per AGENTS.md conventions. - #7: Extract resolveConfigServers(req) helper in MCP.js and use it from handleTools.js and client.js, eliminating the duplicated 6-line config resolution pattern. - #10: Restore removed "why" comment explaining getLoaded() vs getAll() choice in getMCPSetupData — documents non-obvious correctness constraint. - #11: Fix incomplete JSDoc param type on resolveAllMcpConfigs. * fix: consolidate imports, reorder constants, fix YAML-DB merge edge case - Merge duplicate @librechat/data-schemas requires in MCP.js into one - Move resolveConfigServers after module-level constants - Fix getAllServerConfigs edge case where user-DB entry overriding a YAML entry with the same name was excluded from userDbConfigs; now uses reference equality check to detect DB-overwritten YAML keys * fix: replace fragile string-match error detection with proper upsert method Add upsert() to IServerConfigsRepositoryInterface and all implementations (InMemory, Redis, RedisAggregateKey, DB). This eliminates the brittle error message string match ('already exists in cache') in upsertConfigCache that was the only thing preventing cross-process init races from silently discarding inspection results. Each implementation handles add-or-update atomically: - InMemory: direct Map.set() - Redis: direct cache.set() - RedisAggregateKey: read-modify-write under write lock - DB: delegates to update() (DB servers use explicit add() with ACL setup) * fix: wire configServers through remaining HTTP endpoints - getMCPServerById: use resolveAllMcpConfigs instead of bare getServerConfig - reinitialize route: resolve configServers before getServerConfig - auth-values route: resolve configServers before getServerConfig - getOAuthHeaders: accept configServers param, thread from callers - Update mcp.spec.js tests to mock getAllServerConfigs for GET by name * fix: thread serverConfig through getConnection for config-source servers Config-source servers exist only in configCacheRepo, not in YAML cache or DB. When callTool → getConnection → getUserConnection → getServerConfig runs without configServers, it returns undefined and throws. Fix by threading the pre-resolved serverConfig (providedConfig) from callTool through getConnection → getUserConnection → createUserConnectionInternal, using it as a fallback before the registry lookup. 
* fix: thread configServers through reinit, reconnect, and tool definition paths Wire configServers through every remaining call chain that creates or reconnects MCP server connections: - reinitMCPServer: accepts serverConfig and configServers, uses them for getServerConfig fallback, getConnection, and discoverServerTools - reconnectServer: accepts and passes configServers to reinitMCPServer - createMCPTools/createMCPTool: pass configServers to reconnectServer - ToolService.loadToolDefinitionsWrapper: resolves configServers from req, passes to both reinitMCPServer call sites - reinitialize route: passes serverConfig and configServers to reinitMCPServer * fix: address review findings — simplify merge, harden error paths, fix log labels - Simplify getAllServerConfigs merge: replace fragile reference-equality loop with direct spread { ...yamlConfigs, ...configServers, ...base } - Guard upsertConfigCache in lazyInitConfigServer catch block so cache failures don't mask the original inspection error - Deduplicate getYamlServerNames cold-start with promise dedup pattern - Remove dead `if (!mcpConfig)` guard in getMCPSetupData - Fix hardcoded "App server" in ServerConfigsCacheRedisAggregateKey error messages — now uses this.namespace for correct Config/App labeling - Remove misleading OAuth callback comment about readThrough cache - Move resolveConfigServers after module-level constants in MCP.js * fix: clear rejected yamlServerNames promise, fix config-source reinspect, fix reset log label - Clear yamlServerNamesPromise on rejection so transient cache errors don't permanently prevent ensureConfigServers from working - Skip reinspectServer for config-source servers (source: 'config') in reinitMCPServer — they lack a CACHE/DB storage location; retry is handled by CONFIG_STUB_RETRY_MS in ensureConfigServers - Use source field instead of dbId for storageLocation derivation - Fix remaining hardcoded "App" in reset() leaderCheck message * fix: persist oauthHeaders in flow state for config-source OAuth servers The OAuth callback route has no JWT auth context and cannot resolve config-source server configs. Previously, getOAuthHeaders would silently return {} for config-source servers, dropping custom token exchange headers. Now oauthHeaders are persisted in MCPOAuthFlowMetadata during flow initiation (which has auth context), and the callback reads them from the stored flow state with a fallback to the registry lookup for YAML/user-DB servers. 
* fix: update tests for getMCPSetupData null guard removal and ToolService mock - MCP.spec.js: update test to expect graceful handling of null mcpConfig instead of a throw (getAllServerConfigs always returns an object) - MCP.js: add defensive || {} for Object.entries(mcpConfig) in case of null from test mocks - ToolService.spec.js: add missing mock for ~/server/services/MCP (resolveConfigServers) * fix: address review findings — DRY, naming, logging, dead code, defensive guards - #1: Simplify getAllServerConfigs to single getBaseServerConfigs call, eliminating redundant double-fetch of cacheConfigsRepo.getAll() - #2: Add warning log when oauthHeaders absent from OAuth callback flow state - #3: Extract resolveAllMcpConfigs to MCP.js service layer; controller imports shared helper instead of reimplementing - #4: Rename _serverConfig/_provider to capturedServerConfig/capturedProvider in createToolInstance — these are actively used, not unused - #5: Log rejected results from ensureConfigServers Promise.allSettled so cache errors are visible instead of silently dropped - #6: Remove dead 'MCP config not found' error handlers from routes - #7: Document circular-dependency reason for dynamic require in clearMcpConfigCache - #8: Remove logger.error from withTimeout to prevent double-logging timeouts - #10: Add explicit userId guard in ServerConfigsDB.upsert with clear error message - #12: Use spread instead of mutation in addServer for immutability consistency - Add upsert mock to ensureConfigServers.test.ts DB mock - Update route tests for resolveAllMcpConfigs import change * fix: restore correct merge priority, use immutable spread, fix test mock - getAllServerConfigs: { ...configServers, ...base } so userDB wins over configServers, matching documented "User DB (highest)" priority - lazyInitConfigServer: use immutable spread instead of direct mutation for parsedConfig.source, consistent with addServer fix - Fix test to mock getAllServerConfigs as {} instead of null, remove unnecessary || {} defensive guard in getMCPSetupData * fix: error handling, stable hashing, flatten nesting, remove dead param - Wrap resolveConfigServers/resolveAllMcpConfigs in try/catch with graceful {} fallback so transient DB/cache errors don't crash tool pipeline - Sort keys in configCacheKey JSON.stringify for deterministic hashing regardless of object property insertion order - Flatten clearMcpConfigCache from 3 nested try-catch to early returns; document that user connections are cleaned up lazily (accepted tradeoff) - Remove dead configServers param from getAppToolFunctions (never passed) - Add security rationale comment for source field in redactServerSecrets * fix: use recursive key-sorting replacer in configCacheKey to prevent cross-tenant cache collision The array replacer in JSON.stringify acts as a property allowlist at every nesting depth, silently dropping nested keys like headers['X-API-Key'], oauth.client_secret, etc. Two configs with different nested values but identical top-level structure produced the same hash, causing cross-tenant cache hits and potential credential contamination. Switch to a function replacer that recursively sorts keys at all depths without dropping any properties. Also document the known gap in getOAuthServers: config-source OAuth servers are not covered by auto-reconnection or uninstall cleanup because callers lack request context. 
* fix: move clearMcpConfigCache to packages/api to eliminate circular dependency The function only depends on MCPServersRegistry and MCPManager, both of which live in packages/api. Import it directly from @librechat/api in the CJS layer instead of using dynamic require('~/config'). * chore: imports/fields ordering * fix: address review findings — error handling, targeted lookup, test gaps - Narrow resolveAllMcpConfigs catch to only wrap ensureConfigServers so getAppConfig/getAllServerConfigs failures propagate instead of masking infrastructure errors as empty server lists. - Use targeted getServerConfig in getMCPServerById instead of fetching all server configs for a single-server lookup. - Forward configServers to inner createMCPTool calls so reconnect path works for config-source servers. - Update getAllServerConfigs JSDoc to document disjoint-key design. - Add OAuth callback oauthHeaders fallback tests (flow state present vs registry fallback). - Add resolveConfigServers/resolveAllMcpConfigs unit tests covering happy path and error propagation. * fix: add getOAuthReconnectionManager mock to OAuth callback tests * chore: imports ordering
2026-03-28 10:36:43 -04:00
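// Arrange: registry exposes a plain stdio server and an HTTP server flagged as requiring OAuth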
const mockConfigWithOAuth = {
server1: { type: 'stdio' },
server2: { type: 'http', requiresOAuth: true },
};
mockRegistryInstance.getAllServerConfigs.mockResolvedValue(mockConfigWithOAuth);
const mockAppConnections = new Map([['server1', { status: 'connected' }]]);
const mockUserConnections = new Map([['server2', { status: 'disconnected' }]]);
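// Stub an MCP manager whose app-level connections load asynchronously (getLoaded resolves a Map)
// and whose user-scoped connections are returned synchronously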
const mockMCPManager = {
appConnections: { getLoaded: jest.fn(() => Promise.resolve(mockAppConnections)) },
getUserConnections: jest.fn(() => mockUserConnections),
};
mockGetMCPManager.mockReturnValue(mockMCPManager);
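// Exercise getMCPSetupData with the mocked registry and manager in place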
const result = await getMCPSetupData(mockUserId);
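// Assert: config-source servers are ensured before the merged configs are fetched,
// and getAllServerConfigs receives the user id plus a second options argument (tenant/role context)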
expect(mockRegistryInstance.ensureConfigServers).toHaveBeenCalled();
expect(mockRegistryInstance.getAllServerConfigs).toHaveBeenCalledWith(
mockUserId,
expect.any(Object),
);
expect(mockGetMCPManager).toHaveBeenCalledWith(mockUserId);
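// Connection state is read from both the app-level and the user-scoped connection managers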
expect(mockMCPManager.appConnections.getLoaded).toHaveBeenCalled();
expect(mockMCPManager.getUserConnections).toHaveBeenCalledWith(mockUserId);
This fixes two issues: - Cross-tenant contamination: the readThrough cache key was unscoped (just serverName), so concurrent multi-tenant requests for same-named servers would overwrite each other's entries - TTL expiry: tool calls happening >5s after config resolution would fail with "Configuration not found" because the readThrough entry had expired Changes: - Add optional serverConfig param to MCPManager.callTool — uses provided config directly, falling back to getServerConfig lookup for YAML/user servers - Thread serverConfig from createMCPTool through createToolInstance closure to callTool - Remove readThrough write from ensureSingleConfigServer — config-source servers are only accessible via configServers param (tenant-scoped) - Remove server-only readThrough fallback from getServerConfig - Increase config cache hash from 8 to 16 hex chars (64-bit) - Add isUserSourced boundary tests for all source/dbId combinations - Fix double Object.keys call in getMCPTools controller - Update test assertions for new getServerConfig behavior * fix: cache base configs for config-server users; narrow upsertConfigCache error handling - Refactor getAllServerConfigs to separate base config fetch (YAML + DB) from config-server layering. Base configs are cached via readThroughCacheAll regardless of whether configServers is provided, eliminating uncached MongoDB queries per request for config-server users - Narrow upsertConfigCache catch to duplicate-key errors only; infrastructure errors (Redis timeouts, network failures) now propagate instead of being silently swallowed, preventing inspection storms during outages * fix: restore correct merge order and document upsert error matching - Restore YAML → Config → User DB precedence in getAllServerConfigs (user DB servers have highest precedence, matching the JSDoc contract) - Add source comment on upsertConfigCache duplicate-key detection linking to the two cache implementations that define the error message * feat: complete config-source server support across all execution paths Wire configServers through the entire agent execution pipeline so config-source MCP servers are fully functional — not just visible in listings but executable in agent sessions. - Thread configServers into handleTools.js agent tool pipeline: resolve config servers from tenant context before MCP tool iteration, pass to getServerConfig, createMCPTools, and createMCPTool - Thread configServers into agent instructions pipeline: applyContextToAgent → getMCPInstructionsForServers → formatInstructionsForContext, resolved in client.js before agent context application - Add configServers param to createMCPTool and createMCPTools for reconnect path fallback - Add source field to redactServerSecrets allowlist for client UI differentiation of server tiers - Narrow invalidateConfigCache to only clear readThroughCacheAll (merged results), preserving YAML individual-server readThrough entries - Update context.spec.ts assertions for new configServers parameter * fix: add missing mocks for config-source server dependencies in client.test.js Mock getMCPServersRegistry, getAppConfig, and getTenantId that were added to client.js but not reflected in the test file's jest.mock declarations. * fix: update formatInstructionsForContext assertions for configServers param The test assertions expected formatInstructionsForContext to be called with only the server names array, but it now receives configServers as a second argument after the config-source server feature wiring. 
* fix: move configServers resolution before MCP tool loop to avoid TDZ configServers was declared with `let` after the first tool loop but referenced inside it via getServerConfig(), causing a ReferenceError temporal dead zone. Move declaration and resolution before the loop, using tools.some(mcpToolPattern) to gate the async resolution. * fix: address review findings — cache bypass, discoverServerTools gap, DRY - #2: getAllServerConfigs now always uses getBaseServerConfigs (cached via readThroughCacheAll) instead of bypassing it when configServers is present. Extracts user-DB entries from cached base by diffing against YAML keys to maintain YAML → Config → User DB merge order without extra MongoDB calls. - #3: Add configServers param to ToolDiscoveryOptions and thread it through discoverServerTools → getServerConfig so config-source servers are discoverable during OAuth reconnection flows. - #6: Replace inline import() type annotations in context.ts with proper import type { ParsedServerConfig } per AGENTS.md conventions. - #7: Extract resolveConfigServers(req) helper in MCP.js and use it from handleTools.js and client.js, eliminating the duplicated 6-line config resolution pattern. - #10: Restore removed "why" comment explaining getLoaded() vs getAll() choice in getMCPSetupData — documents non-obvious correctness constraint. - #11: Fix incomplete JSDoc param type on resolveAllMcpConfigs. * fix: consolidate imports, reorder constants, fix YAML-DB merge edge case - Merge duplicate @librechat/data-schemas requires in MCP.js into one - Move resolveConfigServers after module-level constants - Fix getAllServerConfigs edge case where user-DB entry overriding a YAML entry with the same name was excluded from userDbConfigs; now uses reference equality check to detect DB-overwritten YAML keys * fix: replace fragile string-match error detection with proper upsert method Add upsert() to IServerConfigsRepositoryInterface and all implementations (InMemory, Redis, RedisAggregateKey, DB). This eliminates the brittle error message string match ('already exists in cache') in upsertConfigCache that was the only thing preventing cross-process init races from silently discarding inspection results. Each implementation handles add-or-update atomically: - InMemory: direct Map.set() - Redis: direct cache.set() - RedisAggregateKey: read-modify-write under write lock - DB: delegates to update() (DB servers use explicit add() with ACL setup) * fix: wire configServers through remaining HTTP endpoints - getMCPServerById: use resolveAllMcpConfigs instead of bare getServerConfig - reinitialize route: resolve configServers before getServerConfig - auth-values route: resolve configServers before getServerConfig - getOAuthHeaders: accept configServers param, thread from callers - Update mcp.spec.js tests to mock getAllServerConfigs for GET by name * fix: thread serverConfig through getConnection for config-source servers Config-source servers exist only in configCacheRepo, not in YAML cache or DB. When callTool → getConnection → getUserConnection → getServerConfig runs without configServers, it returns undefined and throws. Fix by threading the pre-resolved serverConfig (providedConfig) from callTool through getConnection → getUserConnection → createUserConnectionInternal, using it as a fallback before the registry lookup. 
* fix: thread configServers through reinit, reconnect, and tool definition paths Wire configServers through every remaining call chain that creates or reconnects MCP server connections: - reinitMCPServer: accepts serverConfig and configServers, uses them for getServerConfig fallback, getConnection, and discoverServerTools - reconnectServer: accepts and passes configServers to reinitMCPServer - createMCPTools/createMCPTool: pass configServers to reconnectServer - ToolService.loadToolDefinitionsWrapper: resolves configServers from req, passes to both reinitMCPServer call sites - reinitialize route: passes serverConfig and configServers to reinitMCPServer * fix: address review findings — simplify merge, harden error paths, fix log labels - Simplify getAllServerConfigs merge: replace fragile reference-equality loop with direct spread { ...yamlConfigs, ...configServers, ...base } - Guard upsertConfigCache in lazyInitConfigServer catch block so cache failures don't mask the original inspection error - Deduplicate getYamlServerNames cold-start with promise dedup pattern - Remove dead `if (!mcpConfig)` guard in getMCPSetupData - Fix hardcoded "App server" in ServerConfigsCacheRedisAggregateKey error messages — now uses this.namespace for correct Config/App labeling - Remove misleading OAuth callback comment about readThrough cache - Move resolveConfigServers after module-level constants in MCP.js * fix: clear rejected yamlServerNames promise, fix config-source reinspect, fix reset log label - Clear yamlServerNamesPromise on rejection so transient cache errors don't permanently prevent ensureConfigServers from working - Skip reinspectServer for config-source servers (source: 'config') in reinitMCPServer — they lack a CACHE/DB storage location; retry is handled by CONFIG_STUB_RETRY_MS in ensureConfigServers - Use source field instead of dbId for storageLocation derivation - Fix remaining hardcoded "App" in reset() leaderCheck message * fix: persist oauthHeaders in flow state for config-source OAuth servers The OAuth callback route has no JWT auth context and cannot resolve config-source server configs. Previously, getOAuthHeaders would silently return {} for config-source servers, dropping custom token exchange headers. Now oauthHeaders are persisted in MCPOAuthFlowMetadata during flow initiation (which has auth context), and the callback reads them from the stored flow state with a fallback to the registry lookup for YAML/user-DB servers. 
* fix: update tests for getMCPSetupData null guard removal and ToolService mock - MCP.spec.js: update test to expect graceful handling of null mcpConfig instead of a throw (getAllServerConfigs always returns an object) - MCP.js: add defensive || {} for Object.entries(mcpConfig) in case of null from test mocks - ToolService.spec.js: add missing mock for ~/server/services/MCP (resolveConfigServers) * fix: address review findings — DRY, naming, logging, dead code, defensive guards - #1: Simplify getAllServerConfigs to single getBaseServerConfigs call, eliminating redundant double-fetch of cacheConfigsRepo.getAll() - #2: Add warning log when oauthHeaders absent from OAuth callback flow state - #3: Extract resolveAllMcpConfigs to MCP.js service layer; controller imports shared helper instead of reimplementing - #4: Rename _serverConfig/_provider to capturedServerConfig/capturedProvider in createToolInstance — these are actively used, not unused - #5: Log rejected results from ensureConfigServers Promise.allSettled so cache errors are visible instead of silently dropped - #6: Remove dead 'MCP config not found' error handlers from routes - #7: Document circular-dependency reason for dynamic require in clearMcpConfigCache - #8: Remove logger.error from withTimeout to prevent double-logging timeouts - #10: Add explicit userId guard in ServerConfigsDB.upsert with clear error message - #12: Use spread instead of mutation in addServer for immutability consistency - Add upsert mock to ensureConfigServers.test.ts DB mock - Update route tests for resolveAllMcpConfigs import change * fix: restore correct merge priority, use immutable spread, fix test mock - getAllServerConfigs: { ...configServers, ...base } so userDB wins over configServers, matching documented "User DB (highest)" priority - lazyInitConfigServer: use immutable spread instead of direct mutation for parsedConfig.source, consistent with addServer fix - Fix test to mock getAllServerConfigs as {} instead of null, remove unnecessary || {} defensive guard in getMCPSetupData * fix: error handling, stable hashing, flatten nesting, remove dead param - Wrap resolveConfigServers/resolveAllMcpConfigs in try/catch with graceful {} fallback so transient DB/cache errors don't crash tool pipeline - Sort keys in configCacheKey JSON.stringify for deterministic hashing regardless of object property insertion order - Flatten clearMcpConfigCache from 3 nested try-catch to early returns; document that user connections are cleaned up lazily (accepted tradeoff) - Remove dead configServers param from getAppToolFunctions (never passed) - Add security rationale comment for source field in redactServerSecrets * fix: use recursive key-sorting replacer in configCacheKey to prevent cross-tenant cache collision The array replacer in JSON.stringify acts as a property allowlist at every nesting depth, silently dropping nested keys like headers['X-API-Key'], oauth.client_secret, etc. Two configs with different nested values but identical top-level structure produced the same hash, causing cross-tenant cache hits and potential credential contamination. Switch to a function replacer that recursively sorts keys at all depths without dropping any properties. Also document the known gap in getOAuthServers: config-source OAuth servers are not covered by auto-reconnection or uninstall cleanup because callers lack request context. 
* fix: move clearMcpConfigCache to packages/api to eliminate circular dependency The function only depends on MCPServersRegistry and MCPManager, both of which live in packages/api. Import it directly from @librechat/api in the CJS layer instead of using dynamic require('~/config'). * chore: imports/fields ordering * fix: address review findings — error handling, targeted lookup, test gaps - Narrow resolveAllMcpConfigs catch to only wrap ensureConfigServers so getAppConfig/getAllServerConfigs failures propagate instead of masking infrastructure errors as empty server lists. - Use targeted getServerConfig in getMCPServerById instead of fetching all server configs for a single-server lookup. - Forward configServers to inner createMCPTool calls so reconnect path works for config-source servers. - Update getAllServerConfigs JSDoc to document disjoint-key design. - Add OAuth callback oauthHeaders fallback tests (flow state present vs registry fallback). - Add resolveConfigServers/resolveAllMcpConfigs unit tests covering happy path and error propagation. * fix: add getOAuthReconnectionManager mock to OAuth callback tests * chore: imports ordering
2026-03-28 10:36:43 -04:00
expect(result.mcpConfig).toEqual(mockConfigWithOAuth);
expect(result.appConnections).toEqual(mockAppConnections);
expect(result.userConnections).toEqual(mockUserConnections);
expect(result.oauthServers).toEqual(new Set(['server2']));
});
it('should return empty data when no servers are configured', async () => {
mockRegistryInstance.getAllServerConfigs.mockResolvedValue({});
const result = await getMCPSetupData(mockUserId);
expect(result.mcpConfig).toEqual({});
expect(result.oauthServers).toEqual(new Set());
});
it('should handle null values from MCP manager gracefully', async () => {
mockRegistryInstance.getAllServerConfigs.mockResolvedValue(mockConfig);
const mockMCPManager = {
appConnections: { getLoaded: jest.fn(() => Promise.resolve(null)) },
getUserConnections: jest.fn(() => null),
};
mockGetMCPManager.mockReturnValue(mockMCPManager);
mockRegistryInstance.getOAuthServers.mockResolvedValue(new Set());
const result = await getMCPSetupData(mockUserId);
expect(result).toEqual({
mcpConfig: mockConfig,
appConnections: new Map(),
userConnections: new Map(),
oauthServers: new Set(),
});
});
});
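// The tests below exercise checkOAuthFlowStatus, which (per these assertions) looks up
// the flow state keyed by MCPOAuthHandler.generateFlowId(userId, serverName) and derives
// two flags: hasActiveFlow for a PENDING flow still within its TTL, and hasFailedFlow for
// a FAILED status or a PENDING flow that has outlived its TTL. Other statuses and lookup
// errors resolve to { hasActiveFlow: false, hasFailedFlow: false }.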
describe('checkOAuthFlowStatus', () => {
const mockUserId = 'user-123';
const mockServerName = 'test-server';
const mockFlowId = 'flow-123';
beforeEach(() => {
const mockFlowsCache = {};
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetLogStores.mockReturnValue(mockFlowsCache);
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
MCPOAuthHandler.generateFlowId.mockReturnValue(mockFlowId);
});
it('should return false flags when no flow state exists', async () => {
const mockFlowManager = { getFlowState: jest.fn(() => null) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(mockGetLogStores).toHaveBeenCalledWith(CacheKeys.FLOWS);
expect(MCPOAuthHandler.generateFlowId).toHaveBeenCalledWith(mockUserId, mockServerName);
expect(mockFlowManager.getFlowState).toHaveBeenCalledWith(mockFlowId, 'mcp_oauth');
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
});
it('should detect failed flow when status is FAILED', async () => {
const mockFlowState = {
status: 'FAILED',
createdAt: Date.now() - 60000, // 1 minute ago
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found failed OAuth flow'),
expect.objectContaining({
flowId: mockFlowId,
status: 'FAILED',
}),
);
});
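// The timeout cases below assume expiry is computed from elapsed time since createdAt
// exceeding ttl, with 180000 ms used as the default when ttl is not specified.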
it('should detect failed flow when flow has timed out', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 200000, // 200 seconds ago (> 180s TTL)
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found failed OAuth flow'),
expect.objectContaining({
timedOut: true,
}),
);
});
it('should detect failed flow when TTL not specified and flow exceeds default TTL', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 200000, // 200 seconds ago (> 180s default TTL)
// ttl not specified, should use 180000 default
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
});
it('should detect active flow when status is PENDING and within TTL', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 60000, // 1 minute ago (< 180s TTL)
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: true, hasFailedFlow: false });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found active OAuth flow'),
expect.objectContaining({
flowId: mockFlowId,
}),
);
});
it('should return false flags for other statuses', async () => {
const mockFlowState = {
status: 'COMPLETED',
createdAt: Date.now() - 60000,
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
});
it('should handle errors gracefully', async () => {
const mockError = new Error('Flow state error');
const mockFlowManager = {
getFlowState: jest.fn(() => {
throw mockError;
}),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
expect(logger.error).toHaveBeenCalledWith(
expect.stringContaining('Error checking OAuth flows'),
mockError,
);
});
});
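// getServerConnectionStatus, as verified below, resolves a server's effective state with
// app-level connections taking precedence over user-level ones and falls back to
// 'disconnected' when neither exists; requiresOAuth is inferred from membership in the
// oauthServers set, with failed OAuth flows surfaced via the flow-state cache.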
describe('getServerConnectionStatus', () => {
const mockUserId = 'user-123';
const mockServerName = 'test-server';
const mockConfig = { updatedAt: Date.now() };
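// The tests below pin down the status resolution order: an app-level
// connection takes precedence over a user-level one, and with neither
// present the state defaults to 'disconnected', with OAuth-specific
// states layered on top of that baseline.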
it('should return app connection state when available', async () => {
const appConnections = new Map([
[
mockServerName,
{
connectionState: 'connected',
isStale: jest.fn(() => false),
},
],
]);
const userConnections = new Map();
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connected',
});
});
it('should fall back to user connection state when app connection is not available', async () => {
const appConnections = new Map();
const userConnections = new Map([
[
mockServerName,
{
connectionState: 'connecting',
isStale: jest.fn(() => false),
},
],
]);
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connecting',
});
});
it('should default to disconnected when no connections exist', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'disconnected',
});
});
it('should prioritize app connection over user connection', async () => {
const appConnections = new Map([
[
mockServerName,
{
connectionState: 'connected',
isStale: jest.fn(() => false),
},
],
]);
const userConnections = new Map([
[
mockServerName,
{
connectionState: 'disconnected',
isStale: jest.fn(() => false),
},
],
]);
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connected',
});
});
it('should indicate OAuth requirement when server is in OAuth servers set', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result.requiresOAuth).toBe(true);
});
it('should handle OAuth flow status when disconnected and requires OAuth with failed flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return failed flow
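// createdAt (1 minute ago) is inside the 3-minute TTL, so the failed
// flow is recent enough to surface as an 'error' connection state.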
const mockFlowManager = {
getFlowState: jest.fn(() => ({
status: 'FAILED',
createdAt: Date.now() - 60000,
ttl: 180000,
})),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'error',
});
});
it('should handle OAuth flow status when disconnected and requires OAuth with active flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return active flow
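// A PENDING flow still inside its TTL window counts as in progress,
// so the status resolves to 'connecting'.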
const mockFlowManager = {
getFlowState: jest.fn(() => ({
status: 'PENDING',
createdAt: Date.now() - 60000, // 1 minute ago
ttl: 180000, // 3 minutes TTL
})),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connecting',
});
});
it('should handle OAuth flow status when disconnected and requires OAuth with no flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return no flow
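// With no flow on record the status falls back to plain 'disconnected',
// even though the server still requires OAuth.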
const mockFlowManager = {
getFlowState: jest.fn(() => null),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'disconnected',
});
});
it('should return connecting state when OAuth server is reconnecting', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager to return true for isReconnecting
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => true),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connecting',
});
expect(mockOAuthReconnectionManager.isReconnecting).toHaveBeenCalledWith(
mockUserId,
mockServerName,
);
});
it('should not check OAuth flow status when server is connected', async () => {
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
const appConnections = new Map([
[
mockServerName,
{
connectionState: 'connected',
isStale: jest.fn(() => false),
},
],
]);
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connected',
});
// Should not call flow manager since server is connected
expect(mockFlowManager.getFlowState).not.toHaveBeenCalled();
});
it('should not check OAuth flow status when server does not require OAuth', async () => {
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set(); // Server not in OAuth servers
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
mockConfig,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'disconnected',
});
// Should not call flow manager since server doesn't require OAuth
expect(mockFlowManager.getFlowState).not.toHaveBeenCalled();
});
});
});
describe('User parameter passing tests', () => {
let mockReinitMCPServer;
let mockGetFlowStateManager;
let mockGetLogStores;
beforeEach(() => {
jest.clearAllMocks();
mockReinitMCPServer = require('./Tools/mcp').reinitMCPServer;
mockGetFlowStateManager = require('~/config').getFlowStateManager;
mockGetLogStores = require('~/cache').getLogStores;
// Setup default mocks
mockGetLogStores.mockReturnValue({});
mockGetFlowStateManager.mockReturnValue({
createFlowWithHandler: jest.fn(),
failFlow: jest.fn(),
});
// Reset domain validation mock to default (allow all)
mockIsMCPDomainAllowed.mockReset();
mockIsMCPDomainAllowed.mockResolvedValue(true);
// Reset registry mocks
mockRegistryInstance.getServerConfig.mockReset();
mockRegistryInstance.getServerConfig.mockResolvedValue(null);
// Reset getAppConfig mock to default (no restrictions)
mockGetAppConfig.mockReset();
mockGetAppConfig.mockResolvedValue({});
});
describe('createMCPTools', () => {
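// Verifies the authenticated user object is threaded through
// createMCPTools into reinitMCPServer unchanged.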
it('should pass user parameter to reinitMCPServer when calling reconnectServer internally', async () => {
const mockUser = { id: 'test-user-123', name: 'Test User' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const mockSignal = new AbortController().signal;
mockReinitMCPServer.mockResolvedValue({
tools: [{ name: 'test-tool' }],
availableTools: {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
},
});
await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'test-server',
provider: 'openai',
signal: mockSignal,
userMCPAuthMap: {},
});
// Verify reinitMCPServer was called with the user
expect(mockReinitMCPServer).toHaveBeenCalledWith(
expect.objectContaining({
user: mockUser,
serverName: 'test-server',
}),
);
expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
});
it('should throw error if user is not provided', async () => {
const mockRes = { write: jest.fn(), flush: jest.fn() };
mockReinitMCPServer.mockResolvedValue({
tools: [],
availableTools: {},
});
// Call without user should throw error
await expect(
createMCPTools({
res: mockRes,
user: undefined,
serverName: 'test-server',
provider: 'openai',
userMCPAuthMap: {},
}),
).rejects.toThrow("Cannot read properties of undefined (reading 'id')");
// Verify reinitMCPServer was not called due to early error
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
describe('createMCPTool', () => {
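// createMCPTool only reinitializes the server when the tool key is
// missing from availableTools; a cache hit must not trigger a reconnect.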
it('should pass user parameter to reinitMCPServer when tool not in cache', async () => {
const mockUser = { id: 'test-user-456', email: 'test@example.com' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const mockSignal = new AbortController().signal;
mockReinitMCPServer.mockResolvedValue({
availableTools: {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
},
});
// Call without availableTools to trigger reinit
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
signal: mockSignal,
userMCPAuthMap: {},
availableTools: undefined, // Force reinit
});
// Verify reinitMCPServer was called with the user
expect(mockReinitMCPServer).toHaveBeenCalledWith(
expect.objectContaining({
user: mockUser,
serverName: 'test-server',
}),
);
expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
});
it('should not call reinitMCPServer when tool is in cache', async () => {
const mockUser = { id: 'test-user-789' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const availableTools = {
[`test-tool${D}test-server`]: {
function: {
description: 'Cached tool',
parameters: { type: 'object', properties: {} },
},
},
};
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: availableTools,
});
// Verify reinitMCPServer was NOT called since tool was in cache
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
describe('reinitMCPServer (via reconnectServer)', () => {
it('should always receive user parameter when called from createMCPTools', async () => {
const mockUser = { id: 'user-001', role: 'admin' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Track all calls to reinitMCPServer
const reinitCalls = [];
mockReinitMCPServer.mockImplementation((params) => {
reinitCalls.push(params);
return Promise.resolve({
tools: [{ name: 'tool1' }, { name: 'tool2' }],
availableTools: {
[`tool1${D}server1`]: { function: { description: 'Tool 1', parameters: {} } },
[`tool2${D}server1`]: { function: { description: 'Tool 2', parameters: {} } },
},
});
});
await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'server1',
provider: 'anthropic',
userMCPAuthMap: {},
});
// Verify all calls to reinitMCPServer had the user
expect(reinitCalls.length).toBeGreaterThan(0);
reinitCalls.forEach((call) => {
expect(call.user).toBe(mockUser);
expect(call.user.id).toBe('user-001');
});
});
it('should always receive user parameter when called from createMCPTool', async () => {
const mockUser = { id: 'user-002', permissions: ['read', 'write'] };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Track all calls to reinitMCPServer
const reinitCalls = [];
mockReinitMCPServer.mockImplementation((params) => {
reinitCalls.push(params);
return Promise.resolve({
availableTools: {
[`my-tool${D}my-server`]: {
function: { description: 'My Tool', parameters: {} },
},
},
});
});
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `my-tool${D}my-server`,
provider: 'google',
userMCPAuthMap: {},
availableTools: undefined, // Force reinit
});
// Verify the call to reinitMCPServer had the user
expect(reinitCalls.length).toBe(1);
expect(reinitCalls[0].user).toBe(mockUser);
expect(reinitCalls[0].user.id).toBe('user-002');
});
});
describe('Runtime domain validation', () => {
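// Validation order exercised below: getServerConfig() is consulted first,
// and only configs with a `url` (remote transports) trigger the
// getAppConfig({ role }) lookup and the isMCPDomainAllowed(config,
// allowedDomains) check; stdio configs skip both.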
it('should skip tool creation when domain is not allowed', async () => {
const mockUser = { id: 'domain-test-user', role: 'user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Mock server config with URL (remote server)
mockRegistryInstance.getServerConfig.mockResolvedValue({
url: 'https://disallowed-domain.com/sse',
});
// Mock getAppConfig to return domain restrictions
mockGetAppConfig.mockResolvedValue({
mcpSettings: { allowedDomains: ['allowed-domain.com'] },
});
// Mock domain validation to return false (domain not allowed)
mockIsMCPDomainAllowed.mockResolvedValueOnce(false);
const result = await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
},
});
// Should return undefined for disallowed domain
expect(result).toBeUndefined();
// Should not call reinitMCPServer since domain check failed
expect(mockReinitMCPServer).not.toHaveBeenCalled();
// Verify getAppConfig was called with user role
expect(mockGetAppConfig).toHaveBeenCalledWith({ role: 'user' });
// Verify domain validation was called with correct parameters
expect(mockIsMCPDomainAllowed).toHaveBeenCalledWith(
{ url: 'https://disallowed-domain.com/sse' },
['allowed-domain.com'],
);
});
it('should allow tool creation when domain is allowed', async () => {
const mockUser = { id: 'domain-test-user', role: 'admin' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Mock server config with URL (remote server)
mockRegistryInstance.getServerConfig.mockResolvedValue({
url: 'https://allowed-domain.com/sse',
});
// Mock getAppConfig to return domain restrictions
mockGetAppConfig.mockResolvedValue({
mcpSettings: { allowedDomains: ['allowed-domain.com'] },
});
// Mock domain validation to return true (domain allowed)
mockIsMCPDomainAllowed.mockResolvedValueOnce(true);
const availableTools = {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
};
const result = await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools,
});
// Should create tool successfully
expect(result).toBeDefined();
// Verify getAppConfig was called with user role
expect(mockGetAppConfig).toHaveBeenCalledWith({ role: 'admin' });
});
it('should skip domain validation for stdio transports (no URL)', async () => {
const mockUser = { id: 'stdio-test-user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Mock server config without URL (stdio transport)
mockRegistryInstance.getServerConfig.mockResolvedValue({
command: 'npx',
args: ['@modelcontextprotocol/server'],
});
// Mock getAppConfig (should not be called for stdio)
mockGetAppConfig.mockResolvedValue({
mcpSettings: { allowedDomains: ['restricted-domain.com'] },
});
const availableTools = {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
};
const result = await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools,
});
// Should create tool successfully without domain check
expect(result).toBeDefined();
// Should not call getAppConfig or isMCPDomainAllowed for stdio transport (no URL)
expect(mockGetAppConfig).not.toHaveBeenCalled();
expect(mockIsMCPDomainAllowed).not.toHaveBeenCalled();
});
it('should return empty array from createMCPTools when domain is not allowed', async () => {
const mockUser = { id: 'domain-test-user', role: 'user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Mock server config with URL (remote server)
const serverConfig = { url: 'https://disallowed-domain.com/sse' };
mockRegistryInstance.getServerConfig.mockResolvedValue(serverConfig);
// Mock getAppConfig to return domain restrictions
mockGetAppConfig.mockResolvedValue({
mcpSettings: { allowedDomains: ['allowed-domain.com'] },
});
// Mock domain validation to return false (domain not allowed)
mockIsMCPDomainAllowed.mockResolvedValueOnce(false);
const result = await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'test-server',
provider: 'openai',
userMCPAuthMap: {},
config: serverConfig,
});
// Should return empty array for disallowed domain
expect(result).toEqual([]);
// Should not call reinitMCPServer since domain check failed early
expect(mockReinitMCPServer).not.toHaveBeenCalled();
// Verify getAppConfig was called with user role
expect(mockGetAppConfig).toHaveBeenCalledWith({ role: 'user' });
});
it('should use user role when fetching domain restrictions', async () => {
const adminUser = { id: 'admin-user', role: 'admin' };
const regularUser = { id: 'regular-user', role: 'user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
mockRegistryInstance.getServerConfig.mockResolvedValue({
url: 'https://some-domain.com/sse',
});
// Mock different responses based on role
mockGetAppConfig
.mockResolvedValueOnce({ mcpSettings: { allowedDomains: ['admin-allowed.com'] } })
.mockResolvedValueOnce({ mcpSettings: { allowedDomains: ['user-allowed.com'] } });
mockIsMCPDomainAllowed.mockResolvedValue(true);
const availableTools = {
[`test-tool${D}test-server`]: {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
};
// Call with admin user
await createMCPTool({
res: mockRes,
user: adminUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools,
});
// Reset and call with regular user
mockRegistryInstance.getServerConfig.mockResolvedValue({
url: 'https://some-domain.com/sse',
});
await createMCPTool({
res: mockRes,
user: regularUser,
toolKey: `test-tool${D}test-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools,
});
// Verify getAppConfig was called with correct roles
expect(mockGetAppConfig).toHaveBeenNthCalledWith(1, { role: 'admin' });
expect(mockGetAppConfig).toHaveBeenNthCalledWith(2, { role: 'user' });
});
});
describe('createUnavailableToolStub', () => {
it('should return a tool whose _call returns a valid CONTENT_AND_ARTIFACT two-tuple', async () => {
const stub = createUnavailableToolStub('myTool', 'myServer');
// invoke() goes through langchain's base tool, which checks responseFormat.
// CONTENT_AND_ARTIFACT requires [content, artifact] — a bare string would throw:
// "Tool response format is "content_and_artifact" but the output was not a two-tuple"
const result = await stub.invoke({});
// If we reach here without throwing, the two-tuple format is correct.
// invoke() returns the content portion of [content, artifact] as a string.
expect(result).toContain('temporarily unavailable');
});
});
describe('negative tool cache and throttle interaction', () => {
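// Two cooperating protections are covered here: a per-user-per-server
// reconnect throttle (~10s window) and a negative tool cache keyed by
// toolKey alone, which deduplicates reconnect attempts across users.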
it('should cache tool as missing even when throttled (cross-user dedup)', async () => {
const mockUser = { id: 'throttle-test-user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// First call: reconnect succeeds but tool not found
mockReinitMCPServer.mockResolvedValueOnce({
availableTools: {},
});
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `missing-tool${D}cache-dedup-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
// Second call within 10s for DIFFERENT tool on same server:
// reconnect is throttled (returns null), tool is still cached as missing.
// This is intentional: the cache acts as cross-user dedup since the
// throttle is per-user-per-server and can't prevent N different users
// from each triggering their own reconnect.
const result2 = await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: `other-tool${D}cache-dedup-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
expect(result2).toBeDefined();
expect(result2.name).toContain('other-tool');
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
});
it('should prevent user B from triggering reconnect when user A already cached the tool', async () => {
const userA = { id: 'cache-user-A' };
const userB = { id: 'cache-user-B' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// User A: real reconnect, tool not found → cached
mockReinitMCPServer.mockResolvedValueOnce({
availableTools: {},
});
await createMCPTool({
res: mockRes,
user: userA,
toolKey: `shared-tool${D}cross-user-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
// User B requests the SAME tool within 10s.
// The negative cache is keyed by toolKey (no user prefix), so user B
// gets a cache hit and no reconnect fires. This is the cross-user
// storm protection: without this, user B's unthrottled first request
// would trigger a second reconnect to the same server.
const result = await createMCPTool({
res: mockRes,
user: userB,
toolKey: `shared-tool${D}cross-user-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
expect(result).toBeDefined();
expect(result.name).toContain('shared-tool');
// reinitMCPServer still called only once — user B hit the cache
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
});
it('should prevent user B from triggering reconnect for throttle-cached tools', async () => {
const userA = { id: 'storm-user-A' };
const userB = { id: 'storm-user-B' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// User A: real reconnect for tool-1, tool not found → cached
mockReinitMCPServer.mockResolvedValueOnce({
availableTools: {},
});
await createMCPTool({
res: mockRes,
user: userA,
toolKey: `tool-1${D}storm-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
// User A: tool-2 on same server within 10s → throttled → cached from throttle
await createMCPTool({
res: mockRes,
user: userA,
toolKey: `tool-2${D}storm-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
// User B requests tool-2 — gets cache hit from the throttle-cached entry.
// Without this caching, user B would trigger a real reconnect since
// user B has their own throttle key and hasn't reconnected yet.
const result = await createMCPTool({
res: mockRes,
user: userB,
toolKey: `tool-2${D}storm-server`,
provider: 'openai',
userMCPAuthMap: {},
availableTools: undefined,
});
expect(result).toBeDefined();
expect(result.name).toContain('tool-2');
// Still only 1 real reconnect — user B was protected by the cache
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
});
});
describe('createMCPTools throttle handling', () => {
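// A throttled reconnect should degrade quietly: an empty tool list and
// a debug-level log rather than a warning or error.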
it('should return empty array with debug log when reconnect is throttled', async () => {
const mockUser = { id: 'throttle-tools-user' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// First call: real reconnect
mockReinitMCPServer.mockResolvedValueOnce({
tools: [{ name: 'tool1' }],
availableTools: {
[`tool1${D}throttle-tools-server`]: {
function: { description: 'Tool 1', parameters: {} },
},
},
});
await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'throttle-tools-server',
provider: 'openai',
userMCPAuthMap: {},
});
// Second call within 10s — throttled
const result = await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'throttle-tools-server',
provider: 'openai',
userMCPAuthMap: {},
});
expect(result).toEqual([]);
// reinitMCPServer called only once — second was throttled
expect(mockReinitMCPServer).toHaveBeenCalledTimes(1);
// Should log at debug level (not warn) for throttled case
expect(logger.debug).toHaveBeenCalledWith(expect.stringContaining('Reconnect throttled'));
});
});
describe('User parameter integrity', () => {
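// Beyond the user id, the full user object (including nested metadata)
// must survive the call chain intact.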
it('should preserve user object properties through the call chain', async () => {
const complexUser = {
id: 'complex-user',
name: 'John Doe',
email: 'john@example.com',
metadata: { subscription: 'premium', settings: { theme: 'dark' } },
};
const mockRes = { write: jest.fn(), flush: jest.fn() };
let capturedUser = null;
mockReinitMCPServer.mockImplementation((params) => {
capturedUser = params.user;
return Promise.resolve({
tools: [{ name: 'test' }],
availableTools: {
[`test${D}server`]: { function: { description: 'Test', parameters: {} } },
},
});
});
await createMCPTools({
res: mockRes,
user: complexUser,
serverName: 'server',
provider: 'openai',
userMCPAuthMap: {},
});
// Verify the complete user object was passed
expect(capturedUser).toEqual(complexUser);
expect(capturedUser.id).toBe('complex-user');
expect(capturedUser.metadata.subscription).toBe('premium');
expect(capturedUser.metadata.settings.theme).toBe('dark');
});
it('should throw error when user is null', async () => {
const mockRes = { write: jest.fn(), flush: jest.fn() };
mockReinitMCPServer.mockResolvedValue({
tools: [],
availableTools: {},
});
await expect(
createMCPTools({
res: mockRes,
user: null,
serverName: 'test-server',
provider: 'openai',
userMCPAuthMap: {},
}),
).rejects.toThrow("Cannot read properties of null (reading 'id')");
// Verify reinitMCPServer was not called due to early error
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
});