📡 refactor: MCP Runtime Config Sync with Redis Distributed Locking (#10352)

* 🔄 Refactoring: MCP Runtime Configuration Reload
 - PrivateServerConfigs own cache classes (inMemory and Redis).
 - Connection staleness detection by comparing connection.createdAt with config.lastUpdatedAt
 - ConnectionsRepo accesses the Registry instead of an in-memory config dict and renews stale connections
 - MCPManager: adjusted init of ConnectionsRepo (app level)
 - UserConnectionManager: renew stale connections
 - skipped test, to test "should only clear keys in its own namespace"
 - MCPPrivateServerLoader: new component to manage logic of loading / editing private servers on runtime
 - PrivateServersLoadStatusCache to track private server cache status
 - New unit and integration tests.
Misc:
 - add ESLint rule to enforce a blank line between class methods

* Fix cluster-mode batch update and delete workaround. Fixed unit tests for cluster mode.

* Fix Keyv Redis clear-cache namespace awareness issue + integration test fixes

* chore: address copilot comments

* Fixing rebase issue: removed the MCP config fallback in the single getServerConfig method:
- so as not to interfere with the logic of selecting the right tier (APP/USER/Private)
- If userId is null, getServerConfig should not return configs that belong to the SharedUser tier rather than the APP tier

* chore: add dev-staging branch to workflow triggers for backend, cache integration, and ESLint checks

---------

Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
This commit is contained in:
Atef Bellaaj 2025-11-26 15:11:36 +01:00 committed by Danny Avila
parent 52e6796635
commit ac68e629e6
No known key found for this signature in database
GPG key ID: BF31EEB2C5CA0956
49 changed files with 5244 additions and 257 deletions

View file

@ -9,6 +9,11 @@ import { isLeader } from '~/cluster';
export abstract class BaseRegistryCache {
protected readonly PREFIX = 'MCP::ServersRegistry';
protected abstract readonly cache: Keyv;
protected readonly leaderOnly: boolean;
constructor(leaderOnly?: boolean) {
this.leaderOnly = leaderOnly ?? false;
}
protected async leaderCheck(action: string): Promise<void> {
if (!(await isLeader())) throw new Error(`Only leader can ${action}.`);
@ -20,7 +25,9 @@ export abstract class BaseRegistryCache {
}
public async reset(): Promise<void> {
await this.leaderCheck(`reset ${this.cache.namespace} cache`);
if (this.leaderOnly) {
await this.leaderCheck(`reset ${this.cache.namespace} cache`);
}
await this.cache.clear();
}
}

View file

@ -0,0 +1,115 @@
import type * as t from '~/mcp/types';
import { ServerConfigsCache, ServerConfigsCacheFactory } from '../ServerConfigsCacheFactory';
import { logger } from '@librechat/data-schemas';
/**
 * Base class for per-user private MCP server config caches.
 *
 * Keeps one `ServerConfigsCache` per user, created lazily on first access,
 * and delegates single-user operations (add/update/get/remove/reset) to it.
 * Batch primitives that span multiple users are declared abstract and
 * implemented per storage backend (in-memory vs Redis).
 */
export abstract class PrivateServerConfigsCacheBase {
  protected readonly PREFIX = 'MCP::ServersRegistry::Servers::Private';

  /** Lazily populated map of userId -> that user's config cache. */
  protected caches: Map<string, ServerConfigsCache> = new Map();

  /** Adds a server config to the user's cache (errors if it already exists). */
  public async add(
    userId: string,
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void> {
    await this.getOrCreatePrivateUserCache(userId).add(serverName, config);
  }

  /** Updates an existing server config in the user's cache. */
  public async update(
    userId: string,
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void> {
    await this.getOrCreatePrivateUserCache(userId).update(serverName, config);
  }

  /**
   * Get a specific server config from a user's cache.
   */
  public async get(userId: string, serverName: string): Promise<t.ParsedServerConfig | undefined> {
    return await this.getOrCreatePrivateUserCache(userId).get(serverName);
  }

  /**
   * Get all server configs for a user.
   */
  public async getAll(userId: string): Promise<Record<string, t.ParsedServerConfig>> {
    return await this.getOrCreatePrivateUserCache(userId).getAll();
  }

  /**
   * Check if a user has a cache instance loaded.
   */
  public abstract has(userId: string): Promise<boolean>;

  /** Removes a single server config from the user's cache. */
  public async remove(userId: string, serverName: string): Promise<void> {
    await this.getOrCreatePrivateUserCache(userId).remove(serverName);
  }

  /** Clears every server config in the user's cache. */
  public async reset(userId: string): Promise<void> {
    return this.getOrCreatePrivateUserCache(userId).reset();
  }

  // ============= BATCH OPERATION PRIMITIVES =============
  // Simple primitives for MCPPrivateServerLoader orchestration - no business logic

  /**
   * Update server config in ALL user caches that already have it.
   * Efficient: Uses pattern-based scan, skips users who don't have it.
   * Use case: Metadata changed (command, args, env)
   */
  public abstract updateServerConfigIfExists(
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void>;

  /**
   * Add server config ONLY to users whose caches are already initialized.
   * Skips users without initialized caches (doesn't create new caches).
   * Use case: Granting access to existing users
   */
  public abstract addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void>;

  /**
   * Remove server config ONLY from users whose caches exist.
   * Ignores users without initialized caches.
   * Use case: Revoking access from users
   */
  public abstract removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void>;

  /**
   * Find all users who have this server in their cache.
   * Primitive for determining affected users.
   */
  public abstract findUsersWithServer(serverName: string): Promise<string[]>;

  /**
   * Clear all private server configs for all users (nuclear option).
   * Use sparingly - typically only for testing or full reset.
   */
  public abstract resetAll(): Promise<void>;

  /**
   * Returns the user's cache, creating and registering one on first use.
   * Throws when `userId` is falsy, since every private cache is user-scoped.
   */
  protected getOrCreatePrivateUserCache(userId: string): ServerConfigsCache {
    if (!userId) {
      const message = 'userId is required to get or create private user cache';
      logger.error(message);
      throw new Error(message);
    }

    let cache = this.caches.get(userId);
    if (cache === undefined) {
      cache = ServerConfigsCacheFactory.create(userId, 'Private', false);
      this.caches.set(userId, cache);
    }
    return cache;
  }
}

View file

@ -0,0 +1,32 @@
import { cacheConfig } from '~/cache';
import { PrivateServerConfigsCacheInMemory } from './PrivateServerConfigsCacheInMemory';
import { PrivateServerConfigsCacheRedis } from './PrivateServerConfigsCacheRedis';
/** Union of the two concrete private-server cache implementations. */
export type PrivateServerConfigsCache =
  | PrivateServerConfigsCacheInMemory
  | PrivateServerConfigsCacheRedis;

/**
 * Factory for creating the appropriate PrivateServerConfigsCache implementation based on deployment mode.
 * Automatically selects between in-memory and Redis-backed storage depending on USE_REDIS config.
 * In single-instance mode (USE_REDIS=false), returns lightweight in-memory cache.
 * In cluster mode (USE_REDIS=true), returns Redis-backed cache with distributed coordination.
 * Provides a unified interface regardless of the underlying storage mechanism.
 */
export class PrivateServerConfigsCacheFactory {
  /**
   * Create a PrivateServerConfigsCache instance.
   * Returns the Redis implementation if Redis is configured, otherwise the
   * in-memory implementation (which uses a simple Map and needs no
   * owner/namespace arguments).
   *
   * @returns PrivateServerConfigsCache instance
   */
  static create(): PrivateServerConfigsCache {
    return cacheConfig.USE_REDIS
      ? new PrivateServerConfigsCacheRedis()
      : new PrivateServerConfigsCacheInMemory();
  }
}

View file

@ -0,0 +1,105 @@
import { ParsedServerConfig } from '~/mcp/types';
import { PrivateServerConfigsCacheBase } from './PrivateServerConfigsCacheBase';
import { logger } from '@librechat/data-schemas';
import { ServerConfigsCacheInMemory } from '../ServerConfigsCacheInMemory';
/**
 * In-memory implementation of the per-user private server config cache.
 * Batch primitives iterate the local `caches` map directly; a user "has a
 * cache" exactly when an entry for them exists in the map.
 */
export class PrivateServerConfigsCacheInMemory extends PrivateServerConfigsCacheBase {
  /** A user's cache is considered initialized iff the map holds an entry for them. */
  public async has(userId: string): Promise<boolean> {
    return this.caches.has(userId);
  }

  /** Overwrites the config in every already-initialized cache that contains it. */
  public async updateServerConfigIfExists(
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    let updatedCount = 0;
    for (const [userId, userCache] of this.caches.entries()) {
      const existing = await userCache.get(serverName);
      if (!existing) {
        continue;
      }
      // In-memory mode: the factory only ever produces in-memory caches here,
      // so the cast exposes the upsert-style set().
      await (userCache as ServerConfigsCacheInMemory).set(serverName, config);
      updatedCount += 1;
      logger.debug(`[MCP][PrivateServers][InMemory] Updated "${serverName}" for user ${userId}`);
    }
    logger.info(
      `[MCP][PrivateServers][InMemory] Propagated config update for "${serverName}" to ${updatedCount} users`,
    );
  }

  /** Grants the server to the listed users, but only where a cache already exists. */
  public async addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    let addedCount = 0;
    for (const userId of userIds) {
      // Only if cache initialized
      if (!this.caches.has(userId)) {
        continue;
      }
      const userCache = this.getOrCreatePrivateUserCache(userId) as ServerConfigsCacheInMemory;
      await userCache.set(serverName, config);
      addedCount += 1;
      logger.debug(`[MCP][PrivateServers][InMemory] Added "${serverName}" to user ${userId}`);
    }
    logger.info(
      `[MCP][PrivateServers][InMemory] Granted access to "${serverName}" for ${addedCount}/${userIds.length} initialized users`,
    );
  }

  /** Revokes the server from the listed users, skipping uninitialized caches. */
  public async removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void> {
    let removedCount = 0;
    for (const userId of userIds) {
      if (!this.caches.has(userId)) {
        continue;
      }
      try {
        await this.getOrCreatePrivateUserCache(userId).remove(serverName);
        removedCount += 1;
        logger.debug(`[MCP][PrivateServers][InMemory] Removed "${serverName}" from user ${userId}`);
      } catch (error) {
        // Ignore - server might not exist for this user
        logger.debug(
          `[MCP][PrivateServers][InMemory] Server "${serverName}" not found for user ${userId}`,
          error,
        );
      }
    }
    logger.info(
      `[MCP][PrivateServers][InMemory] Revoked access to "${serverName}" from ${removedCount}/${userIds.length} users`,
    );
  }

  /** Returns the ids of all users whose cache currently contains the server. */
  public async findUsersWithServer(serverName: string): Promise<string[]> {
    const userIds: string[] = [];
    for (const [userId, userCache] of this.caches.entries()) {
      if (await userCache.get(serverName)) {
        userIds.push(userId);
      }
    }
    return userIds;
  }

  /**
   * Clear ALL servers from ALL user caches (nuclear option).
   */
  public async resetAll(): Promise<void> {
    this.caches.clear();
    logger.info(`[MCP][PrivateServers][InMemory] Cleared ALL user caches`);
  }
}

View file

@ -0,0 +1,284 @@
import { ParsedServerConfig } from '~/mcp/types';
import { keyvRedisClient } from '~/cache';
import { PrivateServerConfigsCacheBase } from './PrivateServerConfigsCacheBase';
import { logger } from '@librechat/data-schemas';
import { cacheConfig } from '~/cache/cacheConfig';
import { batchDeleteKeys, scanKeys } from '~/cache/redisUtils';
/**
 * Redis-backed store for per-user private MCP server configs.
 * Implements the batch primitives of PrivateServerConfigsCacheBase via
 * Redis SCAN plus bulk SET/DEL so config changes propagate across instances.
 *
 * Key layout (as written through Keyv by the per-user caches):
 *   [globalPrefix<sep>]MCP::ServersRegistry::Servers::Private::<userId>:<serverName>
 */
export class PrivateServerConfigsCacheRedis extends PrivateServerConfigsCacheBase {
  /**
   * Detect if Redis is running in cluster mode.
   * In cluster mode, we need to avoid CROSSSLOT errors by using pipelines instead of multi() transactions.
   */
  private isClusterMode(): boolean {
    return cacheConfig.USE_REDIS_CLUSTER;
  }

  /**
   * Whether the user's private-server cache is initialized, i.e. at least one
   * key in this user's namespace exists in Redis.
   * Returns false when Redis or SCAN support is unavailable, or userId is falsy.
   *
   * @param userId - User ID
   */
  public async has(userId: string): Promise<boolean> {
    if (!userId || !keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      return false;
    }
    const pattern = `*${this.PREFIX}::${userId}:*`;
    // COUNT is only a hint to SCAN; we return on the first yielded key since
    // existence is all that matters here.
    for await (const _key of keyvRedisClient.scanIterator({
      MATCH: pattern,
      COUNT: 1,
    })) {
      return true;
    }
    return false; // No keys found - cache not initialized
  }

  /**
   * Update server config in ALL user caches that already have it.
   * Scans for matching keys, then overwrites each with `XX: true` so only
   * pre-existing keys are touched (no user gains access as a side effect).
   *
   * @throws Re-throws any Redis error after logging it.
   */
  public async updateServerConfigIfExists(
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      logger.warn('[MCP][PrivateServers][Redis] Redis SCAN not available');
      return;
    }
    const pattern = this.generateScanKeyPattern(serverName);
    try {
      // Efficient: Pattern-based scan for specific serverName
      // All cache keys that have the serverName
      const keysToUpdate = await scanKeys(keyvRedisClient, pattern);
      if (keysToUpdate.length > 0) {
        const updatedConfig = { ...config, lastUpdatedAt: Date.now() };
        // NOTE(review): hand-built Keyv envelope — assumes Keyv serializes
        // values as JSON `{ value, expires }`; confirm against the Keyv
        // version in use, otherwise reads through Keyv would fail to parse.
        const keyvFormat = { value: updatedConfig, expires: null };
        const serializedConfig = JSON.stringify(keyvFormat);
        const chunkSize = cacheConfig.REDIS_UPDATE_CHUNK_SIZE;
        if (this.isClusterMode()) {
          // Cluster mode: Use individual commands in parallel (no atomicity, but works across slots)
          for (let i = 0; i < keysToUpdate.length; i += chunkSize) {
            const chunk = keysToUpdate.slice(i, i + chunkSize);
            await Promise.all(
              chunk.map((key) => keyvRedisClient!.set(key, serializedConfig, { XX: true })),
            );
          }
        } else {
          // Single-node mode: Use multi() for atomic transactions
          for (let i = 0; i < keysToUpdate.length; i += chunkSize) {
            const chunk = keysToUpdate.slice(i, i + chunkSize);
            const multi = keyvRedisClient.multi();
            for (const key of chunk) {
              multi.set(key, serializedConfig, { XX: true });
            }
            await multi.exec();
          }
        }
        logger.info(
          `[MCP][PrivateServers][Redis] Propagated config update for "${serverName}" to ${keysToUpdate.length} users`,
        );
      } else {
        logger.debug(`[MCP][PrivateServers][Redis] No users have "${serverName}"`);
      }
    } catch (error) {
      logger.error(`[MCP][PrivateServers][Redis] Error updating "${serverName}"`, error);
      throw error;
    }
  }

  /**
   * Add server config ONLY to users whose caches are already initialized.
   * Keys are constructed directly (no per-user SCAN) and written with
   * `NX: true` so an existing entry for a user is never overwritten.
   */
  public async addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    if (!keyvRedisClient) return;
    // Optimized: Single SCAN to get all users with initialized caches
    const allUsersWithCaches = await this.getAllUserIds();
    // Filter to only users with initialized caches
    const eligibleUserIds = userIds.filter((id) => allUsersWithCaches.has(id));
    if (eligibleUserIds.length === 0) {
      logger.info(
        `[MCP][PrivateServers][Redis] No initialized users to grant access to "${serverName}"`,
      );
      return;
    }
    // Batch add using pipeline with NX (only set if key doesn't exist)
    const updatedConfig = { ...config, lastUpdatedAt: Date.now() };
    // NOTE(review): same hand-built Keyv envelope as above — verify format.
    const keyvFormat = { value: updatedConfig, expires: null };
    const serializedConfig = JSON.stringify(keyvFormat);
    const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
    const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;
    const chunkSize = cacheConfig.REDIS_UPDATE_CHUNK_SIZE;
    if (this.isClusterMode()) {
      // Cluster mode: Use individual commands in parallel (no atomicity, but works across slots)
      for (let i = 0; i < eligibleUserIds.length; i += chunkSize) {
        const chunk = eligibleUserIds.slice(i, i + chunkSize);
        await Promise.all(
          chunk.map((userId) => {
            const namespace = `${this.PREFIX}::${userId}`;
            const fullKey = globalPrefix
              ? `${globalPrefix}${separator}${namespace}:${serverName}`
              : `${namespace}:${serverName}`;
            return keyvRedisClient!.set(fullKey, serializedConfig, { NX: true });
          }),
        );
      }
    } else {
      // Single-node mode: Use multi() for atomic transactions
      for (let i = 0; i < eligibleUserIds.length; i += chunkSize) {
        const chunk = eligibleUserIds.slice(i, i + chunkSize);
        const multi = keyvRedisClient.multi();
        for (const userId of chunk) {
          const namespace = `${this.PREFIX}::${userId}`;
          const fullKey = globalPrefix
            ? `${globalPrefix}${separator}${namespace}:${serverName}`
            : `${namespace}:${serverName}`;
          multi.set(fullKey, serializedConfig, { NX: true });
        }
        await multi.exec();
      }
    }
    logger.info(
      `[MCP][PrivateServers][Redis] Granted access to "${serverName}" for ${eligibleUserIds.length}/${userIds.length} initialized users`,
    );
  }

  /**
   * Remove server config ONLY from users whose caches exist.
   * Builds the exact keys from (userId, serverName) and batch-deletes them;
   * deleting a non-existent key is a harmless no-op in Redis.
   */
  public async removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void> {
    if (!keyvRedisClient) return;
    // Optimized: Direct key construction - no SCAN needed!
    // Build full Redis keys directly since we know userId and serverName
    const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
    const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;
    const keysToDelete: string[] = [];
    for (const userId of userIds) {
      // Construct the full Redis key
      const namespace = `${this.PREFIX}::${userId}`;
      const fullKey = globalPrefix
        ? `${globalPrefix}${separator}${namespace}:${serverName}`
        : `${namespace}:${serverName}`;
      keysToDelete.push(fullKey);
    }
    if (keysToDelete.length > 0) {
      // Use utility function for efficient parallel deletion
      const removedCount = await batchDeleteKeys(keyvRedisClient, keysToDelete);
      logger.info(
        `[MCP][PrivateServers][Redis] Revoked access to "${serverName}" from ${removedCount}/${userIds.length} users`,
      );
    }
  }

  /**
   * Find all users who have this server in their cache.
   * Returns an empty array (after logging) on SCAN unavailability or error.
   */
  public async findUsersWithServer(serverName: string): Promise<string[]> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      return [];
    }
    const pattern = this.generateScanKeyPattern(serverName);
    try {
      const keys = await scanKeys(keyvRedisClient, pattern);
      const userIds: string[] = [];
      for (const key of keys) {
        const userId = this.extractUserIdFromKey(key);
        if (userId) {
          userIds.push(userId);
        }
      }
      return userIds;
    } catch (error) {
      logger.error(`[MCP][PrivateServers][Redis] Error finding users with "${serverName}"`, error);
      return [];
    }
  }

  /**
   * Scans Redis to find all unique userIds that have private server configs.
   * This method is used for efficient batch operations (add/update/delete) across all users.
   *
   * Performance note: This scans all private server config keys in Redis.
   * Use sparingly as it can be expensive with many users.
   *
   * @throws Re-throws any Redis SCAN error after logging it.
   */
  private async getAllUserIds(): Promise<Set<string>> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      logger.warn('[MCP][PrivateServerConfigs][Redis] Redis SCAN not available');
      return new Set();
    }
    const userIds = new Set<string>();
    // Pattern to match all private server configs:
    // MCP::ServersRegistry::Servers::Private::<userId>:<serverName>
    const pattern = `*${this.PREFIX}::*:*`;
    try {
      const keys = await scanKeys(keyvRedisClient, pattern);
      for (const key of keys) {
        const userId = this.extractUserIdFromKey(key);
        if (userId) {
          userIds.add(userId);
        }
      }
    } catch (error) {
      logger.error('[MCP][PrivateServerConfigs][Redis] Error scanning for userIds', error);
      throw error;
    }
    return userIds;
  }

  /**
   * Extract userId from a Redis key.
   * Key format: MCP::ServersRegistry::Servers::Private::<userId>:<serverName>
   * Uses lastIndexOf(':') to split, so this assumes serverName contains no
   * ':' characters — TODO confirm server-name validation upstream.
   */
  private extractUserIdFromKey(key: string): string | null {
    // Remove any global prefix, then extract userId
    const keyWithoutGlobalPrefix = key.includes(this.PREFIX)
      ? key.substring(key.indexOf(this.PREFIX))
      : key;
    const withoutPrefix = keyWithoutGlobalPrefix.replace(`${this.PREFIX}::`, '');
    const lastColonIndex = withoutPrefix.lastIndexOf(':');
    if (lastColonIndex === -1) return null;
    return withoutPrefix.substring(0, lastColonIndex);
  }

  /**
   * Clear ALL servers from ALL user caches (nuclear option).
   */
  public async resetAll(): Promise<void> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) return;
    // Pattern to match all private user server configs
    // Format: MCP::ServersRegistry::Servers::Private::<userId>:<serverName>
    const pattern = `*${this.PREFIX}::*:*`;
    // Use utility functions for efficient scan and parallel deletion
    const keysToDelete = await scanKeys(keyvRedisClient, pattern);
    if (keysToDelete.length > 0) {
      await batchDeleteKeys(keyvRedisClient, keysToDelete);
    }
    logger.info(`[MCP][Cache][Redis] Cleared all user caches: ${keysToDelete.length} entries`);
  }

  /** SCAN pattern matching every user's entry for a given serverName. */
  private generateScanKeyPattern(serverName: string): string {
    return `*${this.PREFIX}::*:${serverName}`;
  }
}

View file

@ -0,0 +1,166 @@
import { standardCache, keyvRedisClient } from '~/cache';
import { cacheConfig } from '~/cache/cacheConfig';
import { BaseRegistryCache } from './BaseRegistryCache';
import { logger } from '@librechat/data-schemas';
// Keyv key prefixes for the two kinds of entries this cache stores:
// per-user "fully loaded" flags and per-user load locks.
const LOADED_KEY_PREFIX = 'USER_PRIVATE_SERVERS_LOADED';
const LOCK_KEY_PREFIX = 'USER_PRIVATE_SERVERS_LOAD_LOCK';

// Default TTL values (in milliseconds)
const DEFAULT_LOADED_TTL = 3600 * 1000; // 1 hour - should match cache entry TTL
const DEFAULT_LOCK_TTL = 30 * 1000; // 30 seconds - lock timeout
const DEFAULT_WAIT_INTERVAL = 100; // 100ms between checks

/**
 * Dedicated cache for managing private server loading status with TTL synchronization.
 * Solves three critical issues:
 * 1. TTL Synchronization: Loaded flags expire in sync with cache entries
 * 2. Cache Eviction Detection: When cache expires, flag expires too
 * 3. Race Condition Prevention: Distributed locking prevents concurrent loads
 *
 * Design:
 * - Loaded flags have same TTL as cache entries (prevents desync)
 * - Distributed locks prevent multiple processes loading same user
 * - Wait mechanism allows processes to wait for ongoing loads
 * - Works correctly for users with 0 servers (trusts TTL, no cache verification)
 */
class PrivateServersLoadStatusCache extends BaseRegistryCache {
  protected readonly cache = standardCache(`${this.PREFIX}::PrivateServersLoadStatus`);

  /**
   * Check if user's private servers are fully loaded.
   * If false, servers need to be loaded from DB.
   *
   * @param userId - User ID
   * @returns true if user's private servers are fully loaded
   */
  public async isLoaded(userId: string): Promise<boolean> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    // Strict === true: a missing/expired entry (undefined) means "not loaded".
    return (await this.cache.get(key)) === true;
  }

  /**
   * Mark user's private servers as fully loaded with TTL.
   * TTL MUST match the cache entry TTL to prevent desync.
   *
   * @param userId - User ID
   * @param ttl - Time to live in milliseconds (default: 1 hour)
   */
  public async setLoaded(userId: string, ttl: number = DEFAULT_LOADED_TTL): Promise<void> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    const success = await this.cache.set(key, true, ttl);
    this.successCheck(`set loaded status for user ${userId}`, success);
    logger.debug(`[MCP][LoadStatusCache] Marked user ${userId} as loaded (TTL: ${ttl}ms)`);
  }

  /**
   * Acquire a distributed lock for loading a user's private servers.
   * Prevents concurrent processes from loading the same user's servers.
   *
   * Uses atomic Redis SET NX PX for race-free locking.
   * Returns true immediately if Redis is not available (no locking needed in single-process mode).
   *
   * On a Redis error this returns false (treated as "lock not acquired"),
   * so callers fall back to waiting for another process.
   *
   * @param userId - User ID
   * @param ttl - Lock timeout in milliseconds (default: 30s)
   * @returns true if lock acquired, false if already locked
   */
  public async acquireLoadLock(userId: string, ttl: number = DEFAULT_LOCK_TTL): Promise<boolean> {
    const key = `${LOCK_KEY_PREFIX}::${userId}`;
    // Distributed locking only needed when Redis is available (multi-instance mode)
    if (!keyvRedisClient || !('set' in keyvRedisClient)) {
      logger.debug(`[MCP][LoadStatusCache] Redis not available, skipping lock for user ${userId}`);
      return true;
    }
    try {
      // Build the full Redis key (accounting for namespace and global prefix)
      const namespace = `${this.PREFIX}::PrivateServersLoadStatus`;
      const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
      const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;
      const fullKey = globalPrefix
        ? `${globalPrefix}${separator}${namespace}:${key}`
        : `${namespace}:${key}`;
      // Redis SET with NX (only if not exists) and PX (millisecond expiry) - ATOMIC!
      const result = await keyvRedisClient.set(fullKey, Date.now().toString(), {
        NX: true, // Only set if key doesn't exist
        PX: ttl, // Expiry in milliseconds
      });
      const acquired = result === 'OK';
      if (acquired) {
        logger.debug(
          `[MCP][LoadStatusCache] Acquired load lock for user ${userId} (TTL: ${ttl}ms)`,
        );
      } else {
        logger.debug(`[MCP][LoadStatusCache] Load lock already held for user ${userId}`);
      }
      return acquired;
    } catch (error) {
      logger.error(`[MCP][LoadStatusCache] Error acquiring lock for user ${userId}:`, error);
      return false;
    }
  }

  /**
   * Release the distributed lock for a user's private server loading.
   * Should be called in a finally block to ensure lock is always released.
   *
   * NOTE(review): the lock is acquired with a manually-built raw Redis key but
   * released through Keyv's delete — this assumes Keyv maps `key` to exactly
   * `namespace:key` (plus global prefix). Verify; if the formats differ, locks
   * are never deleted here and only expire via their TTL.
   *
   * @param userId - User ID
   */
  public async releaseLoadLock(userId: string): Promise<void> {
    const key = `${LOCK_KEY_PREFIX}::${userId}`;
    await this.cache.delete(key);
    logger.debug(`[MCP][LoadStatusCache] Released load lock for user ${userId}`);
  }

  /**
   * Wait for another process to finish loading a user's private servers.
   * Used when a lock is already held by another process.
   *
   * @param userId - User ID
   * @param maxWaitTime - Maximum time to wait in milliseconds (default: 5s)
   * @param checkInterval - Interval between checks in milliseconds (default: 100ms)
   * @returns true if loading completed within maxWaitTime, false if timeout
   */
  public async waitForLoad(
    userId: string,
    maxWaitTime: number = 5000,
    checkInterval: number = DEFAULT_WAIT_INTERVAL,
  ): Promise<boolean> {
    const startTime = Date.now();
    while (Date.now() - startTime < maxWaitTime) {
      const loaded = await this.isLoaded(userId);
      if (loaded) {
        logger.debug(`[MCP][LoadStatusCache] User ${userId} loading completed by another process`);
        return true;
      }
      // Wait before checking again
      await new Promise((resolve) => setTimeout(resolve, checkInterval));
    }
    logger.warn(
      `[MCP][LoadStatusCache] Timeout waiting for user ${userId} loading (waited ${maxWaitTime}ms)`,
    );
    return false;
  }

  /**
   * Clear loaded status for a user.
   * Used for testing or manual cache invalidation.
   *
   * @param userId - User ID
   */
  public async clearLoaded(userId: string): Promise<void> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    await this.cache.delete(key);
    logger.debug(`[MCP][LoadStatusCache] Cleared loaded status for user ${userId}`);
  }
}

// Module-level singleton shared by all consumers in this process.
export const privateServersLoadStatusCache = new PrivateServersLoadStatusCache();

View file

@ -5,11 +5,15 @@ import { BaseRegistryCache } from './BaseRegistryCache';
const INITIALIZED = 'INITIALIZED';
/**
* Cache for tracking MCP Servers Registry metadata and status across distributed instances.
* Cache for tracking MCP Servers Registry global metadata and status across distributed instances.
* Uses Redis-backed storage to coordinate state between leader and follower nodes.
* Currently, tracks initialization status to ensure only the leader performs initialization
* while followers wait for completion. Designed to be extended with additional registry
* metadata as needed (e.g., last update timestamps, version info, health status).
* Tracks global initialization status for the registry.
*
* Note: Per-user private server loading status is tracked separately in PrivateServersLoadStatusCache
* to enable TTL synchronization and distributed locking.
*
* Designed to be extended with additional global registry metadata as needed
* (e.g., last update timestamps, version info, health status).
* This cache is only meant to be used internally by registry management components.
*/
class RegistryStatusCache extends BaseRegistryCache {

View file

@ -20,9 +20,13 @@ export class ServerConfigsCacheFactory {
* @param leaderOnly - Whether operations should only be performed by the leader (only applies to Redis)
* @returns ServerConfigsCache instance
*/
static create(owner: string, leaderOnly: boolean): ServerConfigsCache {
static create(
owner: string,
scope: 'Shared' | 'Private',
leaderOnly: boolean,
): ServerConfigsCache {
if (cacheConfig.USE_REDIS) {
return new ServerConfigsCacheRedis(owner, leaderOnly);
return new ServerConfigsCacheRedis(owner, scope, leaderOnly);
}
// In-memory mode uses a simple Map - doesn't need owner/namespace

View file

@ -15,7 +15,7 @@ export class ServerConfigsCacheInMemory {
throw new Error(
`Server "${serverName}" already exists in cache. Use update() to modify existing configs.`,
);
this.cache.set(serverName, config);
this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
}
public async update(serverName: string, config: ParsedServerConfig): Promise<void> {
@ -23,7 +23,15 @@ export class ServerConfigsCacheInMemory {
throw new Error(
`Server "${serverName}" does not exist in cache. Use add() to create new configs.`,
);
this.cache.set(serverName, config);
this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
}
/**
* Sets a server config without checking if it exists (upsert operation).
* Use this for bulk operations where you want to add or update without error handling.
*/
public async set(serverName: string, config: ParsedServerConfig): Promise<void> {
this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
}
public async remove(serverName: string): Promise<void> {
@ -43,4 +51,12 @@ export class ServerConfigsCacheInMemory {
public async reset(): Promise<void> {
this.cache.clear();
}
/**
* Returns a placeholder namespace for consistency with Redis implementation.
* In-memory cache doesn't use namespaces, so this always returns empty string.
*/
public getNamespace(): string {
return '';
}
}

View file

@ -14,13 +14,11 @@ import { BaseRegistryCache } from './BaseRegistryCache';
export class ServerConfigsCacheRedis extends BaseRegistryCache {
protected readonly cache: Keyv;
private readonly owner: string;
private readonly leaderOnly: boolean;
constructor(owner: string, leaderOnly: boolean) {
super();
constructor(owner: string, scope: 'Shared' | 'Private', leaderOnly: boolean) {
super(leaderOnly);
this.owner = owner;
this.leaderOnly = leaderOnly;
this.cache = standardCache(`${this.PREFIX}::Servers::${owner}`);
this.cache = standardCache(`${this.PREFIX}::Servers::${scope}::${owner}`);
}
public async add(serverName: string, config: ParsedServerConfig): Promise<void> {
@ -30,7 +28,7 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
throw new Error(
`Server "${serverName}" already exists in cache. Use update() to modify existing configs.`,
);
const success = await this.cache.set(serverName, config);
const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
this.successCheck(`add ${this.owner} server "${serverName}"`, success);
}
@ -41,10 +39,21 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
throw new Error(
`Server "${serverName}" does not exist in cache. Use add() to create new configs.`,
);
const success = await this.cache.set(serverName, config);
const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
this.successCheck(`update ${this.owner} server "${serverName}"`, success);
}
/**
* Sets a server config without checking if it exists (upsert operation).
* Use this for bulk operations where you want to add or update without error handling.
* Note: Respects leaderOnly flag if set.
*/
public async set(serverName: string, config: ParsedServerConfig): Promise<void> {
if (this.leaderOnly) await this.leaderCheck(`set ${this.owner} MCP servers`);
const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
this.successCheck(`set ${this.owner} server "${serverName}"`, success);
}
public async remove(serverName: string): Promise<void> {
if (this.leaderOnly) await this.leaderCheck(`remove ${this.owner} MCP servers`);
const success = await this.cache.delete(serverName);
@ -68,9 +77,9 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
// Full key format: "prefix::namespace:keyName"
const lastColonIndex = key.lastIndexOf(':');
const keyName = key.substring(lastColonIndex + 1);
const value = await this.cache.get(keyName);
if (value) {
entries.push([keyName, value as ParsedServerConfig]);
const config = (await this.cache.get(keyName)) as ParsedServerConfig | undefined;
if (config) {
entries.push([keyName, config]);
}
}
} else {
@ -79,4 +88,12 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
return fromPairs(entries);
}
/**
* Returns the Redis namespace for this cache instance.
* Used for constructing full Redis keys when needed for batch operations.
*/
public getNamespace(): string {
return this.cache.namespace ?? '';
}
}

View file

@ -0,0 +1,71 @@
import { PrivateServerConfigsCacheFactory } from '../PrivateServerConfigs/PrivateServerConfigsCacheFactory';
import { PrivateServerConfigsCacheInMemory } from '../PrivateServerConfigs/PrivateServerConfigsCacheInMemory';
import { PrivateServerConfigsCacheRedis } from '../PrivateServerConfigs/PrivateServerConfigsCacheRedis';
import { cacheConfig } from '~/cache';
// Mock the cache implementations
jest.mock('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory');
jest.mock('../PrivateServerConfigs/PrivateServerConfigsCacheRedis');
// Mock the cache config module
jest.mock('~/cache', () => ({
cacheConfig: {
USE_REDIS: false,
},
}));
// Verifies that the factory picks the Redis-backed or in-memory cache
// implementation based solely on cacheConfig.USE_REDIS, and constructs
// either one with no arguments.
describe('PrivateServerConfigsCacheFactory', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('create()', () => {
    it('should return PrivateServerConfigsCacheRedis when USE_REDIS is true', () => {
      cacheConfig.USE_REDIS = true;

      const instance = PrivateServerConfigsCacheFactory.create();

      expect(instance).toBeInstanceOf(PrivateServerConfigsCacheRedis);
      expect(PrivateServerConfigsCacheRedis).toHaveBeenCalled();
    });

    it('should return PrivateServerConfigsCacheInMemory when USE_REDIS is false', () => {
      cacheConfig.USE_REDIS = false;

      const instance = PrivateServerConfigsCacheFactory.create();

      expect(instance).toBeInstanceOf(PrivateServerConfigsCacheInMemory);
      expect(PrivateServerConfigsCacheInMemory).toHaveBeenCalled();
    });

    it('should create PrivateServerConfigsCacheInMemory without parameters when USE_REDIS is false', () => {
      cacheConfig.USE_REDIS = false;

      PrivateServerConfigsCacheFactory.create();

      // toHaveBeenCalledWith() with no arguments asserts a zero-arg call.
      expect(PrivateServerConfigsCacheInMemory).toHaveBeenCalledWith();
    });

    it('should create PrivateServerConfigsCacheRedis without parameters when USE_REDIS is true', () => {
      cacheConfig.USE_REDIS = true;

      PrivateServerConfigsCacheFactory.create();

      // toHaveBeenCalledWith() with no arguments asserts a zero-arg call.
      expect(PrivateServerConfigsCacheRedis).toHaveBeenCalledWith();
    });
  });
});

View file

@ -0,0 +1,346 @@
import { expect } from '@playwright/test';
import { ParsedServerConfig } from '~/mcp/types';
// Freeze Date.now() for the whole module so configs carrying a
// lastUpdatedAt stamp compare deterministically; the real implementation
// is kept in originalDateNow and restored in the suite's afterAll hook.
const FIXED_TIME = 1699564800000;
const originalDateNow = Date.now;
Date.now = jest.fn(() => FIXED_TIME);
// Unit tests for PrivateServerConfigsCacheInMemory: a per-user, in-process
// store of private MCP server configs. Date.now is frozen at module load
// (FIXED_TIME above) so timestamp-bearing configs compare deterministically.
describe('PrivateServerConfigsCacheInMemory Tests', () => {
  // The class under test is pulled in via dynamic import in beforeAll;
  // these declarations only reference its types.
  let PrivateServerConfigsCacheInMemory: typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory').PrivateServerConfigsCacheInMemory;
  let cache: InstanceType<
    typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory').PrivateServerConfigsCacheInMemory
  >;

  // Test data
  const mockConfig1: ParsedServerConfig = {
    command: 'node',
    args: ['server1.js'],
    env: { TEST: 'value1' },
    lastUpdatedAt: FIXED_TIME,
  };
  const mockConfig2: ParsedServerConfig = {
    command: 'python',
    args: ['server2.py'],
    env: { TEST: 'value2' },
    lastUpdatedAt: FIXED_TIME,
  };
  const mockConfig3: ParsedServerConfig = {
    command: 'node',
    args: ['server3.js'],
    url: 'http://localhost:3000',
    requiresOAuth: true,
    lastUpdatedAt: FIXED_TIME,
  };

  beforeAll(async () => {
    // Import modules
    const cacheModule = await import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory');
    PrivateServerConfigsCacheInMemory = cacheModule.PrivateServerConfigsCacheInMemory;
  });

  afterAll(() => {
    // Restore the real clock stubbed at module load.
    Date.now = originalDateNow;
  });

  beforeEach(() => {
    // Create a fresh instance for each test
    cache = new PrivateServerConfigsCacheInMemory();
  });

  // add()/get(): per-user keyed storage; add() rejects duplicate names
  // for the same user but allows the same name under different users.
  describe('add and get operations', () => {
    it('should add and retrieve a server config for a user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig1);
    });

    it('should return undefined for non-existent server', async () => {
      const result = await cache.get('user1', 'non-existent');
      expect(result).toBeUndefined();
    });

    it('should throw error when adding duplicate server for same user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await expect(cache.add('user1', 'server1', mockConfig2)).rejects.toThrow(
        'Server "server1" already exists in cache. Use update() to modify existing configs.',
      );
    });

    it('should handle multiple server configs for a single user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user1', 'server3', mockConfig3);
      const result1 = await cache.get('user1', 'server1');
      const result2 = await cache.get('user1', 'server2');
      const result3 = await cache.get('user1', 'server3');
      expect(result1).toEqual(mockConfig1);
      expect(result2).toEqual(mockConfig2);
      expect(result3).toEqual(mockConfig3);
    });

    it('should isolate server configs between different users', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);
      const user1Result = await cache.get('user1', 'server1');
      const user2Result = await cache.get('user2', 'server1');
      expect(user1Result).toEqual(mockConfig1);
      expect(user2Result).toEqual(mockConfig2);
    });
  });

  // getAll(): returns the full name->config map for one user only.
  describe('getAll operation', () => {
    it('should return empty object when user has no servers', async () => {
      const result = await cache.getAll('user1');
      expect(result).toEqual({});
    });

    it('should return all server configs for a user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user1', 'server3', mockConfig3);
      const result = await cache.getAll('user1');
      expect(result).toEqual({
        server1: mockConfig1,
        server2: mockConfig2,
        server3: mockConfig3,
      });
    });

    it('should only return configs for specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server3', mockConfig3);
      const user1Result = await cache.getAll('user1');
      const user2Result = await cache.getAll('user2');
      expect(Object.keys(user1Result).length).toBe(2);
      expect(Object.keys(user2Result).length).toBe(1);
      expect(user1Result.server3).toBeUndefined();
      expect(user2Result.server1).toBeUndefined();
    });
  });

  // update(): only touches existing entries (the inverse contract of add()).
  describe('update operation', () => {
    it('should update an existing server config', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig1);
      await cache.update('user1', 'server1', mockConfig2);
      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig2);
    });

    it('should throw error when updating non-existent server', async () => {
      await expect(cache.update('user1', 'non-existent', mockConfig1)).rejects.toThrow(
        'Server "non-existent" does not exist in cache. Use add() to create new configs.',
      );
    });

    it('should only update for specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);
      await cache.update('user1', 'server1', mockConfig3);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig3);
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
    });
  });

  // remove(): deletes one user's entry; throws if the entry is absent.
  describe('remove operation', () => {
    it('should remove an existing server config', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig1);
      await cache.remove('user1', 'server1');
      expect(await cache.get('user1', 'server1')).toBeUndefined();
    });

    it('should throw error when removing non-existent server', async () => {
      await expect(cache.remove('user1', 'non-existent')).rejects.toThrow(
        'Failed to remove server "non-existent" in cache.',
      );
    });

    it('should only remove from specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);
      await cache.remove('user1', 'server1');
      expect(await cache.get('user1', 'server1')).toBeUndefined();
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
    });

    it('should allow re-adding a removed server', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.remove('user1', 'server1');
      await cache.add('user1', 'server1', mockConfig3);
      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig3);
    });
  });

  // reset(): wipes a single user's entries without touching other users.
  describe('reset operation', () => {
    it('should clear all servers for a specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server3', mockConfig3);
      await cache.reset('user1');
      const user1Result = await cache.getAll('user1');
      const user2Result = await cache.getAll('user2');
      expect(Object.keys(user1Result).length).toBe(0);
      expect(Object.keys(user2Result).length).toBe(1);
    });
  });

  // has(): reports whether a user's cache has been initialized at all.
  describe('has operation', () => {
    it('should return true for users with loaded cache', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.has('user1')).toBe(true);
    });

    it('should return false for users without loaded cache', async () => {
      expect(await cache.has('user1')).toBe(false);
    });
  });

  // updateServerConfigIfExists(): fan-out update across every user that
  // already holds the named server; a no-op for users who don't.
  describe('updateServerConfigIfExists operation', () => {
    it('should update server config for all users who have it', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig1);
      await cache.add('user3', 'server2', mockConfig2);
      await cache.updateServerConfigIfExists('server1', mockConfig3);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig3);
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig3);
      expect(await cache.get('user3', 'server1')).toBeUndefined();
      expect(await cache.get('user3', 'server2')).toEqual(mockConfig2);
    });

    it('should handle case when no users have the server', async () => {
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server3', mockConfig3);
      await expect(cache.updateServerConfigIfExists('server1', mockConfig1)).resolves.not.toThrow();
      expect(await cache.get('user1', 'server2')).toEqual(mockConfig2);
      expect(await cache.get('user2', 'server3')).toEqual(mockConfig3);
    });

    it('should handle case with no loaded user caches', async () => {
      await expect(cache.updateServerConfigIfExists('server1', mockConfig1)).resolves.not.toThrow();
    });
  });

  // addServerConfigIfCacheExists(): adds the server only for listed users
  // whose cache has already been initialized; others are skipped silently.
  describe('addServerConfigIfCacheExists operation', () => {
    it('should add server to specified users with initialized caches', async () => {
      await cache.add('user1', 'other', mockConfig1);
      await cache.add('user2', 'other', mockConfig1);
      await cache.addServerConfigIfCacheExists(['user1', 'user2', 'user3'], 'server1', mockConfig2);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig2);
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
      expect(await cache.get('user3', 'server1')).toBeUndefined();
    });

    it('should not add to users without initialized caches', async () => {
      await cache.addServerConfigIfCacheExists(['user1', 'user2'], 'server1', mockConfig1);
      expect(await cache.get('user1', 'server1')).toBeUndefined();
      expect(await cache.get('user2', 'server1')).toBeUndefined();
    });

    it('should handle empty userIds array', async () => {
      await expect(
        cache.addServerConfigIfCacheExists([], 'server1', mockConfig1),
      ).resolves.not.toThrow();
    });
  });

  // removeServerConfigIfCacheExists(): removes the server for listed users;
  // tolerates users who never had it.
  describe('removeServerConfigIfCacheExists operation', () => {
    it('should remove server from specified users', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig1);
      await cache.add('user3', 'server1', mockConfig1);
      await cache.removeServerConfigIfCacheExists(['user1', 'user3'], 'server1');
      expect(await cache.get('user1', 'server1')).toBeUndefined();
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig1);
      expect(await cache.get('user3', 'server1')).toBeUndefined();
    });

    it('should handle users who do not have the server', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await expect(
        cache.removeServerConfigIfCacheExists(['user1', 'user2'], 'server1'),
      ).resolves.not.toThrow();
      expect(await cache.get('user1', 'server1')).toBeUndefined();
    });

    it('should handle empty userIds array', async () => {
      await expect(cache.removeServerConfigIfCacheExists([], 'server1')).resolves.not.toThrow();
    });
  });

  // findUsersWithServer(): reverse lookup from server name to user ids.
  describe('findUsersWithServer operation', () => {
    it('should return all users who have the server', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig1);
      await cache.add('user3', 'other', mockConfig2);
      const users = await cache.findUsersWithServer('server1');
      expect(users.sort()).toEqual(['user1', 'user2'].sort());
    });

    it('should return empty array if no users have the server', async () => {
      await cache.add('user1', 'other', mockConfig1);
      const users = await cache.findUsersWithServer('server1');
      expect(users).toEqual([]);
    });

    it('should return empty array with no loaded user caches', async () => {
      const users = await cache.findUsersWithServer('server1');
      expect(users).toEqual([]);
    });
  });

  // resetAll(): global wipe — afterwards has() is false for every user.
  describe('resetAll operation', () => {
    it('should clear all servers for all users', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server1', mockConfig1);
      await cache.add('user2', 'server3', mockConfig3);
      await cache.resetAll();
      expect(await cache.has('user1')).toBe(false);
      expect(await cache.has('user2')).toBe(false);
    });

    it('should handle case with no loaded user caches', async () => {
      // Should not throw
      await expect(cache.resetAll()).resolves.not.toThrow();
    });
  });
});

View file

@ -0,0 +1,606 @@
import { expect } from '@playwright/test';
import { ParsedServerConfig } from '~/mcp/types';
describe('PrivateServerConfigsCacheRedis Integration Tests', () => {
let PrivateServerConfigsCacheRedis: typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis').PrivateServerConfigsCacheRedis;
let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
let cache: InstanceType<
typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis').PrivateServerConfigsCacheRedis
>;
// Test data
const mockConfig1: ParsedServerConfig = {
command: 'node',
args: ['server1.js'],
env: { TEST: 'value1' },
};
const mockConfig2: ParsedServerConfig = {
command: 'python',
args: ['server2.py'],
env: { TEST: 'value2' },
};
const mockConfig3: ParsedServerConfig = {
command: 'node',
args: ['server3.js'],
url: 'http://localhost:3000',
requiresOAuth: true,
};
beforeAll(async () => {
// Set up environment variables for Redis (only if not already set)
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
console.log('USING CLUSETER....', process.env.USE_REDIS_CLUSTER);
process.env.REDIS_KEY_PREFIX =
process.env.REDIS_KEY_PREFIX ?? 'PrivateServerConfigsCacheRedis-IntegrationTest';
// Import modules after setting env vars
const cacheModule = await import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis');
const redisClients = await import('~/cache/redisClients');
PrivateServerConfigsCacheRedis = cacheModule.PrivateServerConfigsCacheRedis;
keyvRedisClient = redisClients.keyvRedisClient;
// Ensure Redis is connected
if (!keyvRedisClient) throw new Error('Redis client is not initialized');
// Wait for connection and topology discovery to complete
await redisClients.keyvRedisClientReady;
});
beforeEach(() => {
// Create a fresh instance for each test
cache = new PrivateServerConfigsCacheRedis();
});
afterEach(async () => {
// Clean up: clear all test keys from Redis
if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
const pattern = '*PrivateServerConfigsCacheRedis-IntegrationTest*';
const keysToDelete: string[] = [];
// Collect all keys first
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
keysToDelete.push(key);
}
// Delete in parallel for cluster mode efficiency
if (keysToDelete.length > 0) {
await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
}
}
});
afterAll(async () => {
// Close Redis connection
if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
});
describe('add and get operations', () => {
it('should add and retrieve a server config for a user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
expect(result).toMatchObject(mockConfig1);
});
it('should return undefined for non-existent server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
const result = await cache.get(`user1-${randonPrefix}`, 'non-existent');
expect(result).toBeUndefined();
});
it('should throw error when adding duplicate server for same user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await expect(
cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2),
).rejects.toThrow(
`Server "server1-${randonPrefix}" already exists in cache. Use update() to modify existing configs.`,
);
});
it('should handle multiple server configs for a single user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.add(`user1-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
const result1 = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
const result2 = await cache.get(`user1-${randonPrefix}`, `server2-${randonPrefix}`);
const result3 = await cache.get(`user1-${randonPrefix}`, `server3-${randonPrefix}`);
expect(result1).toMatchObject(mockConfig1);
expect(result2).toMatchObject(mockConfig2);
expect(result3).toMatchObject(mockConfig3);
});
it('should isolate server configs between different users', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
const user1Result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
const user2Result = await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`);
expect(user1Result).toMatchObject(mockConfig1);
expect(user2Result).toMatchObject(mockConfig2);
});
});
describe('getAll operation', () => {
it('should return empty object when user has no servers', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
const result = await cache.getAll(`user1-${randonPrefix}`);
expect(result).toMatchObject({});
});
it('should return all server configs for a user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.add(`user1-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
const result = await cache.getAll(`user1-${randonPrefix}`);
expect(result).toMatchObject({
[`server1-${randonPrefix}`]: mockConfig1,
[`server2-${randonPrefix}`]: mockConfig2,
[`server3-${randonPrefix}`]: mockConfig3,
});
});
it('should only return configs for specific user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
const user1Result = await cache.getAll(`user1-${randonPrefix}`);
const user2Result = await cache.getAll(`user2-${randonPrefix}`);
expect(Object.keys(user1Result).length).toBe(2);
expect(Object.keys(user2Result).length).toBe(1);
expect(user1Result.server3).toBeUndefined();
expect(user2Result.server1).toBeUndefined();
});
});
describe('update operation', () => {
it('should update an existing server config', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig1,
);
await cache.update(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
expect(result).toMatchObject(mockConfig2);
});
it('should throw error when updating non-existent server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await expect(
cache.update(`user1-${randonPrefix}`, 'non-existent', mockConfig1),
).rejects.toThrow(
'Server "non-existent" does not exist in cache. Use add() to create new configs.',
);
});
it('should only update for specific user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
await cache.update(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig3);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
});
});
describe('remove operation', () => {
it('should remove an existing server config', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig1,
);
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
it('should throw error when removing non-existent server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await expect(cache.remove(`user1-${randonPrefix}`, 'non-existent')).rejects.toThrow(
`Failed to remove user1-${randonPrefix} server "non-existent" in cache.`,
);
});
it('should only remove from specific user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
});
it('should allow re-adding a removed server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig3);
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
expect(result).toMatchObject(mockConfig3);
});
});
describe('reset operation', () => {
it('should clear all servers for a specific user', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
await cache.reset(`user1-${randonPrefix}`);
const user1Result = await cache.getAll(`user1-${randonPrefix}`);
const user2Result = await cache.getAll(`user2-${randonPrefix}`);
expect(Object.keys(user1Result).length).toBe(0);
expect(Object.keys(user2Result).length).toBe(1);
});
});
describe('has operation', () => {
it('should return true for users with loaded cache', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
console.log('check');
expect(await cache.has(`user1-${randonPrefix}`)).toBe(true);
});
it('should return false for users without loaded cache', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
expect(await cache.has(`user1-${randonPrefix}`)).toBe(false);
});
});
describe('updateServerConfigIfExists operation', () => {
it('should update server config for all users who have it', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user3-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig3);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
expect(await cache.get(`user3-${randonPrefix}`, `server2-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
});
it('should update lastUpdatedAt timestamp', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, 'server1-share', mockConfig1);
await cache.add(`user2-${randonPrefix}`, 'server1-share', mockConfig1);
const timeBeforeUpdate = Date.now();
await new Promise((r) => setTimeout(() => r(true), 100));
await cache.updateServerConfigIfExists('server1-share', mockConfig2);
const user1Result = await cache.get(`user1-${randonPrefix}`, 'server1-share');
const user2Result = await cache.get(`user2-${randonPrefix}`, 'server1-share');
expect(user1Result).toBeDefined();
expect(user1Result!.lastUpdatedAt! - timeBeforeUpdate).toBeGreaterThan(0);
expect(user2Result!.lastUpdatedAt! - timeBeforeUpdate).toBeGreaterThan(0);
});
it('should handle case when no users have the server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
await expect(
cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig1),
).resolves.not.toThrow();
expect(await cache.get(`user1-${randonPrefix}`, `server2-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
expect(await cache.get(`user2-${randonPrefix}`, `server3-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
});
it('should handle case with no user caches', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await expect(
cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig1),
).resolves.not.toThrow();
});
it('should work across multiple cache instances (distributed scenario)', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
const cache1 = new PrivateServerConfigsCacheRedis();
const cache2 = new PrivateServerConfigsCacheRedis();
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache1.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache2.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig3);
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig3,
);
});
});
describe('addServerConfigIfCacheExists operation', () => {
it('should add server to specified users with initialized caches', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, 'other', mockConfig1);
await cache.add(`user2-${randonPrefix}`, 'other', mockConfig1);
await cache.addServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user2-${randonPrefix}`, `user3-${randonPrefix}`],
`server1-${randonPrefix}`,
mockConfig2,
);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
it('should not add to users without initialized caches', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.addServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
`server1-${randonPrefix}`,
mockConfig1,
);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
it('should handle empty userIds array', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await expect(
cache.addServerConfigIfCacheExists([], `server1-${randonPrefix}`, mockConfig1),
).resolves.not.toThrow();
});
it('should work across multiple cache instances (distributed scenario)', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
const cache1 = new PrivateServerConfigsCacheRedis();
const cache2 = new PrivateServerConfigsCacheRedis();
await cache1.add(`user1-${randonPrefix}`, 'other', mockConfig1);
await cache1.add(`user2-${randonPrefix}`, 'other', mockConfig1);
await cache2.addServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user2-${randonPrefix}`, `user3-${randonPrefix}`],
`server1-${randonPrefix}`,
mockConfig2,
);
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig2,
);
expect(await cache1.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
});
describe('removeServerConfigIfCacheExists operation', () => {
it('should remove server from specified users', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.add(`user3-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache.removeServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user3-${randonPrefix}`],
`server1-${randonPrefix}`,
);
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
mockConfig1,
);
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
it('should handle users who do not have the server', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await expect(
cache.removeServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
`server1-${randonPrefix}`,
),
).resolves.not.toThrow();
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
it('should handle empty userIds array', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
await expect(
cache.removeServerConfigIfCacheExists([], `server1-${randonPrefix}`),
).resolves.not.toThrow();
});
it('should work across multiple cache instances (distributed scenario)', async () => {
const randonPrefix = Math.random().toString(36).substring(2, 8);
const cache1 = new PrivateServerConfigsCacheRedis();
const cache2 = new PrivateServerConfigsCacheRedis();
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache1.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
await cache2.removeServerConfigIfCacheExists(
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
`server1-${randonPrefix}`,
);
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
});
});
describe('findUsersWithServer operation', () => {
  it('should return all users who have the server', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    const serverName = `server1-${suffix}`;
    const firstUser = `user1-${suffix}`;
    const secondUser = `user2-${suffix}`;
    await cache.add(firstUser, serverName, mockConfig1);
    await cache.add(secondUser, serverName, mockConfig1);
    // A user holding an unrelated server must not be reported.
    await cache.add(`user3-${suffix}`, 'other', mockConfig2);
    const users = await cache.findUsersWithServer(serverName);
    expect(users.sort()).toEqual([firstUser, secondUser].sort());
  });
  it('should return empty array if no users have the server', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    await cache.add(`user1-${suffix}`, 'other', mockConfig1);
    expect(await cache.findUsersWithServer(`server1-${suffix}`)).toEqual([]);
  });
  it('should return empty array with no user caches', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    expect(await cache.findUsersWithServer(`server1-${suffix}`)).toEqual([]);
  });
  it('should work across multiple cache instances (distributed scenario)', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    const serverName = `server1-${suffix}`;
    const firstUser = `user1-${suffix}`;
    const secondUser = `user2-${suffix}`;
    const writer = new PrivateServerConfigsCacheRedis();
    const reader = new PrivateServerConfigsCacheRedis();
    await writer.add(firstUser, serverName, mockConfig1);
    await writer.add(secondUser, serverName, mockConfig1);
    await writer.add(`user3-${suffix}`, 'other', mockConfig2);
    // Lookup through a second instance: state lives in Redis, not locally.
    const users = await reader.findUsersWithServer(serverName);
    expect(users.sort()).toEqual([firstUser, secondUser].sort());
  });
});
describe('resetAll operation', () => {
  it('should clear all servers for all users in Redis', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    const userOne = `user1-${suffix}`;
    const userTwo = `user2-${suffix}`;
    await cache.add(userOne, `server1-${suffix}`, mockConfig1);
    await cache.add(userOne, `server2-${suffix}`, mockConfig2);
    await cache.add(userTwo, `server1-${suffix}`, mockConfig1);
    await cache.add(userTwo, `server3-${suffix}`, mockConfig3);
    // Wipe every entry for every user in one call.
    await cache.resetAll();
    expect(await cache.get(userOne, `server1-${suffix}`)).toBeUndefined();
    expect(await cache.get(userOne, `server2-${suffix}`)).toBeUndefined();
    expect(await cache.get(userTwo, `server1-${suffix}`)).toBeUndefined();
    expect(await cache.get(userTwo, `server3-${suffix}`)).toBeUndefined();
  });
  it.skip('should handle case with no user caches', async () => {
    // Currently skipped; resetAll on an empty namespace should be a no-op.
    await expect(cache.resetAll()).resolves.not.toThrow();
  });
  it('should work across multiple cache instances (distributed scenario)', async () => {
    const suffix = Math.random().toString(36).substring(2, 8);
    const writer = new PrivateServerConfigsCacheRedis();
    const resetter = new PrivateServerConfigsCacheRedis();
    await writer.add(`user1-${suffix}`, `server1-${suffix}`, mockConfig1);
    await writer.add(`user2-${suffix}`, `server2-${suffix}`, mockConfig2);
    // A reset issued by a different instance must be visible to the writer.
    await resetter.resetAll();
    expect(await writer.get(`user1-${suffix}`, `server1-${suffix}`)).toBeUndefined();
    expect(await writer.get(`user2-${suffix}`, `server2-${suffix}`)).toBeUndefined();
  });
});
});

View file

@ -0,0 +1,397 @@
import { expect } from '@playwright/test';
describe('PrivateServersLoadStatusCache Integration Tests', () => {
let loadStatusCache: typeof import('../PrivateServersLoadStatusCache').privateServersLoadStatusCache;
let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
let testCounter = 0;
// Jest lifecycle hooks: wire the cache under test to a real Redis instance
// and keep the suite's keyspace clean between test cases.
beforeAll(async () => {
  // Set up environment variables for Redis (only if not already set)
  process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
  process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
  // Dedicated key prefix isolates this suite's keys; afterEach scans for it.
  process.env.REDIS_KEY_PREFIX = 'PrivateServersLoadStatusCache-IntegrationTest';
  // Import modules after setting env vars
  const loadStatusCacheModule = await import('../PrivateServersLoadStatusCache');
  const redisClients = await import('~/cache/redisClients');
  loadStatusCache = loadStatusCacheModule.privateServersLoadStatusCache;
  keyvRedisClient = redisClients.keyvRedisClient;
  // Ensure Redis is connected
  if (!keyvRedisClient) throw new Error('Redis client is not initialized');
  // Wait for Redis connection and topology discovery to complete
  await redisClients.keyvRedisClientReady;
  // Raise the listener cap: concurrent async waits in the tests attach many
  // listeners and would otherwise trigger MaxListenersExceeded warnings.
  process.setMaxListeners(200);
});
beforeEach(() => {
  jest.resetModules();
  // Per-test counter keeps generated user ids unique across cases.
  testCounter++;
});
afterEach(async () => {
  // Clean up: clear all test keys from Redis
  if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
    const pattern = '*PrivateServersLoadStatusCache-IntegrationTest*';
    const keysToDelete: string[] = [];
    // Collect all keys first
    for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
      keysToDelete.push(key);
    }
    // Delete in parallel for cluster mode efficiency
    if (keysToDelete.length > 0) {
      await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
    }
  }
});
afterAll(async () => {
  // Close Redis connection
  if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
});
describe('isLoaded() and setLoaded() integration', () => {
  // Promise-based sleep used to wait out short TTLs.
  const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
  it('should persist loaded status in cache', async () => {
    const subject = `user-persist-${testCounter}`;
    expect(await loadStatusCache.isLoaded(subject)).toBe(false);
    await loadStatusCache.setLoaded(subject, 60000);
    expect(await loadStatusCache.isLoaded(subject)).toBe(true);
  });
  it('should handle multiple users independently', async () => {
    const loadedUsers = [`user-multi-1-${testCounter}`, `user-multi-2-${testCounter}`];
    const untouchedUser = `user-multi-3-${testCounter}`;
    for (const user of loadedUsers) {
      await loadStatusCache.setLoaded(user, 60000);
    }
    for (const user of loadedUsers) {
      expect(await loadStatusCache.isLoaded(user)).toBe(true);
    }
    // A user that was never marked must still read as not loaded.
    expect(await loadStatusCache.isLoaded(untouchedUser)).toBe(false);
  });
  it('should respect TTL expiration (short TTL for testing)', async () => {
    const subject = `user-ttl-expire-${testCounter}`;
    // 1 second TTL, then wait slightly past it.
    await loadStatusCache.setLoaded(subject, 1000);
    expect(await loadStatusCache.isLoaded(subject)).toBe(true);
    await sleep(1100);
    expect(await loadStatusCache.isLoaded(subject)).toBe(false);
  }, 10000);
  it('should allow re-setting loaded status', async () => {
    const subject = `user-reset-${testCounter}`;
    await loadStatusCache.setLoaded(subject, 60000);
    expect(await loadStatusCache.isLoaded(subject)).toBe(true);
    // Clearing then re-marking must round-trip cleanly.
    await loadStatusCache.clearLoaded(subject);
    expect(await loadStatusCache.isLoaded(subject)).toBe(false);
    await loadStatusCache.setLoaded(subject, 60000);
    expect(await loadStatusCache.isLoaded(subject)).toBe(true);
  });
});
describe('acquireLoadLock() and releaseLoadLock() integration', () => {
  it('should acquire lock successfully when available', async () => {
    const subject = `user-lock-acquire-${testCounter}`;
    expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(true);
    // Clean up so later tests are unaffected.
    await loadStatusCache.releaseLoadLock(subject);
  });
  it('should prevent concurrent lock acquisition', async () => {
    const subject = `user-lock-concurrent-${testCounter}`;
    expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(true);
    // A second attempt while the lock is held must fail.
    expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(false);
    await loadStatusCache.releaseLoadLock(subject);
    // Once released, the lock is available again.
    expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(true);
    await loadStatusCache.releaseLoadLock(subject);
  });
  it('should auto-release lock after TTL expires', async () => {
    const subject = `user-lock-ttl-${testCounter}`;
    // 1 second TTL on the lock.
    expect(await loadStatusCache.acquireLoadLock(subject, 1000)).toBe(true);
    // While the TTL is live, acquisition is blocked.
    expect(await loadStatusCache.acquireLoadLock(subject, 1000)).toBe(false);
    await new Promise((resolve) => setTimeout(resolve, 1100));
    // TTL elapsed: the lock can be taken again without an explicit release.
    expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(true);
    await loadStatusCache.releaseLoadLock(subject);
  }, 10000);
  it('should handle locks for multiple users independently', async () => {
    const subjects = [
      `user-lock-multi-1-${testCounter}`,
      `user-lock-multi-2-${testCounter}`,
      `user-lock-multi-3-${testCounter}`,
    ];
    // Each user's lock is independent: all three acquire successfully.
    for (const subject of subjects) {
      expect(await loadStatusCache.acquireLoadLock(subject, 10000)).toBe(true);
    }
    for (const subject of subjects) {
      await loadStatusCache.releaseLoadLock(subject);
    }
  });
  it('should allow release of non-existent lock without error', async () => {
    const subject = `user-lock-nonexist-${testCounter}`;
    await expect(loadStatusCache.releaseLoadLock(subject)).resolves.not.toThrow();
  });
});
describe('waitForLoad() integration', () => {
  // These tests exercise real polling against Redis; the delays below are
  // short but padded (50ms write-settle, generous timeouts) to tolerate CI jitter.
  it('should wait and detect when loaded flag is set', async () => {
    const userId = `user-wait-detect-${testCounter}`;
    // Start waiting in background
    const waitPromise = loadStatusCache.waitForLoad(userId, 2000, 100);
    // Simulate another process setting the loaded flag after 300ms
    const setLoadedPromise = new Promise<void>((resolve) => {
      setTimeout(async () => {
        await loadStatusCache.setLoaded(userId, 60000);
        // Add small delay to ensure Redis write completes
        await new Promise((r) => setTimeout(r, 50));
        resolve();
      }, 300);
    });
    // Await both in parallel - waitPromise should complete first
    const [result] = await Promise.all([waitPromise, setLoadedPromise]);
    expect(result).toBe(true);
  }, 5000);
  it('should timeout if loaded flag is never set', async () => {
    const userId = `user-timeout-${testCounter}`;
    // 300ms timeout, polling every 50ms; nothing ever sets the flag.
    const result = await loadStatusCache.waitForLoad(userId, 300, 50);
    expect(result).toBe(false);
  }, 1000);
  it('should return immediately if already loaded', async () => {
    const userId = `user-immediate-${testCounter}`;
    await loadStatusCache.setLoaded(userId, 60000);
    // Small delay to ensure Redis write completes
    await new Promise((resolve) => setTimeout(resolve, 50));
    const startTime = Date.now();
    const result = await loadStatusCache.waitForLoad(userId, 5000, 100);
    const elapsed = Date.now() - startTime;
    expect(result).toBe(true);
    // If the flag is already set, the wait should not consume the 5s budget.
    expect(elapsed).toBeLessThan(300); // Increased tolerance for CI environments
  });
});
describe('Complete load workflow integration', () => {
  // End-to-end choreography of the lock + loaded-flag protocol as two or more
  // simulated "processes" would run it against a shared Redis.
  it('should simulate distributed load coordination', async () => {
    const userId = `user-distributed-${testCounter}`;
    // Process 1: Acquires lock and loads
    const lock1 = await loadStatusCache.acquireLoadLock(userId, 10000);
    expect(lock1).toBe(true);
    // Process 2: Tries to acquire lock (should fail) and waits
    const lock2 = await loadStatusCache.acquireLoadLock(userId, 10000);
    expect(lock2).toBe(false);
    const waitPromise = loadStatusCache.waitForLoad(userId, 3000, 100);
    // Process 1: Completes loading after 300ms
    const process1Promise = new Promise<void>((resolve) => {
      setTimeout(async () => {
        await loadStatusCache.setLoaded(userId, 60000);
        await new Promise((r) => setTimeout(r, 50)); // Redis write delay
        await loadStatusCache.releaseLoadLock(userId);
        resolve();
      }, 300);
    });
    // Process 2: Should detect completion
    const completed = await waitPromise;
    expect(completed).toBe(true);
    // Both processes should now see it as loaded
    expect(await loadStatusCache.isLoaded(userId)).toBe(true);
    // Wait for process 1 to complete cleanup
    await process1Promise;
  }, 10000);
  it('should handle process crash scenario (lock timeout)', async () => {
    const userId = `user-crash-${testCounter}`;
    // Process 1: Acquires lock but crashes (doesn't release)
    const lock1 = await loadStatusCache.acquireLoadLock(userId, 1000); // 1 second TTL
    expect(lock1).toBe(true);
    // (simulate crash - no releaseLoadLock call)
    // Process 2: Waits for timeout
    const waitResult = await loadStatusCache.waitForLoad(userId, 1500, 200);
    expect(waitResult).toBe(false); // Timeout (process 1 never completed)
    // After lock TTL expires, process 2 can retry
    // (the 1500ms wait above already exceeds the 1000ms lock TTL; the extra
    // 200ms is margin before retrying)
    await new Promise((resolve) => setTimeout(resolve, 200));
    const retryLock = await loadStatusCache.acquireLoadLock(userId, 10000);
    expect(retryLock).toBe(true);
    // Process 2 completes successfully
    await loadStatusCache.setLoaded(userId, 60000);
    await loadStatusCache.releaseLoadLock(userId);
    expect(await loadStatusCache.isLoaded(userId)).toBe(true);
  }, 10000);
  it('should handle concurrent user loads independently', async () => {
    const user1 = `user-concurrent-1-${testCounter}`;
    const user2 = `user-concurrent-2-${testCounter}`;
    const user3 = `user-concurrent-3-${testCounter}`;
    // Simulate 3 users loading concurrently
    const user1Lock = await loadStatusCache.acquireLoadLock(user1, 10000);
    const user2Lock = await loadStatusCache.acquireLoadLock(user2, 10000);
    const user3Lock = await loadStatusCache.acquireLoadLock(user3, 10000);
    expect(user1Lock).toBe(true);
    expect(user2Lock).toBe(true);
    expect(user3Lock).toBe(true);
    // All complete independently
    await Promise.all([
      (async () => {
        await loadStatusCache.setLoaded(user1, 60000);
        await loadStatusCache.releaseLoadLock(user1);
      })(),
      (async () => {
        await loadStatusCache.setLoaded(user2, 60000);
        await loadStatusCache.releaseLoadLock(user2);
      })(),
      (async () => {
        await loadStatusCache.setLoaded(user3, 60000);
        await loadStatusCache.releaseLoadLock(user3);
      })(),
    ]);
    // Small delay for Redis propagation
    await new Promise((resolve) => setTimeout(resolve, 100));
    expect(await loadStatusCache.isLoaded(user1)).toBe(true);
    expect(await loadStatusCache.isLoaded(user2)).toBe(true);
    expect(await loadStatusCache.isLoaded(user3)).toBe(true);
  });
});
describe('TTL synchronization', () => {
  // Real-time TTL tests: each sleep is slightly past the corresponding TTL.
  it('should keep loaded flag and cache entry in sync via TTL', async () => {
    const userId = `user-ttl-sync-${testCounter}`;
    // Set loaded flag with 1 second TTL
    await loadStatusCache.setLoaded(userId, 1000);
    expect(await loadStatusCache.isLoaded(userId)).toBe(true);
    // After TTL expires, both should be gone
    await new Promise((resolve) => setTimeout(resolve, 1100));
    expect(await loadStatusCache.isLoaded(userId)).toBe(false);
    // This simulates cache entry and loaded flag being in sync
    // In real usage, if cache entries expire, loaded flag should also expire
  }, 10000);
  it('should allow different TTLs for different users', async () => {
    const user1 = `user-ttl-diff-1-${testCounter}`;
    const user2 = `user-ttl-diff-2-${testCounter}`;
    await loadStatusCache.setLoaded(user1, 1000); // 1 second
    await loadStatusCache.setLoaded(user2, 3000); // 3 seconds
    expect(await loadStatusCache.isLoaded(user1)).toBe(true);
    expect(await loadStatusCache.isLoaded(user2)).toBe(true);
    // Wait for user1 to expire
    await new Promise((resolve) => setTimeout(resolve, 1100));
    expect(await loadStatusCache.isLoaded(user1)).toBe(false);
    expect(await loadStatusCache.isLoaded(user2)).toBe(true); // Still valid
    // Wait for user2 to expire (1100 + 2000 > 3000ms total)
    await new Promise((resolve) => setTimeout(resolve, 2000));
    expect(await loadStatusCache.isLoaded(user2)).toBe(false);
  }, 10000);
});
describe('clearLoaded() integration', () => {
  it('should clear loaded status immediately', async () => {
    const subject = `user-clear-${testCounter}`;
    await loadStatusCache.setLoaded(subject, 60000);
    expect(await loadStatusCache.isLoaded(subject)).toBe(true);
    // clearLoaded takes effect without waiting for the TTL.
    await loadStatusCache.clearLoaded(subject);
    expect(await loadStatusCache.isLoaded(subject)).toBe(false);
  });
  it('should allow clearing multiple users', async () => {
    const subjects = [`user-clear-multi-1-${testCounter}`, `user-clear-multi-2-${testCounter}`];
    for (const subject of subjects) {
      await loadStatusCache.setLoaded(subject, 60000);
    }
    for (const subject of subjects) {
      await loadStatusCache.clearLoaded(subject);
    }
    for (const subject of subjects) {
      expect(await loadStatusCache.isLoaded(subject)).toBe(false);
    }
  });
});
});

View file

@ -0,0 +1,329 @@
// Mock dependencies BEFORE imports to avoid hoisting issues
// Shared spies: the jest.mock factories below close over these so individual
// tests can program return values and inspect cache/Redis interactions.
const mockGet = jest.fn();
const mockSet = jest.fn();
const mockDelete = jest.fn();
const mockRedisSet = jest.fn();
const mockRedisDel = jest.fn();
// Replace the cache module: standardCache() returns an object whose methods
// forward to the spies above; keyvRedisClient exposes the raw set/del used by
// the lock tests (SET with NX/PX options).
jest.mock('~/cache', () => ({
  standardCache: jest.fn(() => ({
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    get: (...args: any[]) => mockGet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    set: (...args: any[]) => mockSet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    delete: (...args: any[]) => mockDelete(...args),
  })),
  keyvRedisClient: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    set: (...args: any[]) => mockRedisSet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    del: (...args: any[]) => mockRedisDel(...args),
  },
}));
// Empty global prefix + '::' separator keeps expected keys deterministic.
jest.mock('~/cache/cacheConfig', () => ({
  cacheConfig: {
    REDIS_KEY_PREFIX: '',
    GLOBAL_PREFIX_SEPARATOR: '::',
  },
}));
// Stub the logger so tests can assert on emitted debug/warn/error messages.
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    info: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));
// This process always acts as cluster leader in these unit tests.
jest.mock('~/cluster', () => ({
  isLeader: jest.fn().mockResolvedValue(true),
}));
import { privateServersLoadStatusCache as loadStatusCache } from '../PrivateServersLoadStatusCache';
import { logger } from '@librechat/data-schemas';
describe('PrivateServersLoadStatusCache', () => {
beforeEach(() => {
  // Reset call history on all shared mock functions before every test.
  jest.clearAllMocks();
});
describe('isLoaded()', () => {
  it('should return true when user servers are loaded', async () => {
    mockGet.mockResolvedValue(true);
    expect(await loadStatusCache.isLoaded('user1')).toBe(true);
    expect(mockGet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
  });
  it('should return false when user servers are not loaded', async () => {
    // A cache miss resolves to undefined, which maps to "not loaded".
    mockGet.mockResolvedValue(undefined);
    expect(await loadStatusCache.isLoaded('user1')).toBe(false);
    expect(mockGet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
  });
  it('should return false when loaded flag is explicitly false', async () => {
    mockGet.mockResolvedValue(false);
    expect(await loadStatusCache.isLoaded('user1')).toBe(false);
  });
});
describe('setLoaded()', () => {
  it('should set loaded flag with default TTL', async () => {
    mockSet.mockResolvedValue(true);
    await loadStatusCache.setLoaded('user1');
    expect(mockSet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1', true, 3600_000);
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Marked user user1 as loaded (TTL: 3600000ms)',
    );
  });
  it('should set loaded flag with custom TTL', async () => {
    mockSet.mockResolvedValue(true);
    await loadStatusCache.setLoaded('user1', 7200000);
    expect(mockSet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1', true, 7200_000);
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Marked user user1 as loaded (TTL: 7200000ms)',
    );
  });
  it('should throw error if cache.set fails', async () => {
    // A falsy result from the cache write is treated as a failure.
    mockSet.mockResolvedValue(false);
    await expect(loadStatusCache.setLoaded('user1')).rejects.toThrow();
  });
});
describe('acquireLoadLock()', () => {
  // Fully-prefixed Redis key the user1 lock is written under.
  const lockKey =
    'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1';
  it('should acquire lock successfully when no lock exists (using Redis SET NX)', async () => {
    mockRedisSet.mockResolvedValue('OK'); // Redis SET NX returns 'OK' on success
    expect(await loadStatusCache.acquireLoadLock('user1')).toBe(true);
    expect(mockRedisSet).toHaveBeenCalledWith(lockKey, expect.any(String), {
      NX: true,
      PX: 30000,
    });
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Acquired load lock for user user1 (TTL: 30000ms)',
    );
  });
  it('should fail to acquire lock when lock already exists (Redis returns null)', async () => {
    mockRedisSet.mockResolvedValue(null); // Redis SET NX returns null if key exists
    expect(await loadStatusCache.acquireLoadLock('user1')).toBe(false);
    expect(mockRedisSet).toHaveBeenCalledWith(lockKey, expect.any(String), {
      NX: true,
      PX: 30000,
    });
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Load lock already held for user user1',
    );
  });
  it('should acquire lock with custom TTL', async () => {
    mockRedisSet.mockResolvedValue('OK');
    expect(await loadStatusCache.acquireLoadLock('user1', 60_000)).toBe(true);
    expect(mockRedisSet).toHaveBeenCalledWith(lockKey, expect.any(String), {
      NX: true,
      PX: 60_000,
    });
  });
  it('should return false if Redis SET fails with error', async () => {
    // Lock errors are logged and reported as "not acquired" rather than thrown.
    mockRedisSet.mockRejectedValue(new Error('Redis error'));
    expect(await loadStatusCache.acquireLoadLock('user1')).toBe(false);
    expect(logger.error).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Error acquiring lock for user user1:',
      expect.any(Error),
    );
  });
});
describe('releaseLoadLock()', () => {
  it('should release lock successfully', async () => {
    await loadStatusCache.releaseLoadLock('user1');
    expect(mockDelete).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOAD_LOCK::user1');
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Released load lock for user user1',
    );
  });
  it('should not throw error if lock does not exist', async () => {
    mockDelete.mockResolvedValue(undefined);
    await expect(loadStatusCache.releaseLoadLock('user1')).resolves.not.toThrow();
  });
});
describe('waitForLoad()', () => {
  // Both the timer queue and the wall clock are faked here: currentTime is
  // advanced manually in lock-step with jest.advanceTimersByTimeAsync, which
  // suggests waitForLoad() reads Date.now() while polling — confirm against
  // the implementation.
  let mockDateNow: jest.SpyInstance;
  let currentTime: number;
  beforeEach(() => {
    jest.useFakeTimers();
    currentTime = 1000000; // Starting time
    mockDateNow = jest.spyOn(Date, 'now').mockImplementation(() => currentTime);
  });
  afterEach(() => {
    jest.useRealTimers();
    mockDateNow.mockRestore();
  });
  it('should return true when loading completes within timeout', async () => {
    let checkCount = 0;
    mockGet.mockImplementation(async () => {
      checkCount++;
      return checkCount >= 3; // Return true on third check
    });
    const waitPromise = loadStatusCache.waitForLoad('user1', 500, 100);
    // Simulate time passing
    for (let i = 0; i < 3; i++) {
      currentTime += 100;
      await jest.advanceTimersByTimeAsync(100);
    }
    const result = await waitPromise;
    expect(result).toBe(true);
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] User user1 loading completed by another process',
    );
  });
  it('should return false when timeout is reached', async () => {
    mockGet.mockResolvedValue(false); // Never becomes true
    const waitPromise = loadStatusCache.waitForLoad('user1', 300, 100);
    // Advance time past the timeout
    currentTime += 400;
    await jest.advanceTimersByTimeAsync(400);
    const result = await waitPromise;
    expect(result).toBe(false);
    expect(logger.warn).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Timeout waiting for user user1 loading (waited 300ms)',
    );
  });
  it('should use default timeout and check interval', async () => {
    mockGet.mockResolvedValue(true);
    const waitPromise = loadStatusCache.waitForLoad('user1');
    // One default poll interval is enough since the flag is already true.
    currentTime += 100;
    await jest.advanceTimersByTimeAsync(100);
    const result = await waitPromise;
    expect(result).toBe(true);
  });
  it('should poll at specified intervals', async () => {
    let checkCount = 0;
    mockGet.mockImplementation(async () => {
      checkCount++;
      return checkCount >= 4; // Return true on fourth check
    });
    const waitPromise = loadStatusCache.waitForLoad('user1', 1000, 200);
    // Advance time for each poll
    for (let i = 0; i < 4; i++) {
      currentTime += 200;
      await jest.advanceTimersByTimeAsync(200);
    }
    const result = await waitPromise;
    expect(result).toBe(true);
    // Exactly one cache read per poll interval.
    expect(checkCount).toBe(4);
  });
});
describe('clearLoaded()', () => {
  it('should clear loaded status for a user', async () => {
    await loadStatusCache.clearLoaded('user1');
    expect(mockDelete).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
    expect(logger.debug).toHaveBeenCalledWith(
      '[MCP][LoadStatusCache] Cleared loaded status for user user1',
    );
  });
  it('should not throw error if loaded status does not exist', async () => {
    mockDelete.mockResolvedValue(undefined);
    await expect(loadStatusCache.clearLoaded('user1')).resolves.not.toThrow();
  });
});
describe('Edge cases', () => {
  it('should handle multiple users independently', async () => {
    mockRedisSet.mockResolvedValue('OK');
    // Both users acquire their own lock; each writes to its own key.
    expect(await loadStatusCache.acquireLoadLock('user1')).toBe(true);
    expect(await loadStatusCache.acquireLoadLock('user2')).toBe(true);
    expect(mockRedisSet).toHaveBeenCalledWith(
      'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1',
      expect.any(String),
      { NX: true, PX: 30000 },
    );
    expect(mockRedisSet).toHaveBeenCalledWith(
      'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user2',
      expect.any(String),
      { NX: true, PX: 30000 },
    );
  });
  it('should handle concurrent operations on same user', async () => {
    mockRedisSet
      .mockResolvedValueOnce('OK') // First lock attempt succeeds
      .mockResolvedValueOnce(null); // Second lock attempt fails (key exists)
    const attempts = await Promise.all([
      loadStatusCache.acquireLoadLock('user1'),
      loadStatusCache.acquireLoadLock('user1'),
    ]);
    // Exactly one of the two racing attempts wins (order not guaranteed).
    expect(attempts.sort()).toEqual([false, true]);
  });
});
});

View file

@ -36,12 +36,18 @@ describe('RegistryStatusCache Integration Tests', () => {
afterEach(async () => {
// Clean up: clear all test keys from Redis
if (keyvRedisClient) {
if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
const pattern = '*RegistryStatusCache-IntegrationTest*';
if ('scanIterator' in keyvRedisClient) {
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
await keyvRedisClient.del(key);
}
const keysToDelete: string[] = [];
// Collect all keys first
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
keysToDelete.push(key);
}
// Delete in parallel for cluster mode efficiency
if (keysToDelete.length > 0) {
await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
}
}
});

View file

@ -25,11 +25,11 @@ describe('ServerConfigsCacheFactory', () => {
cacheConfig.USE_REDIS = true;
// Act
const cache = ServerConfigsCacheFactory.create('TestOwner', true);
const cache = ServerConfigsCacheFactory.create('TestOwner', 'Private', true);
// Assert
expect(cache).toBeInstanceOf(ServerConfigsCacheRedis);
expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('TestOwner', true);
expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('TestOwner', 'Private', true);
});
it('should return ServerConfigsCacheInMemory when USE_REDIS is false', () => {
@ -37,7 +37,7 @@ describe('ServerConfigsCacheFactory', () => {
cacheConfig.USE_REDIS = false;
// Act
const cache = ServerConfigsCacheFactory.create('TestOwner', false);
const cache = ServerConfigsCacheFactory.create('TestOwner', 'Private', false);
// Assert
expect(cache).toBeInstanceOf(ServerConfigsCacheInMemory);
@ -49,10 +49,10 @@ describe('ServerConfigsCacheFactory', () => {
cacheConfig.USE_REDIS = true;
// Act
ServerConfigsCacheFactory.create('App', true);
ServerConfigsCacheFactory.create('App', 'Shared', true);
// Assert
expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('App', true);
expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('App', 'Shared', true);
});
it('should create ServerConfigsCacheInMemory without parameters when USE_REDIS is false', () => {
@ -60,7 +60,7 @@ describe('ServerConfigsCacheFactory', () => {
cacheConfig.USE_REDIS = false;
// Act
ServerConfigsCacheFactory.create('User', false);
ServerConfigsCacheFactory.create('User', 'Shared', false);
// Assert
// In-memory cache doesn't use owner/leaderOnly parameters

View file

@ -1,5 +1,8 @@
import { expect } from '@playwright/test';
import { ParsedServerConfig } from '~/mcp/types';
const FIXED_TIME = 1699564800000;
const originalDateNow = Date.now;
Date.now = jest.fn(() => FIXED_TIME);
describe('ServerConfigsCacheInMemory Integration Tests', () => {
let ServerConfigsCacheInMemory: typeof import('../ServerConfigsCacheInMemory').ServerConfigsCacheInMemory;
@ -12,12 +15,14 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
command: 'node',
args: ['server1.js'],
env: { TEST: 'value1' },
lastUpdatedAt: FIXED_TIME,
};
const mockConfig2: ParsedServerConfig = {
command: 'python',
args: ['server2.py'],
env: { TEST: 'value2' },
lastUpdatedAt: FIXED_TIME,
};
const mockConfig3: ParsedServerConfig = {
@ -25,6 +30,7 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
args: ['server3.js'],
url: 'http://localhost:3000',
requiresOAuth: true,
lastUpdatedAt: FIXED_TIME,
};
beforeAll(async () => {
@ -33,6 +39,10 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
ServerConfigsCacheInMemory = cacheModule.ServerConfigsCacheInMemory;
});
afterAll(() => {
Date.now = originalDateNow;
});
beforeEach(() => {
// Create a fresh instance for each test
cache = new ServerConfigsCacheInMemory();

View file

@ -4,8 +4,7 @@ import { ParsedServerConfig } from '~/mcp/types';
describe('ServerConfigsCacheRedis Integration Tests', () => {
let ServerConfigsCacheRedis: typeof import('../ServerConfigsCacheRedis').ServerConfigsCacheRedis;
let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
let LeaderElection: typeof import('~/cluster/LeaderElection').LeaderElection;
let checkIsLeader: () => Promise<boolean>;
let cache: InstanceType<typeof import('../ServerConfigsCacheRedis').ServerConfigsCacheRedis>;
// Test data
@ -41,49 +40,42 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
// Import modules after setting env vars
const cacheModule = await import('../ServerConfigsCacheRedis');
const redisClients = await import('~/cache/redisClients');
const leaderElectionModule = await import('~/cluster/LeaderElection');
const clusterModule = await import('~/cluster');
ServerConfigsCacheRedis = cacheModule.ServerConfigsCacheRedis;
keyvRedisClient = redisClients.keyvRedisClient;
LeaderElection = leaderElectionModule.LeaderElection;
checkIsLeader = clusterModule.isLeader;
// Ensure Redis is connected
if (!keyvRedisClient) throw new Error('Redis client is not initialized');
// Wait for connection and topology discovery to complete
await redisClients.keyvRedisClientReady;
// Clear any existing leader key to ensure clean state
await keyvRedisClient.del(LeaderElection.LEADER_KEY);
// Become leader so we can perform write operations (using default election instance)
const isLeader = await checkIsLeader();
expect(isLeader).toBe(true);
});
beforeEach(() => {
// Create a fresh instance for each test with leaderOnly=true
cache = new ServerConfigsCacheRedis('test-user', true);
jest.resetModules();
cache = new ServerConfigsCacheRedis('test-user', 'Shared', false);
});
afterEach(async () => {
// Clean up: clear all test keys from Redis
if (keyvRedisClient) {
if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
const pattern = '*ServerConfigsCacheRedis-IntegrationTest*';
if ('scanIterator' in keyvRedisClient) {
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
await keyvRedisClient.del(key);
}
const keysToDelete: string[] = [];
// Collect all keys first
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
keysToDelete.push(key);
}
// Delete in parallel for cluster mode efficiency
if (keysToDelete.length > 0) {
await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
}
}
});
afterAll(async () => {
// Clear leader key to allow other tests to become leader
if (keyvRedisClient) await keyvRedisClient.del(LeaderElection.LEADER_KEY);
// Close Redis connection
if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
});
@ -92,7 +84,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
it('should add and retrieve a server config', async () => {
await cache.add('server1', mockConfig1);
const result = await cache.get('server1');
expect(result).toEqual(mockConfig1);
expect(result).toMatchObject(mockConfig1);
});
it('should return undefined for non-existent server', async () => {
@ -116,14 +108,14 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
const result2 = await cache.get('server2');
const result3 = await cache.get('server3');
expect(result1).toEqual(mockConfig1);
expect(result2).toEqual(mockConfig2);
expect(result3).toEqual(mockConfig3);
expect(result1).toMatchObject(mockConfig1);
expect(result2).toMatchObject(mockConfig2);
expect(result3).toMatchObject(mockConfig3);
});
it('should isolate caches by owner namespace', async () => {
const userCache = new ServerConfigsCacheRedis('user1', true);
const globalCache = new ServerConfigsCacheRedis('global', true);
const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);
await userCache.add('server1', mockConfig1);
await globalCache.add('server1', mockConfig2);
@ -131,15 +123,15 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
const userResult = await userCache.get('server1');
const globalResult = await globalCache.get('server1');
expect(userResult).toEqual(mockConfig1);
expect(globalResult).toEqual(mockConfig2);
expect(userResult).toMatchObject(mockConfig1);
expect(globalResult).toMatchObject(mockConfig2);
});
});
describe('getAll operation', () => {
it('should return empty object when no servers exist', async () => {
const result = await cache.getAll();
expect(result).toEqual({});
expect(result).toMatchObject({});
});
it('should return all server configs', async () => {
@ -148,7 +140,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
await cache.add('server3', mockConfig3);
const result = await cache.getAll();
expect(result).toEqual({
expect(result).toMatchObject({
server1: mockConfig1,
server2: mockConfig2,
server3: mockConfig3,
@ -165,12 +157,12 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
await cache.add('server3', mockConfig3);
result = await cache.getAll();
expect(Object.keys(result).length).toBe(3);
expect(result.server3).toEqual(mockConfig3);
expect(result.server3).toMatchObject(mockConfig3);
});
it('should only return configs for the specific owner', async () => {
const userCache = new ServerConfigsCacheRedis('user1', true);
const globalCache = new ServerConfigsCacheRedis('global', true);
const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
const globalCache = new ServerConfigsCacheRedis('global', 'Private', false);
await userCache.add('server1', mockConfig1);
await userCache.add('server2', mockConfig2);
@ -181,20 +173,20 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
expect(Object.keys(userResult).length).toBe(2);
expect(Object.keys(globalResult).length).toBe(1);
expect(userResult.server1).toEqual(mockConfig1);
expect(userResult.server1).toMatchObject(mockConfig1);
expect(userResult.server3).toBeUndefined();
expect(globalResult.server3).toEqual(mockConfig3);
expect(globalResult.server3).toMatchObject(mockConfig3);
});
});
describe('update operation', () => {
it('should update an existing server config', async () => {
await cache.add('server1', mockConfig1);
expect(await cache.get('server1')).toEqual(mockConfig1);
expect(await cache.get('server1')).toMatchObject(mockConfig1);
await cache.update('server1', mockConfig2);
const result = await cache.get('server1');
expect(result).toEqual(mockConfig2);
expect(result).toMatchObject(mockConfig2);
});
it('should throw error when updating non-existent server', async () => {
@ -209,28 +201,28 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
await cache.update('server1', mockConfig3);
const result = await cache.getAll();
expect(result.server1).toEqual(mockConfig3);
expect(result.server2).toEqual(mockConfig2);
expect(result.server1).toMatchObject(mockConfig3);
expect(result.server2).toMatchObject(mockConfig2);
});
it('should only update in the specific owner namespace', async () => {
const userCache = new ServerConfigsCacheRedis('user1', true);
const globalCache = new ServerConfigsCacheRedis('global', true);
const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);
await userCache.add('server1', mockConfig1);
await globalCache.add('server1', mockConfig2);
await userCache.update('server1', mockConfig3);
expect(await userCache.get('server1')).toEqual(mockConfig3);
expect(await globalCache.get('server1')).toEqual(mockConfig2);
expect(await userCache.get('server1')).toMatchObject(mockConfig3);
expect(await globalCache.get('server1')).toMatchObject(mockConfig2);
});
});
describe('remove operation', () => {
it('should remove an existing server config', async () => {
await cache.add('server1', mockConfig1);
expect(await cache.get('server1')).toEqual(mockConfig1);
expect(await cache.get('server1')).toMatchObject(mockConfig1);
await cache.remove('server1');
expect(await cache.get('server1')).toBeUndefined();
@ -253,7 +245,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
result = await cache.getAll();
expect(Object.keys(result).length).toBe(1);
expect(result.server1).toBeUndefined();
expect(result.server2).toEqual(mockConfig2);
expect(result.server2).toMatchObject(mockConfig2);
});
it('should allow re-adding a removed server', async () => {
@ -262,12 +254,12 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
await cache.add('server1', mockConfig3);
const result = await cache.get('server1');
expect(result).toEqual(mockConfig3);
expect(result).toMatchObject(mockConfig3);
});
it('should only remove from the specific owner namespace', async () => {
const userCache = new ServerConfigsCacheRedis('user1', true);
const globalCache = new ServerConfigsCacheRedis('global', true);
const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);
await userCache.add('server1', mockConfig1);
await globalCache.add('server1', mockConfig2);
@ -275,7 +267,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
await userCache.remove('server1');
expect(await userCache.get('server1')).toBeUndefined();
expect(await globalCache.get('server1')).toEqual(mockConfig2);
expect(await globalCache.get('server1')).toMatchObject(mockConfig2);
});
});
});