Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 08:50:15 +01:00)
📡 refactor: MCP Runtime Config Sync with Redis Distributed Locking (#10352)
* 🔄 Refactoring: MCP Runtime Configuration Reload
- PrivateServerConfigs now own their cache classes (in-memory and Redis).
- Connection staleness detection by comparing connection.createdAt with config.lastUpdatedAt
- ConnectionsRepository queries the Registry instead of an in-memory config dict and renews stale connections
- MCPManager: adjusted initialization of the ConnectionsRepository (app level)
- UserConnectionManager: renews stale connections
- Added test "should only clear keys in its own namespace"
- MCPPrivateServerLoader: new component that manages the logic of loading/editing private servers at runtime
- PrivateServersLoadStatusCache to track private-server cache load status
- New unit and integration tests.
Misc:
- add ESLint rule to enforce a blank line between class methods
* Fix cluster-mode batch update and delete workaround. Fixed unit tests for cluster mode.
* Fix Keyv Redis clear() cache namespace-awareness issue + integration test fixes
* chore: address Copilot comments
* Fix rebase issue: removed the MCP config fallback in the single getServerConfig method:
- so it does not interfere with the logic of the correct tier (APP/USER/Private)
- if userId is null, getServerConfig should not return configs that belong to the SharedUser tier rather than the APP tier
* chore: add dev-staging branch to workflow triggers for backend, cache integration, and ESLint checks
---------
Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
This commit is contained in:
parent 52e6796635 · commit ac68e629e6
49 changed files with 5244 additions and 257 deletions
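
The staleness detection listed above reduces to a timestamp comparison between a connection and its cached config. A minimal, illustrative TypeScript sketch (names are generic stand-ins, not the exact LibreChat API; the real logic lives in MCPConnection.isStale and the repositories shown in the diff below):

// A connection is considered stale when it was created before the config's last update.
interface ConnLike {
  createdAt: number; // ms epoch, recorded when the connection was established
  disconnect(): Promise<void>;
}

async function renewIfStale<T extends ConnLike>(
  existing: T | undefined,
  configLastUpdatedAt: number | undefined,
  create: () => Promise<T>,
): Promise<T> {
  if (existing && configLastUpdatedAt && existing.createdAt < configLastUpdatedAt) {
    await existing.disconnect(); // drop the outdated connection before recreating it
    existing = undefined;
  }
  return existing ?? (await create());
}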
1  .github/workflows/backend-review.yml  (vendored)

@@ -4,6 +4,7 @@ on:
     branches:
       - main
       - dev
+      - dev-staging
       - release/*
     paths:
       - 'api/**'
1  .github/workflows/<cache-integration workflow>  (vendored)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - dev
+      - dev-staging
       - release/*
     paths:
       - 'packages/api/src/cache/**'
1  .github/workflows/eslint-ci.yml  (vendored)

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - dev
+      - dev-staging
       - release/*
     paths:
       - 'api/**'
<root ESLint flat config>

@@ -236,6 +236,7 @@ export default [
       },
     ],
     //
+    'lines-between-class-members': ['error', 'always', { exceptAfterSingleLine: true }],
     '@typescript-eslint/no-unused-expressions': 'off',
     '@typescript-eslint/no-unused-vars': [
       'warn',

@@ -285,6 +286,9 @@ export default [
   },
   {
     files: ['./packages/api/**/*.ts'],
+    rules: {
+      'lines-between-class-members': ['error', 'always', { exceptAfterSingleLine: true }],
+    },
   },
   {
     files: ['./config/translations/**/*.ts'],
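
For reference, the lines-between-class-members rule enabled here (with exceptAfterSingleLine: true) flags multi-line class members that are not separated by a blank line; an illustrative snippet, not taken from the repository:

class Example {
  a = 1; // single-line members may sit together (exceptAfterSingleLine: true)
  b = 2;

  // a blank line is required before this multi-line member, otherwise ESLint reports an error
  sum(): number {
    return this.a + this.b;
  }
}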
<package.json (test scripts)>

@@ -22,7 +22,7 @@
     "test:ci": "jest --coverage --ci --testPathIgnorePatterns=\"\\.*integration\\.|\\.*helper\\.\"",
     "test:cache-integration:core": "jest --testPathPatterns=\"src/cache/.*\\.cache_integration\\.spec\\.ts$\" --coverage=false",
     "test:cache-integration:cluster": "jest --testPathPatterns=\"src/cluster/.*\\.cache_integration\\.spec\\.ts$\" --coverage=false --runInBand",
-    "test:cache-integration:mcp": "jest --testPathPatterns=\"src/mcp/.*\\.cache_integration\\.spec\\.ts$\" --coverage=false",
+    "test:cache-integration:mcp": "jest --testPathPatterns=\"src/mcp/.*\\.cache_integration\\.spec\\.ts$\" --coverage=false --runInBand --forceExit",
     "test:cache-integration": "npm run test:cache-integration:core && npm run test:cache-integration:cluster && npm run test:cache-integration:mcp",
     "verify": "npm run test:ci",
     "b:clean": "bun run rimraf dist",
<standardCache spec>

@@ -138,6 +138,39 @@ describe('standardCache', () => {
       await cache2.clear();
     });

+    test('clear() should only clear keys in its own namespace', async () => {
+      const cacheFactory = await import('../../cacheFactory');
+
+      const cache1 = cacheFactory.standardCache('namespace-clear-test-1');
+      const cache2 = cacheFactory.standardCache('namespace-clear-test-2');
+
+      // Add data to both caches
+      await cache1.set('key1', 'value1-cache1');
+      await cache1.set('key2', 'value2-cache1');
+      await cache2.set('key1', 'value1-cache2');
+      await cache2.set('key2', 'value2-cache2');
+
+      // Verify both caches have their data
+      expect(await cache1.get('key1')).toBe('value1-cache1');
+      expect(await cache1.get('key2')).toBe('value2-cache1');
+      expect(await cache2.get('key1')).toBe('value1-cache2');
+      expect(await cache2.get('key2')).toBe('value2-cache2');
+
+      // Clear cache1 only
+      await cache1.clear();
+
+      // cache1 should be empty
+      expect(await cache1.get('key1')).toBeUndefined();
+      expect(await cache1.get('key2')).toBeUndefined();
+
+      // cache2 should still have its data
+      expect(await cache2.get('key1')).toBe('value1-cache2');
+      expect(await cache2.get('key2')).toBe('value2-cache2');
+
+      // Cleanup
+      await cache2.clear();
+    });
+
     test('should respect FORCED_IN_MEMORY_CACHE_NAMESPACES', async () => {
       process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = 'ROLES'; // Use a valid cache key
211  packages/api/src/cache/__tests__/redisUtils.cache_integration.spec.ts  (new file, vendored)

@@ -0,0 +1,211 @@
import { batchDeleteKeys, scanKeys } from '../redisUtils';

describe('redisUtils Integration Tests', () => {
  let keyvRedisClient: Awaited<typeof import('../redisClients')>['keyvRedisClient'];
  const testPrefix = 'RedisUtils-Integration-Test';

  beforeAll(async () => {
    // Set up environment variables for Redis (only if not already set)
    process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
    process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
    process.env.REDIS_KEY_PREFIX = process.env.REDIS_KEY_PREFIX ?? testPrefix;
    process.env.REDIS_DELETE_CHUNK_SIZE = '100';

    // Clear module cache to ensure fresh initialization with new env vars
    jest.resetModules();

    // Import modules after setting env vars and clearing cache
    const redisClients = await import('../redisClients');
    keyvRedisClient = redisClients.keyvRedisClient;

    // Ensure Redis is connected
    if (!keyvRedisClient) throw new Error('Redis client is not initialized');

    // Wait for connection and topology discovery to complete
    await redisClients.keyvRedisClientReady;
  });

  afterEach(async () => {
    // Clean up: clear all test keys from Redis
    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
      const pattern = `*${testPrefix}*`;
      const keysToDelete: string[] = [];

      // Collect all keys first
      for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
        keysToDelete.push(key);
      }

      // Delete in parallel for cluster mode efficiency
      if (keysToDelete.length > 0) {
        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
      }
    }
  });

  afterAll(async () => {
    // Close Redis connection
    if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
  });

  describe('batchDeleteKeys', () => {
    test('should delete multiple keys successfully', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      // Setup: Create test keys
      const keys = [
        `${testPrefix}::key1`,
        `${testPrefix}::key2`,
        `${testPrefix}::key3`,
        `${testPrefix}::key4`,
        `${testPrefix}::key5`,
      ];

      for (const key of keys) {
        await keyvRedisClient.set(key, 'test-value');
      }

      // Verify keys exist
      for (const key of keys) {
        const exists = await keyvRedisClient.exists(key);
        expect(exists).toBe(1);
      }

      // Execute: Delete keys
      const deletedCount = await batchDeleteKeys(keyvRedisClient, keys);

      // Verify: All keys deleted
      expect(deletedCount).toBe(5);

      for (const key of keys) {
        const exists = await keyvRedisClient.exists(key);
        expect(exists).toBe(0);
      }
    });

    test('should handle large batch deletions (>1000 keys)', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      // Create 1500 test keys
      const keys: string[] = [];
      for (let i = 0; i < 1500; i++) {
        keys.push(`${testPrefix}::large-batch::${i}`);
      }

      // Set all keys in batches to avoid overwhelming cluster
      const setBatchSize = 100;
      for (let i = 0; i < keys.length; i += setBatchSize) {
        const batch = keys.slice(i, i + setBatchSize);
        await Promise.all(batch.map((key) => keyvRedisClient!.set(key, 'value')));
      }

      // Delete in batches
      const deletedCount = await batchDeleteKeys(keyvRedisClient, keys, 500);

      // Verify all deleted
      expect(deletedCount).toBe(1500);

      const existsResults = await Promise.all(keys.map((key) => keyvRedisClient!.exists(key)));
      const totalExists = existsResults.reduce((sum, exists) => sum + exists, 0);
      expect(totalExists).toBe(0);
    });

    test('should handle mixed existing and non-existing keys', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      const existingKeys = [`${testPrefix}::exists1`, `${testPrefix}::exists2`];
      const nonExistingKeys = [`${testPrefix}::noexist1`, `${testPrefix}::noexist2`];

      // Create only some keys
      for (const key of existingKeys) {
        await keyvRedisClient.set(key, 'value');
      }

      // Try to delete both existing and non-existing
      const allKeys = [...existingKeys, ...nonExistingKeys];
      const deletedCount = await batchDeleteKeys(keyvRedisClient, allKeys);

      // Should only delete the existing ones
      expect(deletedCount).toBe(2);
    });

    test('should work with custom chunk sizes', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      const keys = Array.from({ length: 75 }, (_, i) => `${testPrefix}::chunk::${i}`);

      // Set all keys
      await Promise.all(keys.map((key) => keyvRedisClient!.set(key, 'value')));

      // Delete with small chunk size (25)
      const deletedCount = await batchDeleteKeys(keyvRedisClient, keys, 25);

      expect(deletedCount).toBe(75);
    });

    test('should return 0 for empty keys array', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      const deletedCount = await batchDeleteKeys(keyvRedisClient, []);
      expect(deletedCount).toBe(0);
    });
  });

  describe('scanKeys', () => {
    test('should scan and find all matching keys', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      // Create test keys with a specific pattern
      const userKeys = [
        `${testPrefix}::user::1`,
        `${testPrefix}::user::2`,
        `${testPrefix}::user::3`,
      ];
      const sessionKeys = [`${testPrefix}::session::1`, `${testPrefix}::session::2`];

      // Set all keys
      await Promise.all(
        [...userKeys, ...sessionKeys].map((key) => keyvRedisClient!.set(key, 'value')),
      );

      // Scan for user keys only
      const foundKeys = await scanKeys(keyvRedisClient, `${testPrefix}::user::*`);

      // Should find only user keys
      expect(foundKeys).toHaveLength(3);
      expect(foundKeys.sort()).toEqual(userKeys.sort());
    });

    test('should scan large number of keys', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      // Create 2000 test keys
      const keys: string[] = [];
      for (let i = 0; i < 2000; i++) {
        keys.push(`${testPrefix}::large-scan::${i}`);
      }

      // Set all keys in batches to avoid overwhelming cluster
      const setBatchSize = 100;
      for (let i = 0; i < keys.length; i += setBatchSize) {
        const batch = keys.slice(i, i + setBatchSize);
        await Promise.all(batch.map((key) => keyvRedisClient!.set(key, 'value')));
      }

      // Scan with custom count
      const foundKeys = await scanKeys(keyvRedisClient, `${testPrefix}::large-scan::*`, 500);

      // Should find all keys
      expect(foundKeys).toHaveLength(2000);
      expect(foundKeys.sort()).toEqual(keys.sort());
    });

    test('should return empty array when no keys match pattern', async () => {
      if (!keyvRedisClient) throw new Error('Redis client not available');

      const foundKeys = await scanKeys(keyvRedisClient, `${testPrefix}::nonexistent::*`);

      expect(foundKeys).toEqual([]);
    });
  });
});
27  packages/api/src/cache/cacheConfig.ts  (vendored)

@@ -85,6 +85,33 @@ const cacheConfig = {
   DEBUG_MEMORY_CACHE: isEnabled(process.env.DEBUG_MEMORY_CACHE),

   BAN_DURATION: math(process.env.BAN_DURATION, 7200000), // 2 hours
+
+  /**
+   * Number of keys to delete in each batch during Redis DEL operations.
+   * In cluster mode, keys are deleted individually in parallel chunks to avoid CROSSSLOT errors.
+   * In single-node mode, keys are deleted in batches using DEL with arrays.
+   * Lower values reduce memory usage but increase number of Redis calls.
+   * @default 1000
+   */
+  REDIS_DELETE_CHUNK_SIZE: math(process.env.REDIS_DELETE_CHUNK_SIZE, 1000),
+
+  /**
+   * Number of keys to update in each batch during Redis SET operations.
+   * In cluster mode, keys are updated individually in parallel chunks to avoid CROSSSLOT errors.
+   * In single-node mode, keys are updated in batches using transactions (multi/exec).
+   * Lower values reduce memory usage but increase number of Redis calls.
+   * @default 1000
+   */
+  REDIS_UPDATE_CHUNK_SIZE: math(process.env.REDIS_UPDATE_CHUNK_SIZE, 1000),
+
+  /**
+   * COUNT hint for Redis SCAN operations when scanning keys by pattern.
+   * This is a hint to Redis about how many keys to scan in each iteration.
+   * Higher values can reduce round trips but increase memory usage and latency per call.
+   * Note: Redis may return more or fewer keys than this count depending on internal heuristics.
+   * @default 1000
+   */
+  REDIS_SCAN_COUNT: math(process.env.REDIS_SCAN_COUNT, 1000),
 };

 export { cacheConfig };
27  packages/api/src/cache/cacheFactory.ts  (vendored)

@@ -17,6 +17,7 @@ import type { SendCommandFn } from 'rate-limit-redis';
 import { keyvRedisClient, ioredisClient } from './redisClients';
 import { cacheConfig } from './cacheConfig';
 import { violationFile } from './keyvFiles';
+import { batchDeleteKeys, scanKeys } from './redisUtils';

 /**
  * Creates a cache instance using Redis or a fallback store. Suitable for general caching needs.

@@ -37,6 +38,32 @@ export const standardCache = (namespace: string, ttl?: number, fallbackStore?: o
       logger.error(`Cache error in namespace ${namespace}:`, err);
     });
+
+    // Override clear() to handle namespace-aware deletion
+    // The default Keyv clear() doesn't respect namespace due to the workaround above
+    // Workaround for issue #10487 https://github.com/danny-avila/LibreChat/issues/10487
+    cache.clear = async () => {
+      // Type-safe check for Redis client with scanIterator support
+      if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
+        logger.warn(`Cannot clear namespace ${namespace}: Redis scanIterator not available`);
+        return;
+      }
+
+      // Build pattern: globalPrefix::namespace:* or namespace:*
+      const pattern = cacheConfig.REDIS_KEY_PREFIX
+        ? `${cacheConfig.REDIS_KEY_PREFIX}${cacheConfig.GLOBAL_PREFIX_SEPARATOR}${namespace}:*`
+        : `${namespace}:*`;
+
+      // Use utility functions for efficient scan and parallel deletion
+      const keysToDelete = await scanKeys(keyvRedisClient, pattern);
+
+      if (keysToDelete.length === 0) {
+        return;
+      }
+
+      await batchDeleteKeys(keyvRedisClient, keysToDelete);
+      logger.debug(`Cleared ${keysToDelete.length} keys from namespace ${namespace}`);
+    };
+
     return cache;
   } catch (err) {
     logger.error(`Failed to create Redis cache for namespace ${namespace}:`, err);
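
With this override, clear() removes only keys under the cache's own namespace, which the new "clear() should only clear keys in its own namespace" test above exercises. A short usage sketch (assuming the standardCache factory exported by this module; the namespace names are illustrative):

import { standardCache } from './cacheFactory';

async function example() {
  const rolesCache = standardCache('ROLES');
  const otherCache = standardCache('BANS');

  await rolesCache.set('admin', 'all-permissions');
  await otherCache.set('user-1', 'banned');

  // Only ROLES:* keys are scanned and deleted; the other namespace is untouched.
  await rolesCache.clear();
}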
1  packages/api/src/cache/index.ts  (vendored)

@@ -3,3 +3,4 @@ export * from './redisClients';
 export * from './keyvFiles';
 export { default as keyvMongo } from './keyvMongo';
 export * from './cacheFactory';
+export * from './redisUtils';
129  packages/api/src/cache/redisUtils.ts  (new file, vendored)

@@ -0,0 +1,129 @@
import type { RedisClientType, RedisClusterType } from '@redis/client';
import { logger } from '@librechat/data-schemas';
import { cacheConfig } from './cacheConfig';

/**
 * Efficiently deletes multiple Redis keys with support for both cluster and single-node modes.
 *
 * - Cluster mode: Deletes keys in parallel chunks to avoid CROSSSLOT errors
 * - Single-node mode: Uses batch DEL commands for efficiency
 *
 * @param client - Redis client (node or cluster)
 * @param keys - Array of keys to delete
 * @param chunkSize - Optional chunk size (defaults to REDIS_DELETE_CHUNK_SIZE config)
 * @returns Number of keys deleted
 *
 * @example
 * ```typescript
 * const deletedCount = await batchDeleteKeys(keyvRedisClient, ['key1', 'key2', 'key3']);
 * console.log(`Deleted ${deletedCount} keys`);
 * ```
 */
export async function batchDeleteKeys(
  client: RedisClientType | RedisClusterType,
  keys: string[],
  chunkSize?: number,
): Promise<number> {
  const startTime = Date.now();

  if (keys.length === 0) {
    return 0;
  }

  const size = chunkSize ?? cacheConfig.REDIS_DELETE_CHUNK_SIZE;
  const mode = cacheConfig.USE_REDIS_CLUSTER ? 'cluster' : 'single-node';
  const deletePromises = [];

  if (cacheConfig.USE_REDIS_CLUSTER) {
    // Cluster mode: Delete each key individually in parallel chunks to avoid CROSSSLOT errors
    for (let i = 0; i < keys.length; i += size) {
      const chunk = keys.slice(i, i + size);
      deletePromises.push(Promise.all(chunk.map((key) => client.del(key))));
    }
  } else {
    // Single-node mode: Batch delete chunks using DEL with array
    for (let i = 0; i < keys.length; i += size) {
      const chunk = keys.slice(i, i + size);
      deletePromises.push(client.del(chunk));
    }
  }

  const results = await Promise.all(deletePromises);

  // Sum up deleted counts (cluster returns array of individual counts, single-node returns total)
  const deletedCount = results.reduce((sum: number, count: number | number[]): number => {
    if (Array.isArray(count)) {
      return sum + count.reduce((a, b) => a + b, 0);
    }
    return sum + count;
  }, 0);

  // Performance monitoring
  const duration = Date.now() - startTime;
  const batchCount = deletePromises.length;

  if (duration > 1000) {
    logger.warn(
      `[Redis][batchDeleteKeys] Slow operation - Duration: ${duration}ms, Mode: ${mode}, Keys: ${keys.length}, Deleted: ${deletedCount}, Batches: ${batchCount}, Chunk size: ${size}`,
    );
  } else {
    logger.debug(
      `[Redis][batchDeleteKeys] Duration: ${duration}ms, Mode: ${mode}, Keys: ${keys.length}, Deleted: ${deletedCount}, Batches: ${batchCount}`,
    );
  }

  return deletedCount;
}

/**
 * Scans Redis for keys matching a pattern and collects them into an array.
 * Uses Redis SCAN to avoid blocking the server.
 *
 * @param client - Redis client (node or cluster) with scanIterator support
 * @param pattern - Pattern to match keys (e.g., 'user:*', 'session:*:active')
 * @param count - Optional SCAN COUNT hint (defaults to REDIS_SCAN_COUNT config)
 * @returns Array of matching keys
 *
 * @example
 * ```typescript
 * const userKeys = await scanKeys(keyvRedisClient, 'user:*');
 * const sessionKeys = await scanKeys(keyvRedisClient, 'session:*:active', 500);
 * ```
 */
export async function scanKeys(
  client: RedisClientType | RedisClusterType,
  pattern: string,
  count?: number,
): Promise<string[]> {
  const startTime = Date.now();
  const keys: string[] = [];

  // Type guard to check if client has scanIterator
  if (!('scanIterator' in client)) {
    throw new Error('Redis client does not support scanIterator');
  }

  const scanCount = count ?? cacheConfig.REDIS_SCAN_COUNT;

  for await (const key of client.scanIterator({
    MATCH: pattern,
    COUNT: scanCount,
  })) {
    keys.push(key);
  }

  // Performance monitoring
  const duration = Date.now() - startTime;

  if (duration > 1000) {
    logger.warn(
      `[Redis][scanKeys] Slow operation - Duration: ${duration}ms, Pattern: "${pattern}", Keys found: ${keys.length}, Scan count: ${scanCount}`,
    );
  } else {
    logger.debug(
      `[Redis][scanKeys] Duration: ${duration}ms, Pattern: "${pattern}", Keys found: ${keys.length}`,
    );
  }

  return keys;
}
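
The two helpers above are designed to be combined — scan for a pattern, then delete the matches in chunks — which is what the namespace-aware clear() in cacheFactory does. A minimal usage sketch (assuming an already-connected keyvRedisClient from ./redisClients; purgeNamespace is a hypothetical helper name):

import { keyvRedisClient } from './redisClients';
import { scanKeys, batchDeleteKeys } from './redisUtils';

async function purgeNamespace(namespace: string): Promise<number> {
  if (!keyvRedisClient) return 0;
  // SCAN is cursor-based and non-blocking, so it is safe on large keyspaces.
  const keys = await scanKeys(keyvRedisClient, `${namespace}:*`);
  // batchDeleteKeys picks the cluster-safe or single-node strategy internally.
  return batchDeleteKeys(keyvRedisClient, keys);
}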
<packages/api exports index>

@@ -4,6 +4,7 @@ export * from './cdn';
 export * from './auth';
 /* MCP */
 export * from './mcp/registry/MCPServersRegistry';
+export * from './mcp/registry/MCPPrivateServerLoader';
 export * from './mcp/MCPManager';
 export * from './mcp/connection';
 export * from './mcp/oauth';
packages/api/src/mcp/ConnectionsRepository.ts

@@ -1,37 +1,74 @@
 import { logger } from '@librechat/data-schemas';
 import { MCPConnectionFactory } from '~/mcp/MCPConnectionFactory';
 import { MCPConnection } from './connection';
+import { mcpServersRegistry as registry } from '~/mcp/registry/MCPServersRegistry';
 import type * as t from './types';

 /**
  * Manages MCP connections with lazy loading and reconnection.
  * Maintains a pool of connections and handles connection lifecycle management.
+ * Queries server configurations dynamically from the MCPServersRegistry (single source of truth).
+ *
+ * Scope-aware: Each repository is tied to a specific owner scope:
+ * - ownerId = undefined → manages app-level servers only
+ * - ownerId = userId → manages user-level and private servers for that user
  */
 export class ConnectionsRepository {
-  protected readonly serverConfigs: Record<string, t.MCPOptions>;
   protected connections: Map<string, MCPConnection> = new Map();
   protected oauthOpts: t.OAuthConnectionOptions | undefined;
+  private readonly ownerId: string | undefined;

-  constructor(serverConfigs: t.MCPServers, oauthOpts?: t.OAuthConnectionOptions) {
-    this.serverConfigs = serverConfigs;
+  constructor(ownerId?: string, oauthOpts?: t.OAuthConnectionOptions) {
+    this.ownerId = ownerId;
     this.oauthOpts = oauthOpts;
   }

   /** Checks whether this repository can connect to a specific server */
-  has(serverName: string): boolean {
-    return !!this.serverConfigs[serverName];
+  async has(serverName: string): Promise<boolean> {
+    const config = await registry.getServerConfig(serverName, this.ownerId);
+    if (!config) {
+      // if the config does not exist, clean up any potential orphaned connections (caused by server tier migration)
+      await this.disconnect(serverName);
+    }
+    return !!config;
   }

   /** Gets or creates a connection for the specified server with lazy loading */
-  async get(serverName: string): Promise<MCPConnection> {
+  async get(serverName: string): Promise<MCPConnection | null> {
+    const serverConfig = await registry.getServerConfig(serverName, this.ownerId);
     const existingConnection = this.connections.get(serverName);
-    if (existingConnection && (await existingConnection.isConnected())) return existingConnection;
-    else await this.disconnect(serverName);
+    if (!serverConfig) {
+      if (existingConnection) {
+        await existingConnection.disconnect();
+      }
+      return null;
+    }
+    if (existingConnection) {
+      // Check if config was cached/updated since connection was created
+      if (serverConfig.lastUpdatedAt && existingConnection.isStale(serverConfig.lastUpdatedAt)) {
+        logger.info(
+          `${this.prefix(serverName)} Existing connection for ${serverName} is outdated. Recreating a new connection.`,
+          {
+            connectionCreated: new Date(existingConnection.createdAt).toISOString(),
+            configCachedAt: new Date(serverConfig.lastUpdatedAt).toISOString(),
+          },
+        );
+
+        // Disconnect stale connection
+        await existingConnection.disconnect();
+        this.connections.delete(serverName);
+        // Fall through to create new connection
+      } else if (await existingConnection.isConnected()) {
+        return existingConnection;
+      } else {
+        await this.disconnect(serverName);
+      }
+    }

     const connection = await MCPConnectionFactory.create(
       {
         serverName,
-        serverConfig: this.getServerConfig(serverName),
+        serverConfig,
       },
       this.oauthOpts,
     );

@@ -44,7 +81,7 @@ export class ConnectionsRepository {
   async getMany(serverNames: string[]): Promise<Map<string, MCPConnection>> {
     const connectionPromises = serverNames.map(async (name) => [name, await this.get(name)]);
     const connections = await Promise.all(connectionPromises);
-    return new Map(connections as [string, MCPConnection][]);
+    return new Map((connections as [string, MCPConnection][]).filter((v) => !!v[1]));
   }

   /** Returns all currently loaded connections without creating new ones */

@@ -52,9 +89,12 @@ export class ConnectionsRepository {
     return this.getMany(Array.from(this.connections.keys()));
   }

-  /** Gets or creates connections for all configured servers */
+  /** Gets or creates connections for all configured servers in this repository's scope */
   async getAll(): Promise<Map<string, MCPConnection>> {
-    return this.getMany(Object.keys(this.serverConfigs));
+    // TODO: in the future we should use a scoped config getter (AppLevel, UserLevel, Private)
+    // for now, a non-existent config will not throw an error
+    const allConfigs = await registry.getAllServerConfigs(this.ownerId);
+    return this.getMany(Object.keys(allConfigs));
   }

   /** Disconnects and removes a specific server connection from the pool */

@@ -73,13 +113,6 @@ export class ConnectionsRepository {
     return serverNames.map((serverName) => this.disconnect(serverName));
   }

-  // Retrieves server configuration by name or throws if not found
-  protected getServerConfig(serverName: string): t.MCPOptions {
-    const serverConfig = this.serverConfigs[serverName];
-    if (serverConfig) return serverConfig;
-    throw new Error(`${this.prefix(serverName)} Server not found in configuration`);
-  }
-
   // Returns formatted log prefix for server messages
   protected prefix(serverName: string): string {
     return `[MCP][${serverName}]`;
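
A usage sketch of the scope-aware repository (the app-level instantiation mirrors MCPManager.initialize below; the user-level variant and the import path are assumptions based on the documented ownerId semantics):

import { ConnectionsRepository } from '~/mcp/ConnectionsRepository';

async function example(userId: string) {
  // App-level scope: only APP-tier servers are reachable.
  const appConnections = new ConnectionsRepository(undefined);
  // User scope: user-shared and private servers for this user become reachable.
  const userConnections = new ConnectionsRepository(userId);

  // get() now resolves to null when the server is no longer in scope,
  // so callers handle the null case instead of catching a "not found" error.
  const conn = (await appConnections.get('my-server')) ?? (await userConnections.get('my-server'));
  if (conn) {
    // use the connection
  }
}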
<packages/api/src/mcp/MCPManager.ts>

@@ -40,8 +40,7 @@ export class MCPManager extends UserConnectionManager {
   /** Initializes the MCPManager by setting up server registry and app connections */
   public async initialize(configs: t.MCPServers) {
     await MCPServersInitializer.initialize(configs);
-    const appConfigs = await registry.sharedAppServers.getAll();
-    this.appConnections = new ConnectionsRepository(appConfigs);
+    this.appConnections = new ConnectionsRepository(undefined);
   }

   /** Retrieves an app-level or user-specific connection based on provided arguments */

@@ -53,8 +52,10 @@ export class MCPManager extends UserConnectionManager {
     flowManager?: FlowStateManager<MCPOAuthTokens | null>;
   } & Omit<t.OAuthConnectionOptions, 'useOAuth' | 'user' | 'flowManager'>,
   ): Promise<MCPConnection> {
-    if (this.appConnections!.has(args.serverName)) {
-      return this.appConnections!.get(args.serverName);
+    // the get() method checks whether the config is still valid at the app level
+    const existingAppConnection = await this.appConnections!.get(args.serverName);
+    if (existingAppConnection) {
+      return existingAppConnection;
     } else if (args.user?.id) {
       return this.getUserConnection(args as Parameters<typeof this.getUserConnection>[0]);
     } else {

@@ -83,11 +84,10 @@ export class MCPManager extends UserConnectionManager {
     serverName: string,
   ): Promise<t.LCAvailableTools | null> {
     try {
-      if (this.appConnections?.has(serverName)) {
-        return MCPServerInspector.getToolFunctions(
-          serverName,
-          await this.appConnections.get(serverName),
-        );
+      // try to get the app connection (if the config is no longer at the app level, any existing connection is disconnected and get() returns null)
+      const existingAppConnection = await this.appConnections?.get(serverName);
+      if (existingAppConnection) {
+        return MCPServerInspector.getToolFunctions(serverName, existingAppConnection);
       }

       const userConnections = this.getUserConnections(userId);
<UserConnectionManager>

@@ -54,13 +54,15 @@ export abstract class UserConnectionManager {
       throw new McpError(ErrorCode.InvalidRequest, `[MCP] User object missing id property`);
     }

-    if (this.appConnections!.has(serverName)) {
+    if (await this.appConnections!.has(serverName)) {
       throw new McpError(
         ErrorCode.InvalidRequest,
         `[MCP][User: ${userId}] Trying to create user-specific connection for app-level server "${serverName}"`,
       );
     }

+    const config = await serversRegistry.getServerConfig(serverName, userId);
+
     const userServerMap = this.userConnections.get(userId);
     let connection = forceNew ? undefined : userServerMap?.get(serverName);
     const now = Date.now();

@@ -77,7 +79,15 @@ export abstract class UserConnectionManager {
       }
       connection = undefined; // Force creation of a new connection
     } else if (connection) {
-      if (await connection.isConnected()) {
+      if (!config || (config.lastUpdatedAt && connection.isStale(config.lastUpdatedAt))) {
+        if (config) {
+          logger.info(
+            `[MCP][User: ${userId}][${serverName}] Config was updated, disconnecting stale connection`,
+          );
+        }
+        await this.disconnectUserConnection(userId, serverName);
+        connection = undefined;
+      } else if (await connection.isConnected()) {
         logger.debug(`[MCP][User: ${userId}][${serverName}] Reusing active connection`);
         this.updateUserLastActivity(userId);
         return connection;

@@ -91,12 +101,7 @@ export abstract class UserConnectionManager {
       }
     }

-    // If no valid connection exists, create a new one
-    if (!connection) {
-      logger.info(`[MCP][User: ${userId}][${serverName}] Establishing new connection`);
-    }
-
-    const config = await serversRegistry.getServerConfig(serverName, userId);
+    // Now check if config exists for new connection creation
     if (!config) {
       throw new McpError(
         ErrorCode.InvalidRequest,

@@ -104,6 +109,9 @@ export abstract class UserConnectionManager {
       );
     }

+    // If no valid connection exists, create a new one
+    logger.info(`[MCP][User: ${userId}][${serverName}] Establishing new connection`);
+
     try {
       connection = await MCPConnectionFactory.create(
         {
|
||||||
jest.mock('@librechat/data-schemas', () => ({
|
jest.mock('@librechat/data-schemas', () => ({
|
||||||
logger: {
|
logger: {
|
||||||
error: jest.fn(),
|
error: jest.fn(),
|
||||||
|
info: jest.fn(),
|
||||||
},
|
},
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
|
@ -19,11 +20,23 @@ jest.mock('../MCPConnectionFactory', () => ({
|
||||||
|
|
||||||
jest.mock('../connection');
|
jest.mock('../connection');
|
||||||
|
|
||||||
|
// Mock the registry
|
||||||
|
jest.mock('../registry/MCPServersRegistry', () => ({
|
||||||
|
mcpServersRegistry: {
|
||||||
|
getServerConfig: jest.fn(),
|
||||||
|
getAllServerConfigs: jest.fn(),
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
const mockLogger = logger as jest.Mocked<typeof logger>;
|
const mockLogger = logger as jest.Mocked<typeof logger>;
|
||||||
|
|
||||||
|
// Import mocked registry
|
||||||
|
import { mcpServersRegistry as registry } from '../registry/MCPServersRegistry';
|
||||||
|
const mockRegistry = registry as jest.Mocked<typeof registry>;
|
||||||
|
|
||||||
describe('ConnectionsRepository', () => {
|
describe('ConnectionsRepository', () => {
|
||||||
let repository: ConnectionsRepository;
|
let repository: ConnectionsRepository;
|
||||||
let mockServerConfigs: t.MCPServers;
|
let mockServerConfigs: Record<string, t.ParsedServerConfig>;
|
||||||
let mockConnection: jest.Mocked<MCPConnection>;
|
let mockConnection: jest.Mocked<MCPConnection>;
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
|
|
@ -33,14 +46,28 @@ describe('ConnectionsRepository', () => {
|
||||||
server3: { url: 'ws://localhost:8080', type: 'websocket' },
|
server3: { url: 'ws://localhost:8080', type: 'websocket' },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Setup mock registry
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||||
|
mockRegistry.getServerConfig = jest.fn((serverName: string, ownerId?: string) =>
|
||||||
|
Promise.resolve(mockServerConfigs[serverName] || undefined),
|
||||||
|
) as jest.Mock;
|
||||||
|
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||||
|
mockRegistry.getAllServerConfigs = jest.fn((ownerId?: string) =>
|
||||||
|
Promise.resolve(mockServerConfigs),
|
||||||
|
) as jest.Mock;
|
||||||
|
|
||||||
mockConnection = {
|
mockConnection = {
|
||||||
isConnected: jest.fn().mockResolvedValue(true),
|
isConnected: jest.fn().mockResolvedValue(true),
|
||||||
disconnect: jest.fn().mockResolvedValue(undefined),
|
disconnect: jest.fn().mockResolvedValue(undefined),
|
||||||
|
createdAt: Date.now(),
|
||||||
|
isStale: jest.fn().mockReturnValue(false),
|
||||||
} as unknown as jest.Mocked<MCPConnection>;
|
} as unknown as jest.Mocked<MCPConnection>;
|
||||||
|
|
||||||
(MCPConnectionFactory.create as jest.Mock).mockResolvedValue(mockConnection);
|
(MCPConnectionFactory.create as jest.Mock).mockResolvedValue(mockConnection);
|
||||||
|
|
||||||
repository = new ConnectionsRepository(mockServerConfigs);
|
// Create repository with undefined ownerId (app-level)
|
||||||
|
repository = new ConnectionsRepository(undefined);
|
||||||
|
|
||||||
jest.clearAllMocks();
|
jest.clearAllMocks();
|
||||||
});
|
});
|
||||||
|
|
@ -50,12 +77,12 @@ describe('ConnectionsRepository', () => {
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('has', () => {
|
describe('has', () => {
|
||||||
it('should return true for existing server', () => {
|
it('should return true for existing server', async () => {
|
||||||
expect(repository.has('server1')).toBe(true);
|
expect(await repository.has('server1')).toBe(true);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should return false for non-existing server', () => {
|
it('should return false for non-existing server', async () => {
|
||||||
expect(repository.has('nonexistent')).toBe(false);
|
expect(await repository.has('nonexistent')).toBe(false);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -104,7 +131,90 @@ describe('ConnectionsRepository', () => {
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should throw error for non-existent server configuration', async () => {
|
it('should recreate connection when existing connection is stale', async () => {
|
||||||
|
const connectionCreatedAt = Date.now();
|
||||||
|
const configCachedAt = connectionCreatedAt + 10000; // Config updated 10 seconds after connection was created
|
||||||
|
|
||||||
|
const staleConnection = {
|
||||||
|
isConnected: jest.fn().mockResolvedValue(true),
|
||||||
|
disconnect: jest.fn().mockResolvedValue(undefined),
|
||||||
|
createdAt: connectionCreatedAt,
|
||||||
|
isStale: jest.fn().mockReturnValue(true),
|
||||||
|
} as unknown as jest.Mocked<MCPConnection>;
|
||||||
|
|
||||||
|
// Update server config with lastUpdatedAt timestamp
|
||||||
|
const configWithCachedAt = {
|
||||||
|
...mockServerConfigs.server1,
|
||||||
|
lastUpdatedAt: configCachedAt,
|
||||||
|
};
|
||||||
|
mockRegistry.getServerConfig.mockResolvedValueOnce(configWithCachedAt);
|
||||||
|
|
||||||
|
repository['connections'].set('server1', staleConnection);
|
||||||
|
|
||||||
|
const result = await repository.get('server1');
|
||||||
|
|
||||||
|
// Verify stale check was called with the config's lastUpdatedAt timestamp
|
||||||
|
expect(staleConnection.isStale).toHaveBeenCalledWith(configCachedAt);
|
||||||
|
|
||||||
|
// Verify old connection was disconnected
|
||||||
|
expect(staleConnection.disconnect).toHaveBeenCalled();
|
||||||
|
|
||||||
|
// Verify new connection was created
|
||||||
|
expect(MCPConnectionFactory.create).toHaveBeenCalledWith(
|
||||||
|
{
|
||||||
|
serverName: 'server1',
|
||||||
|
serverConfig: configWithCachedAt,
|
||||||
|
},
|
||||||
|
undefined,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify new connection is returned
|
||||||
|
expect(result).toBe(mockConnection);
|
||||||
|
|
||||||
|
// Verify the new connection replaced the stale one in the repository
|
||||||
|
expect(repository['connections'].get('server1')).toBe(mockConnection);
|
||||||
|
expect(repository['connections'].get('server1')).not.toBe(staleConnection);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return existing connection when it is not stale', async () => {
|
||||||
|
const connectionCreatedAt = Date.now();
|
||||||
|
const configCachedAt = connectionCreatedAt - 10000; // Config is older than connection
|
||||||
|
|
||||||
|
const freshConnection = {
|
||||||
|
isConnected: jest.fn().mockResolvedValue(true),
|
||||||
|
disconnect: jest.fn().mockResolvedValue(undefined),
|
||||||
|
createdAt: connectionCreatedAt,
|
||||||
|
isStale: jest.fn().mockReturnValue(false),
|
||||||
|
} as unknown as jest.Mocked<MCPConnection>;
|
||||||
|
|
||||||
|
// Update server config with lastUpdatedAt timestamp
|
||||||
|
const configWithCachedAt = {
|
||||||
|
...mockServerConfigs.server1,
|
||||||
|
lastUpdatedAt: configCachedAt,
|
||||||
|
};
|
||||||
|
mockRegistry.getServerConfig.mockResolvedValueOnce(configWithCachedAt);
|
||||||
|
|
||||||
|
repository['connections'].set('server1', freshConnection);
|
||||||
|
|
||||||
|
const result = await repository.get('server1');
|
||||||
|
|
||||||
|
// Verify stale check was called
|
||||||
|
expect(freshConnection.isStale).toHaveBeenCalledWith(configCachedAt);
|
||||||
|
|
||||||
|
// Verify connection was not disconnected
|
||||||
|
expect(freshConnection.disconnect).not.toHaveBeenCalled();
|
||||||
|
|
||||||
|
// Verify no new connection was created
|
||||||
|
expect(MCPConnectionFactory.create).not.toHaveBeenCalled();
|
||||||
|
|
||||||
|
// Verify existing connection is returned
|
||||||
|
expect(result).toBe(freshConnection);
|
||||||
|
|
||||||
|
// Verify repository still has the same connection
|
||||||
|
expect(repository['connections'].get('server1')).toBe(freshConnection);
|
||||||
|
});
|
||||||
|
//todo revist later when async getAll(): in packages/api/src/mcp/ConnectionsRepository.ts is refactored
|
||||||
|
it.skip('should throw error for non-existent server configuration', async () => {
|
||||||
await expect(repository.get('nonexistent')).rejects.toThrow(
|
await expect(repository.get('nonexistent')).rejects.toThrow(
|
||||||
'[MCP][nonexistent] Server not found in configuration',
|
'[MCP][nonexistent] Server not found in configuration',
|
||||||
);
|
);
|
||||||
|
|
|
||||||
|
|
<MCPManager spec>

@@ -58,7 +58,7 @@ describe('MCPManager', () => {
     appConnectionsConfig: Partial<ConnectionsRepository>,
   ): jest.MockedClass<typeof ConnectionsRepository> {
     const mock = {
-      has: jest.fn().mockReturnValue(false),
+      has: jest.fn().mockResolvedValue(false),
       get: jest.fn().mockResolvedValue({} as unknown as MCPConnection),
       ...appConnectionsConfig,
     };

@@ -303,7 +303,7 @@ describe('MCPManager', () => {
     });

     mockAppConnections({
-      has: jest.fn().mockReturnValue(true),
+      has: jest.fn().mockResolvedValue(true),
     });

     const manager = await MCPManager.createInstance(newMCPServersConfig());

@@ -321,7 +321,7 @@ describe('MCPManager', () => {
     (MCPServerInspector.getToolFunctions as jest.Mock) = jest.fn().mockResolvedValue({});

     mockAppConnections({
-      has: jest.fn().mockReturnValue(false),
+      get: jest.fn().mockResolvedValue(null),
     });

     const manager = await MCPManager.createInstance(newMCPServersConfig());

@@ -357,7 +357,7 @@ describe('MCPManager', () => {
       .mockResolvedValue(expectedTools);

     mockAppConnections({
-      has: jest.fn().mockReturnValue(true),
+      has: jest.fn().mockResolvedValue(true),
     });

     const manager = await MCPManager.createInstance(newMCPServersConfig());

@@ -376,7 +376,7 @@ describe('MCPManager', () => {
     });

     mockAppConnections({
-      has: jest.fn().mockReturnValue(true),
+      has: jest.fn().mockResolvedValue(true),
     });

     const manager = await MCPManager.createInstance(newMCPServersConfig(specificServerName));
<MCPConnection>

@@ -98,6 +98,12 @@ export class MCPConnection extends EventEmitter {
   timeout?: number;
   url?: string;

+  /**
+   * Timestamp when this connection was created.
+   * Used to detect if connection is stale compared to updated config.
+   */
+  public readonly createdAt: number;
+
   setRequestHeaders(headers: Record<string, string> | null): void {
     if (!headers) {
       return;

@@ -121,6 +127,7 @@ export class MCPConnection extends EventEmitter {
     this.iconPath = params.serverConfig.iconPath;
     this.timeout = params.serverConfig.timeout;
     this.lastPingTime = Date.now();
+    this.createdAt = Date.now(); // Record creation timestamp for staleness detection
     if (params.oauthTokens) {
       this.oauthTokens = params.oauthTokens;
     }

@@ -736,6 +743,17 @@ export class MCPConnection extends EventEmitter {
     this.oauthTokens = tokens;
   }

+  /**
+   * Check if this connection is stale compared to config update time.
+   * A connection is stale if it was created before the config was updated.
+   *
+   * @param configUpdatedAt - Unix timestamp (ms) when config was last updated
+   * @returns true if connection was created before config update, false otherwise
+   */
+  public isStale(configUpdatedAt: number): boolean {
+    return this.createdAt < configUpdatedAt;
+  }
+
   private isOAuthError(error: unknown): boolean {
     if (!error || typeof error !== 'object') {
       return false;
276  packages/api/src/mcp/registry/MCPPrivateServerLoader.ts  (new file)

@@ -0,0 +1,276 @@
import { mcpServersRegistry as registry } from './MCPServersRegistry';
import { privateServersLoadStatusCache as loadStatusCache } from './cache/PrivateServersLoadStatusCache';
import type * as t from '~/mcp/types';
import { logger } from '@librechat/data-schemas';
import { MCPServerDB } from 'librechat-data-provider';

/**
 * Handles loading and updating private MCP servers for users.
 * Static methods work directly with the registry's privateServersCache.
 * Similar pattern to MCPServersInitializer but for runtime private server management.
 */
export class MCPPrivateServerLoader {
  /**
   * Load private servers for a specific user with TTL synchronization and distributed locking.
   * Use case: User logs in, lazy-load their private servers from DB
   *
   * Handles three critical issues:
   * 1. Partial Load Prevention: Loaded flag only set after ALL servers load successfully
   * 2. TTL Synchronization: Loaded flag expires with cache entries (prevents desync)
   * 3. Race Condition Prevention: Distributed locking prevents concurrent loads
   *
   * Edge cases handled:
   * - Process crashes mid-load: Flag not set, will retry on next attempt
   * - Cache eviction: TTL ensures flag expires with cache entries
   * - Concurrent loads: Lock ensures only one process loads, others wait
   * - Users with 0 servers: Correctly handled (no cache verification needed)
   *
   * @param userId - User ID
   * @param configsLoader - a callback that fetches DB servers available for a user
   * @param cacheTTL - Cache TTL in milliseconds (default: 3600000 = 1 hour)
   * @throws {Error} If userId is invalid or loading fails
   */
  public static async loadPrivateServers(
    userId: string,
    configsLoader: (userId: string) => Promise<MCPServerDB[]>,
    cacheTTL: number = 3600000, // 1 hour default
  ): Promise<void> {
    // Input validation
    if (!userId?.trim()) {
      throw new Error('[MCP][PrivateServerLoader] userId is required and cannot be empty');
    }
    if (typeof configsLoader !== 'function') {
      throw new Error('[MCP][PrivateServerLoader] configsLoader must be a function');
    }

    const alreadyLoaded = await loadStatusCache.isLoaded(userId);
    if (alreadyLoaded) {
      logger.debug(`[MCP][PrivateServerLoader] User ${userId} private servers already loaded`);
      return;
    }

    const lockAcquired = await loadStatusCache.acquireLoadLock(userId);

    if (!lockAcquired) {
      logger.debug(
        `[MCP][PrivateServerLoader] Another process is loading user ${userId}, waiting...`,
      );
      const completed = await loadStatusCache.waitForLoad(userId);

      if (completed) {
        logger.debug(`[MCP][PrivateServerLoader] User ${userId} loaded by another process`);
        return;
      } else {
        // Timeout - try to acquire lock again (maybe the other process crashed)
        logger.warn(
          `[MCP][PrivateServerLoader] Timeout waiting for user ${userId}, retrying lock acquisition`,
        );
        const retryLock = await loadStatusCache.acquireLoadLock(userId);
        if (!retryLock) {
          throw new Error(
            `[MCP][PrivateServerLoader] Failed to acquire load lock for user ${userId}`,
          );
        }
      }
    }

    // We have the lock, proceed with loading
    try {
      logger.info(`[MCP][PrivateServerLoader] Loading private servers for user ${userId}`);
      const servers = await configsLoader(userId);
      // reset the cache for the user
      await registry.privateServersCache.reset(userId);

      for (const server of servers) {
        const serverName = server.mcp_id;
        const existing = await registry.privateServersCache.get(userId, serverName);
        if (!existing) {
          // Add new server config
          await registry.privateServersCache.add(userId, serverName, {
            ...server.config,
            dbId: server._id,
          });
          logger.debug(`${this.prefix(serverName)} Added private server for user ${userId}`);
        } else {
          logger.debug(
            `${this.prefix(serverName)} Private server already exists for user ${userId}`,
          );
        }
      }

      // Mark as fully loaded with TTL (synchronized with cache entries)
      await loadStatusCache.setLoaded(userId, cacheTTL);
      logger.debug(
        `[MCP][PrivateServerLoader] User ${userId} private servers fully loaded (${servers.length} servers, TTL: ${cacheTTL}ms)`,
      );
    } catch (error) {
      logger.error(
        `[MCP][PrivateServerLoader] Loading private servers for user ${userId} failed.`,
        error,
      );
      throw error;
    } finally {
      // Always release the lock, even on error
      await loadStatusCache.releaseLoadLock(userId);
    }
  }

  /**
   * Propagate metadata changes to all users who have this server, or update the shared cache if the server is shared with PUBLIC.
   * Use case: Admin updates server URL, auth, etc.
   * Efficient: Uses pattern-based scan, updates only affected users
   *
   * @param serverName - Server name
   * @param config - Updated server configuration
   */
  public static async updatePrivateServer(
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void> {
    // check if the private server is promoted to an app-level or user-shared-level server
|
||||||
|
const sharedServer = await registry.getServerConfig(serverName);
|
||||||
|
if (sharedServer) {
|
||||||
|
logger.info(`${this.prefix(serverName)} Promoted private server update`);
|
||||||
|
// server must be removed to simplify moving from App -> Shared and Shared -> App based on the config.
|
||||||
|
// Otherwise we need to figure out if it is an APP or a User shared and whether to migrate or not.
|
||||||
|
|
||||||
|
await registry.removeServer(serverName);
|
||||||
|
await registry.addSharedServer(serverName, config);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
logger.info(`${this.prefix(serverName)} Propagating metadata update to all users`);
|
||||||
|
await registry.privateServersCache.updateServerConfigIfExists(serverName, config);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a private server
|
||||||
|
* Use case: Admin / user creates an mcp server from the UI
|
||||||
|
*
|
||||||
|
* @param userId - userId
|
||||||
|
* @param serverName - Server name
|
||||||
|
* @param config - Updated server configuration
|
||||||
|
*/
|
||||||
|
public static async addPrivateServer(
|
||||||
|
userId: string,
|
||||||
|
serverName: string,
|
||||||
|
config: t.ParsedServerConfig,
|
||||||
|
): Promise<void> {
|
||||||
|
logger.info(`${this.prefix(serverName)} add private server to user with Id ${userId}`);
|
||||||
|
await registry.privateServersCache.add(userId, serverName, config);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle permission changes - grant/revoke access.
|
||||||
|
* Use case: Admin shares/unshares server with users
|
||||||
|
*
|
||||||
|
* @param serverName - Server name
|
||||||
|
* @param allowedUserIds - Array of user IDs who should have access
|
||||||
|
* @param config - Server configuration
|
||||||
|
*/
|
||||||
|
public static async updatePrivateServerAccess(
|
||||||
|
serverName: string,
|
||||||
|
allowedUserIds: string[],
|
||||||
|
config: t.ParsedServerConfig,
|
||||||
|
): Promise<void> {
|
||||||
|
if (allowedUserIds.length === 0) {
|
||||||
|
// Revoke from everyone
|
||||||
|
logger.info(`${this.prefix(serverName)} Revoking access from all users`);
|
||||||
|
const allUsers = await registry.privateServersCache.findUsersWithServer(serverName);
|
||||||
|
await registry.privateServersCache.removeServerConfigIfCacheExists(allUsers, serverName);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`${this.prefix(serverName)} Updating access for ${allowedUserIds.length} users`);
|
||||||
|
|
||||||
|
// Find current state
|
||||||
|
const currentUsers = await registry.privateServersCache.findUsersWithServer(serverName);
|
||||||
|
const allowedSet = new Set(allowedUserIds);
|
||||||
|
|
||||||
|
// Revoke from users no longer allowed
|
||||||
|
const toRevoke = currentUsers.filter((id) => !allowedSet.has(id));
|
||||||
|
if (toRevoke.length > 0) {
|
||||||
|
logger.debug(`${this.prefix(serverName)} Revoking access from ${toRevoke.length} users`);
|
||||||
|
await registry.privateServersCache.removeServerConfigIfCacheExists(toRevoke, serverName);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grant to allowed users (only initialized caches)
|
||||||
|
logger.debug(`${this.prefix(serverName)} Granting access to ${allowedUserIds.length} users`);
|
||||||
|
await registry.privateServersCache.addServerConfigIfCacheExists(
|
||||||
|
allowedUserIds,
|
||||||
|
serverName,
|
||||||
|
config,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Promote a private server to shared (public) registry.
|
||||||
|
* Use case: Admin shares a private server with PUBLIC
|
||||||
|
*
|
||||||
|
* Migrates server from private user caches to shared registry (app or user tier).
|
||||||
|
* Removes from all private caches to avoid duplication.
|
||||||
|
*
|
||||||
|
* @param serverName - Server name
|
||||||
|
* @param config - Server configuration
|
||||||
|
*/
|
||||||
|
public static async promoteToSharedServer(
|
||||||
|
serverName: string,
|
||||||
|
config: t.ParsedServerConfig,
|
||||||
|
): Promise<void> {
|
||||||
|
logger.info(`${this.prefix(serverName)} Promoting to shared server`);
|
||||||
|
|
||||||
|
// 1. Add to shared registry (app or user tier based on config)
|
||||||
|
await registry.addSharedServer(serverName, config);
|
||||||
|
|
||||||
|
// 2. Remove from all private user caches
|
||||||
|
const affectedUsers = await registry.privateServersCache.findUsersWithServer(serverName);
|
||||||
|
if (affectedUsers.length > 0) {
|
||||||
|
logger.debug(
|
||||||
|
`${this.prefix(serverName)} Removing from ${affectedUsers.length} private user caches`,
|
||||||
|
);
|
||||||
|
await registry.privateServersCache.removeServerConfigIfCacheExists(affectedUsers, serverName);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`${this.prefix(serverName)} Successfully promoted to shared server`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Demote a shared server to private registry.
|
||||||
|
* Use case: Admin un-shares a server from PUBLIC
|
||||||
|
*
|
||||||
|
* Removes server from shared registry and adds to specified users' private caches.
|
||||||
|
* Only adds to users with initialized caches.
|
||||||
|
*
|
||||||
|
* @param serverName - Server name
|
||||||
|
* @param allowedUserIds - Array of user IDs who should have private access
|
||||||
|
* @param config - Server configuration
|
||||||
|
*/
|
||||||
|
public static async demoteToPrivateServer(
|
||||||
|
serverName: string,
|
||||||
|
allowedUserIds: string[],
|
||||||
|
config: t.ParsedServerConfig,
|
||||||
|
): Promise<void> {
|
||||||
|
logger.info(`${this.prefix(serverName)} Demoting to private server`);
|
||||||
|
|
||||||
|
// 1. Remove from shared registries
|
||||||
|
await registry.removeServer(serverName);
|
||||||
|
|
||||||
|
// 2. Add to private caches for allowed users (only if caches exist)
|
||||||
|
if (allowedUserIds.length > 0) {
|
||||||
|
logger.debug(
|
||||||
|
`${this.prefix(serverName)} Adding to ${allowedUserIds.length} users' private caches`,
|
||||||
|
);
|
||||||
|
await registry.privateServersCache.addServerConfigIfCacheExists(
|
||||||
|
allowedUserIds,
|
||||||
|
serverName,
|
||||||
|
config,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`${this.prefix(serverName)} Successfully demoted to private server`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns formatted log prefix for server messages
|
||||||
|
private static prefix(serverName: string): string {
|
||||||
|
return `[MCP][PrivateServer][${serverName}]`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
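A minimal usage sketch for the loader above. The fetchUserMCPServers DB helper, the onUserLogin hook, and the '~/mcp/registry/MCPPrivateServerLoader' import path are illustrative assumptions, not part of this diff.

import { MCPPrivateServerLoader } from '~/mcp/registry/MCPPrivateServerLoader'; // assumed alias path
import type { MCPServerDB } from 'librechat-data-provider';

// Hypothetical DB accessor; in the real app this would query the user's MCP server documents.
async function fetchUserMCPServers(userId: string): Promise<MCPServerDB[]> {
  return []; // placeholder result
}

// On user login: lazy-load the user's private servers under the distributed lock,
// with the default one-hour TTL documented in loadPrivateServers().
export async function onUserLogin(userId: string): Promise<void> {
  await MCPPrivateServerLoader.loadPrivateServers(userId, fetchUserMCPServers, 3_600_000);
}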
@@ -61,18 +61,11 @@ export class MCPServersInitializer {
   }
 
   /** Initializes a single server with all its metadata and adds it to appropriate collections */
-  private static async initializeServer(
-    serverName: string,
-    rawConfig: t.MCPOptions,
-  ): Promise<void> {
+  public static async initializeServer(serverName: string, rawConfig: t.MCPOptions): Promise<void> {
     try {
       const config = await MCPServerInspector.inspect(serverName, rawConfig);
 
-      if (config.startup === false || config.requiresOAuth) {
-        await registry.sharedUserServers.add(serverName, config);
-      } else {
-        await registry.sharedAppServers.add(serverName, config);
-      }
+      await registry.addSharedServer(serverName, config);
       MCPServersInitializer.logParsedConfig(serverName, config);
     } catch (error) {
       logger.error(`${MCPServersInitializer.prefix(serverName)} Failed to initialize:`, error);
@@ -3,24 +3,35 @@ import {
   ServerConfigsCacheFactory,
   type ServerConfigsCache,
 } from './cache/ServerConfigsCacheFactory';
+import {
+  PrivateServerConfigsCache,
+  PrivateServerConfigsCacheFactory,
+} from './cache/PrivateServerConfigs/PrivateServerConfigsCacheFactory';
 
 /**
  * Central registry for managing MCP server configurations across different scopes and users.
- * Maintains three categories of server configurations:
+ * Authoritative source of truth for all MCP servers provided by LibreChat.
+ *
+ * Maintains three-tier cache structure:
  * - Shared App Servers: Auto-started servers available to all users (initialized at startup)
  * - Shared User Servers: User-scope servers that require OAuth or on-demand startup
- * - Private User Servers: Per-user configurations dynamically added during runtime
+ * - Private Servers: Per-user configurations dynamically added during runtime
  *
- * Provides a unified interface for retrieving server configs with proper fallback hierarchy:
+ * Provides a unified query interface with proper fallback hierarchy:
  * checks shared app servers first, then shared user servers, then private user servers.
- * Falls back to raw config when servers haven't been initialized yet or failed to initialize.
- * Handles server lifecycle operations including adding, removing, and querying configurations.
  */
 class MCPServersRegistry {
-  public readonly sharedAppServers = ServerConfigsCacheFactory.create('App', false);
-  public readonly sharedUserServers = ServerConfigsCacheFactory.create('User', false);
-  private readonly privateUserServers: Map<string | undefined, ServerConfigsCache> = new Map();
-  private rawConfigs: t.MCPServers = {};
+  public readonly sharedAppServers: ServerConfigsCache = ServerConfigsCacheFactory.create(
+    'App',
+    'Shared',
+    false,
+  );
+
+  public readonly sharedUserServers: ServerConfigsCache = ServerConfigsCacheFactory.create(
+    'User',
+    'Shared',
+    false,
+  );
 
   /**
    * Stores the raw MCP configuration as a fallback when servers haven't been initialized yet.
@@ -30,31 +41,10 @@ class MCPServersRegistry {
     this.rawConfigs = configs;
   }
 
-  public async addPrivateUserServer(
-    userId: string,
-    serverName: string,
-    config: t.ParsedServerConfig,
-  ): Promise<void> {
-    if (!this.privateUserServers.has(userId)) {
-      const cache = ServerConfigsCacheFactory.create(`User(${userId})`, false);
-      this.privateUserServers.set(userId, cache);
-    }
-    await this.privateUserServers.get(userId)!.add(serverName, config);
-  }
-
-  public async updatePrivateUserServer(
-    userId: string,
-    serverName: string,
-    config: t.ParsedServerConfig,
-  ): Promise<void> {
-    const userCache = this.privateUserServers.get(userId);
-    if (!userCache) throw new Error(`No private servers found for user "${userId}".`);
-    await userCache.update(serverName, config);
-  }
-
-  public async removePrivateUserServer(userId: string, serverName: string): Promise<void> {
-    await this.privateUserServers.get(userId)?.remove(serverName);
-  }
-
+  public readonly privateServersCache: PrivateServerConfigsCache =
+    PrivateServerConfigsCacheFactory.create();
+
+  private rawConfigs: t.MCPServers = {};
+
   public async getServerConfig(
     serverName: string,
@@ -63,26 +53,25 @@
     const sharedAppServer = await this.sharedAppServers.get(serverName);
     if (sharedAppServer) return sharedAppServer;
 
+    if (userId) {
+      // We require a userId to also access shared user servers, so that getServerConfig(serverName, undefined) returns only app-level configs.
       const sharedUserServer = await this.sharedUserServers.get(serverName);
       if (sharedUserServer) return sharedUserServer;
 
-    const privateUserServer = await this.privateUserServers.get(userId)?.get(serverName);
-    if (privateUserServer) return privateUserServer;
+      const privateUserServer = await this.privateServersCache.get(userId, serverName);
+      if (privateUserServer) return privateUserServer;
+    }
 
-    /** Fallback to raw config if server hasn't been initialized yet */
-    const rawConfig = this.rawConfigs[serverName];
-    if (rawConfig) return rawConfig as t.ParsedServerConfig;
-
     return undefined;
   }
 
   public async getAllServerConfigs(userId?: string): Promise<Record<string, t.ParsedServerConfig>> {
+    const privateConfigs = userId ? await this.privateServersCache.getAll(userId) : {};
     const registryConfigs = {
       ...(await this.sharedAppServers.getAll()),
       ...(await this.sharedUserServers.getAll()),
-      ...((await this.privateUserServers.get(userId)?.getAll()) ?? {}),
+      ...privateConfigs,
     };
 
     /** Include all raw configs, but registry configs take precedence (they have inspection data) */
     const allConfigs: Record<string, t.ParsedServerConfig> = {};
     for (const serverName in this.rawConfigs) {
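To make the lookup hierarchy concrete, a hedged example of the intended behaviour of getServerConfig after this change; the server names ('filesystem', 'my-notes') and the import path are illustrative assumptions.

import { mcpServersRegistry } from '~/mcp/registry/MCPServersRegistry'; // assumed alias path

export async function demoLookup(): Promise<void> {
  // App-tier servers resolve without a userId.
  const appLevel = await mcpServersRegistry.getServerConfig('filesystem');

  // Without a userId, the shared-user and private tiers are skipped entirely,
  // so a purely private server resolves to undefined.
  const withoutUser = await mcpServersRegistry.getServerConfig('my-notes');

  // With a userId, the lookup falls through app -> shared user -> private cache.
  const withUser = await mcpServersRegistry.getServerConfig('my-notes', 'user-123');

  console.log({ appLevel, withoutUser, withUser });
}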
@@ -105,13 +94,57 @@
     return new Set(oauthServers.map(([name]) => name));
   }
 
+  /**
+   * Add a shared server configuration.
+   * Automatically routes to appropriate cache (app vs user) based on config properties.
+   * - Servers requiring OAuth or with startup=false → sharedUserServers
+   * - All other servers → sharedAppServers
+   *
+   * @param serverName - Name of the MCP server
+   * @param config - Parsed server configuration
+   */
+  public async addSharedServer(serverName: string, config: t.ParsedServerConfig): Promise<void> {
+    if (config.requiresOAuth || config.startup === false) {
+      await this.sharedUserServers.add(serverName, config);
+    } else {
+      await this.sharedAppServers.add(serverName, config);
+    }
+  }
+
   public async reset(): Promise<void> {
     await this.sharedAppServers.reset();
     await this.sharedUserServers.reset();
-    for (const cache of this.privateUserServers.values()) {
-      await cache.reset();
-    }
-    this.privateUserServers.clear();
+    await this.privateServersCache.resetAll();
   }
+
+  public async removeServer(serverName: string, userId?: string): Promise<void> {
+    const appServer = await this.sharedAppServers.get(serverName);
+    if (appServer) {
+      await this.sharedAppServers.remove(serverName);
+      return;
+    }
+
+    const userServer = await this.sharedUserServers.get(serverName);
+    if (userServer) {
+      await this.sharedUserServers.remove(serverName);
+      return;
+    }
+
+    if (userId) {
+      const privateServer = await this.privateServersCache.get(userId, serverName);
+      if (privateServer) {
+        await this.privateServersCache.remove(userId, serverName);
+        return;
+      }
+    } else {
+      const affectedUsers = await this.privateServersCache.findUsersWithServer(serverName);
+      if (affectedUsers.length > 0) {
+        await this.privateServersCache.removeServerConfigIfCacheExists(affectedUsers, serverName);
+        return;
+      }
+    }
+
+    throw new Error(`Server ${serverName} not found`);
+  }
 }
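A short sketch of the routing rule in addSharedServer above; the server names, simplified config shapes, and import path are illustrative assumptions, not values from this diff.

import { mcpServersRegistry } from '~/mcp/registry/MCPServersRegistry'; // assumed alias path

export async function demoRouting(): Promise<void> {
  // Requires OAuth -> stored in the shared *user* tier (started on demand per user).
  await mcpServersRegistry.addSharedServer('github', {
    url: 'https://example.com/mcp',
    requiresOAuth: true,
  });

  // No OAuth and startup not disabled -> stored in the shared *app* tier (auto-started).
  await mcpServersRegistry.addSharedServer('filesystem', {
    command: 'npx',
    args: ['@modelcontextprotocol/server-filesystem', '/tmp'],
  });
}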

@@ -0,0 +1,593 @@
import type * as t from '~/mcp/types';
import type { MCPServerDB } from 'librechat-data-provider';

describe('MCPPrivateServerLoader Cache Integration Tests', () => {
  let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
  let registry: typeof import('../MCPServersRegistry').mcpServersRegistry;
  let MCPPrivateServerLoader: typeof import('../MCPPrivateServerLoader').MCPPrivateServerLoader;
  let loadStatusCache: typeof import('../cache/PrivateServersLoadStatusCache').privateServersLoadStatusCache;

  const mockConfig1: t.ParsedServerConfig = {
    command: 'node',
    args: ['server1.js'],
    env: { TEST: 'value1' },
  };

  const mockConfig2: t.ParsedServerConfig = {
    command: 'python',
    args: ['server2.py'],
    env: { TEST: 'value2' },
  };

  const mockConfig3: t.ParsedServerConfig = {
    url: 'http://localhost:3000',
    requiresOAuth: true,
  };

  const mockUpdatedConfig: t.ParsedServerConfig = {
    command: 'node',
    args: ['server1-updated.js'],
    env: { TEST: 'updated', NEW_VAR: 'added' },
  };

  beforeAll(async () => {
    jest.resetModules();
    // Set up environment variables for Redis (only if not already set)
    process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
    process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'true';
    process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
    process.env.REDIS_KEY_PREFIX =
      process.env.REDIS_KEY_PREFIX ?? `MCPPrivateServerLoader-integration-test`;

    // Import modules after setting env vars
    const registryModule = await import('../MCPServersRegistry');
    const redisClients = await import('~/cache/redisClients');
    const MCPPrivateServerLoaderModule = await import('../MCPPrivateServerLoader');
    const loadStatusCacheLoaderModule = await import('../cache/PrivateServersLoadStatusCache');

    loadStatusCache = loadStatusCacheLoaderModule.privateServersLoadStatusCache;
    MCPPrivateServerLoader = MCPPrivateServerLoaderModule.MCPPrivateServerLoader;
    registry = registryModule.mcpServersRegistry;
    keyvRedisClient = redisClients.keyvRedisClient;

    if (!keyvRedisClient) throw new Error('Redis client is not initialized');

    await redisClients.keyvRedisClientReady;
  });

  beforeEach(async () => {
    // Clear load status for commonly used test users
    // This ensures each test starts with a clean state
    const testUsers = ['user1', 'user2', 'user3', 'user4', 'user5', 'user99'];
    for (const userId of testUsers) {
      try {
        await loadStatusCache.clearLoaded(userId);
      } catch {
        // Ignore errors (user may not have load status set)
      }
    }
  });

  afterEach(async () => {
    // Clean up test keys and restore all mocks after each test
    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
      const pattern = '*MCPPrivateServerLoader-integration-test*';
      const keysToDelete: string[] = [];

      // Collect all keys first
      for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
        keysToDelete.push(key);
      }

      // Delete in parallel for cluster mode efficiency
      if (keysToDelete.length > 0) {
        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
      }
    }
    jest.restoreAllMocks();
  });

  afterAll(async () => {
    if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
  });

  describe('loadPrivateServers() integration', () => {
    it('should load private servers from configsLoader and cache them', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);

      // Verify servers were cached
      const server1 = await registry.privateServersCache.get('user1', 'server1');
      const server2 = await registry.privateServersCache.get('user1', 'server2');

      expect(server1).toMatchObject(mockConfig1);
      expect(server2).toMatchObject(mockConfig2);
      expect(configsLoader).toHaveBeenCalledWith('user1');
    });

    it('should not reload servers if already cached for user', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);

      // First load
      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);
      expect(configsLoader).toHaveBeenCalledTimes(1);

      // Second load should skip
      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);
      expect(configsLoader).toHaveBeenCalledTimes(1); // Still 1, not called again
    });

    it('should isolate servers between different users', async () => {
      const user1Configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];

      const user2Configs: MCPServerDB[] = [
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      const user1Loader = jest.fn().mockResolvedValue(user1Configs);
      const user2Loader = jest.fn().mockResolvedValue(user2Configs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', user1Loader);
      await MCPPrivateServerLoader.loadPrivateServers('user2', user2Loader);

      // Verify isolation
      const user1Server1 = await registry.privateServersCache.get('user1', 'server1');
      const user1Server2 = await registry.privateServersCache.get('user1', 'server2');
      const user2Server1 = await registry.privateServersCache.get('user2', 'server1');
      const user2Server2 = await registry.privateServersCache.get('user2', 'server2');

      expect(user1Server1).toMatchObject(mockConfig1);
      expect(user1Server2).toBeUndefined();
      expect(user2Server1).toBeUndefined();
      expect(user2Server2).toMatchObject(mockConfig2);
    });

    it('should handle partial failures gracefully', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      // Mock to fail on second server
      let callCount = 0;
      jest
        .spyOn(registry.privateServersCache, 'add')
        .mockImplementation(async (userId, serverName, config) => {
          callCount++;
          if (callCount === 2) {
            throw new Error('Cache write failed for server2');
          }
          // Call the real implementation for other calls
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          const cache = (registry.privateServersCache as any).getOrCreatePrivateUserCache(userId);
          return cache.add(serverName, config);
        });

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);

      await expect(
        MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader),
      ).rejects.toThrow('Cache write failed for server2');

      // Verify first server was added before failure
      const server1 = await registry.privateServersCache.get('user1', 'server1');
      expect(server1).toMatchObject(mockConfig1);

      jest.restoreAllMocks();
    });

    it('should reset cache before loading, removing old servers no longer in configs', async () => {
      // Initial load with server1 and server2
      const initialConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      const initialLoader = jest.fn().mockResolvedValue(initialConfigs);
      await MCPPrivateServerLoader.loadPrivateServers('user1', initialLoader);

      // Verify both servers are cached
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user1', 'server2')).toBeDefined();

      // Clear load status to force reload
      await loadStatusCache.clearLoaded('user1');

      // Second load with only server1 and a new server3
      const updatedConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-3',
          mcp_id: 'server3',
          config: mockConfig3,
        },
      ];

      const updatedLoader = jest.fn().mockResolvedValue(updatedConfigs);
      await MCPPrivateServerLoader.loadPrivateServers('user1', updatedLoader);

      // Verify server2 is removed (cache was reset)
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user1', 'server2')).toBeUndefined();
      expect(await registry.privateServersCache.get('user1', 'server3')).toBeDefined();
    });
  });

  describe('updatePrivateServer() integration', () => {
    beforeEach(async () => {
      // Setup: Load the same server for multiple users
      const configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];
      const loader = jest.fn().mockResolvedValue(configs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user2', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user3', loader);
    });

    it('should update server config for all users who have it', async () => {
      await MCPPrivateServerLoader.updatePrivateServer('server1', mockUpdatedConfig);

      // Verify all users got the update
      const user1Server = await registry.privateServersCache.get('user1', 'server1');
      const user2Server = await registry.privateServersCache.get('user2', 'server1');
      const user3Server = await registry.privateServersCache.get('user3', 'server1');

      expect(user1Server).toMatchObject({
        command: 'node',
        args: ['server1-updated.js'],
        env: { TEST: 'updated', NEW_VAR: 'added' },
      });
      expect(user2Server).toMatchObject(user1Server!);
      expect(user3Server).toMatchObject(user1Server!);
    });

    it('should not affect other servers', async () => {
      // Add another server to user1
      await registry.privateServersCache.add('user1', 'server2', mockConfig2);

      await MCPPrivateServerLoader.updatePrivateServer('server1', mockUpdatedConfig);

      // Verify server2 unchanged
      const server2 = await registry.privateServersCache.get('user1', 'server2');
      expect(server2).toMatchObject(mockConfig2);
    });

    it('should handle updating non-existent server gracefully', async () => {
      await expect(
        MCPPrivateServerLoader.updatePrivateServer('non-existent-server', mockUpdatedConfig),
      ).resolves.not.toThrow();
    });

    it('should handle promoted server by updating shared registry instead of private caches', async () => {
      // First add server to shared registry (simulating promotion)
      await registry.addSharedServer('promoted-server', mockConfig1);

      // Verify it's in shared registry
      const sharedConfig = await registry.getServerConfig('promoted-server');
      expect(sharedConfig).toBeDefined();

      // Now update it - should go through the promoted server path
      await MCPPrivateServerLoader.updatePrivateServer('promoted-server', mockUpdatedConfig);

      // Verify the shared registry was updated with new config
      const updatedSharedConfig = await registry.getServerConfig('promoted-server');
      expect(updatedSharedConfig).toMatchObject(
        expect.objectContaining({
          command: 'node',
          args: ['server1-updated.js'],
          env: { TEST: 'updated', NEW_VAR: 'added' },
        }),
      );

      // Verify users' private caches were NOT updated (no users had it privately)
      expect(await registry.privateServersCache.get('user1', 'promoted-server')).toBeUndefined();
      expect(await registry.privateServersCache.get('user2', 'promoted-server')).toBeUndefined();
      expect(await registry.privateServersCache.get('user3', 'promoted-server')).toBeUndefined();
    });

    it('should handle promoted server update when users had it privately before promotion', async () => {
      // Setup: Users have server1 in private caches
      const configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];
      const loader = jest.fn().mockResolvedValue(configs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user2', loader);

      // Verify they have it privately
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user2', 'server1')).toBeDefined();

      await MCPPrivateServerLoader.promoteToSharedServer('server1', mockConfig1);
      // Verify it's now in shared registry and removed from private caches
      expect(await registry.getServerConfig('server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeUndefined();
      expect(await registry.privateServersCache.get('user2', 'server1')).toBeUndefined();

      // Now update it - should update shared registry
      await MCPPrivateServerLoader.updatePrivateServer('server1', mockUpdatedConfig);

      // Verify shared registry was updated
      const updatedSharedConfig = await registry.getServerConfig('server1');
      expect(updatedSharedConfig).toMatchObject(
        expect.objectContaining({
          command: 'node',
          args: ['server1-updated.js'],
        }),
      );
    });
  });

  describe('updatePrivateServerAccess() integration', () => {
    beforeEach(async () => {
      // Setup: Load server for user1, user2, user3
      const configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];
      const loader = jest.fn().mockResolvedValue(configs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user2', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user3', loader);

      // Also initialize cache for user4 and user5 but without server1
      await registry.privateServersCache.add('user4', 'other-server', mockConfig2);
      await registry.privateServersCache.add('user5', 'other-server', mockConfig2);
    });

    it('should revoke access from all users when allowedUserIds is empty', async () => {
      await MCPPrivateServerLoader.updatePrivateServerAccess('server1', [], mockConfig1);

      // Verify all users lost access
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeUndefined();
      expect(await registry.privateServersCache.get('user2', 'server1')).toBeUndefined();
      expect(await registry.privateServersCache.get('user3', 'server1')).toBeUndefined();
    });

    it('should grant access to new users with initialized caches', async () => {
      const allowedUserIds = ['user1', 'user4', 'user5'];

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // user1 should still have it
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeDefined();

      // user4 and user5 should now have it (they had initialized caches)
      expect(await registry.privateServersCache.get('user4', 'server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user5', 'server1')).toBeDefined();

      // user2 and user3 should have lost access
      expect(await registry.privateServersCache.get('user2', 'server1')).toBeUndefined();
      expect(await registry.privateServersCache.get('user3', 'server1')).toBeUndefined();
    });

    it('should not grant access to users without initialized caches', async () => {
      const allowedUserIds = ['user1', 'user99']; // user99 has no cache

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // user1 should still have it
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeDefined();

      // user99 should not have it (cache not initialized)
      expect(await registry.privateServersCache.get('user99', 'server1')).toBeUndefined();
    });

    it('should handle complex permission changes', async () => {
      // Start: user1, user2, user3 have server1
      // End: user2, user4, user5 should have server1

      const allowedUserIds = ['user2', 'user4', 'user5'];

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // Revoked: user1, user3
      expect(await registry.privateServersCache.get('user1', 'server1')).toBeUndefined();
      expect(await registry.privateServersCache.get('user3', 'server1')).toBeUndefined();

      // Kept: user2
      expect(await registry.privateServersCache.get('user2', 'server1')).toBeDefined();

      // Granted: user4, user5
      expect(await registry.privateServersCache.get('user4', 'server1')).toBeDefined();
      expect(await registry.privateServersCache.get('user5', 'server1')).toBeDefined();
    });

    it('should be idempotent when called with same permissions', async () => {
      const allowedUserIds = ['user1', 'user2'];

      // First call
      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // eslint-disable-next-line @typescript-eslint/no-unused-vars
      const { lastUpdatedAt: _1, ...user1ServerAfterFirst } =
        (await registry.privateServersCache.get('user1', 'server1')) || {};
      // eslint-disable-next-line @typescript-eslint/no-unused-vars
      const { lastUpdatedAt: _2, ...user2ServerAfterFirst } =
        (await registry.privateServersCache.get('user2', 'server1')) || {};
      // Second call with same permissions
      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // eslint-disable-next-line @typescript-eslint/no-unused-vars
      const { lastUpdatedAt: _3, ...user1ServerAfterSecond } =
        (await registry.privateServersCache.get('user1', 'server1')) || {};
      // eslint-disable-next-line @typescript-eslint/no-unused-vars
      const { lastUpdatedAt: _4, ...user2ServerAfterSecond } =
        (await registry.privateServersCache.get('user2', 'server1')) || {};

      // Should be unchanged
      expect(user1ServerAfterSecond).toMatchObject(user1ServerAfterFirst!);
      expect(user2ServerAfterSecond).toMatchObject(user2ServerAfterFirst!);
    });
  });

  describe('Combined operations integration', () => {
    it('should handle load, update metadata, and update access in sequence', async () => {
      // 1. Load servers for user1 and user2
      const configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];
      const loader = jest.fn().mockResolvedValue(configs);

      await MCPPrivateServerLoader.loadPrivateServers('user1', loader);
      await MCPPrivateServerLoader.loadPrivateServers('user2', loader);

      // Verify initial state
      let user1Server = await registry.privateServersCache.get('user1', 'server1');
      let user2Server = await registry.privateServersCache.get('user2', 'server1');
      expect((user1Server as typeof mockConfig1)?.args).toEqual(['server1.js']);
      expect((user2Server as typeof mockConfig1)?.args).toEqual(['server1.js']);

      // 2. Update metadata for all users
      await MCPPrivateServerLoader.updatePrivateServer('server1', mockUpdatedConfig);

      user1Server = await registry.privateServersCache.get('user1', 'server1');
      user2Server = await registry.privateServersCache.get('user2', 'server1');

      expect((user1Server as typeof mockUpdatedConfig)?.args).toEqual(['server1-updated.js']);
      expect((user2Server as typeof mockUpdatedConfig)?.args).toEqual(['server1-updated.js']);

      // 3. Update access - revoke from user1
      await MCPPrivateServerLoader.updatePrivateServerAccess('server1', ['user2'], mockConfig3);

      user1Server = await registry.privateServersCache.get('user1', 'server1');
      user2Server = await registry.privateServersCache.get('user2', 'server1');

      expect(user1Server).toBeUndefined();
      expect(user2Server).toBeDefined();
    });

    it('should handle concurrent user logins correctly', async () => {
      const user1Configs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];
      const user2Configs: MCPServerDB[] = [
        {
          _id: 'mock-id-2',
          mcp_id: 'server1',
          config: mockConfig2,
        },
      ];
      const user3Configs: MCPServerDB[] = [
        {
          _id: 'mock-id-3',
          mcp_id: 'server2',
          config: mockConfig3,
        },
      ];

      const user1Loader = jest.fn().mockResolvedValue(user1Configs);
      const user2Loader = jest.fn().mockResolvedValue(user2Configs);
      const user3Loader = jest.fn().mockResolvedValue(user3Configs);

      // Simulate concurrent loads
      await Promise.all([
        MCPPrivateServerLoader.loadPrivateServers('user1', user1Loader),
        MCPPrivateServerLoader.loadPrivateServers('user2', user2Loader),
        MCPPrivateServerLoader.loadPrivateServers('user3', user3Loader),
      ]);

      // Verify all users got their configs
      const user1Server1 = await registry.privateServersCache.get('user1', 'server1');
      const user2Server1 = await registry.privateServersCache.get('user2', 'server1');
      const user3Server2 = await registry.privateServersCache.get('user3', 'server2');

      expect(user1Server1).toMatchObject(mockConfig1);
      expect(user2Server1).toMatchObject(mockConfig2);
      expect(user3Server2).toMatchObject(mockConfig3);
    });
  });
});

@@ -0,0 +1,702 @@
import { mcpServersRegistry as registry } from '../MCPServersRegistry';
import { privateServersLoadStatusCache as loadStatusCache } from '../cache/PrivateServersLoadStatusCache';
import { MCPPrivateServerLoader } from '../MCPPrivateServerLoader';
import { logger } from '@librechat/data-schemas';
import type * as t from '~/mcp/types';
import type { MCPServerDB } from 'librechat-data-provider';

// Mock dependencies
jest.mock('../MCPServersRegistry', () => ({
  mcpServersRegistry: {
    privateServersCache: {
      get: jest.fn(),
      add: jest.fn(),
      reset: jest.fn(),
      updateServerConfigIfExists: jest.fn(),
      findUsersWithServer: jest.fn(),
      removeServerConfigIfCacheExists: jest.fn(),
      addServerConfigIfCacheExists: jest.fn(),
    },
    getServerConfig: jest.fn(),
    addSharedServer: jest.fn(),
    removeServer: jest.fn(),
  },
}));

jest.mock('../cache/PrivateServersLoadStatusCache', () => ({
  privateServersLoadStatusCache: {
    isLoaded: jest.fn(),
    setLoaded: jest.fn(),
    acquireLoadLock: jest.fn(),
    releaseLoadLock: jest.fn(),
    waitForLoad: jest.fn(),
  },
}));

jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    info: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

describe('MCPPrivateServerLoader', () => {
  const mockConfig1: t.ParsedServerConfig = {
    command: 'node',
    args: ['server1.js'],
    env: { TEST: 'value1' },
    lastUpdatedAt: Date.now(),
  };

  const mockConfig2: t.ParsedServerConfig = {
    command: 'python',
    args: ['server2.py'],
    env: { TEST: 'value2' },
    lastUpdatedAt: Date.now(),
  };

  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('loadPrivateServers()', () => {
    it('should validate userId and throw error if empty', async () => {
      const configsLoader = jest.fn();

      await expect(MCPPrivateServerLoader.loadPrivateServers('', configsLoader)).rejects.toThrow(
        'userId is required and cannot be empty',
      );

      await expect(MCPPrivateServerLoader.loadPrivateServers(' ', configsLoader)).rejects.toThrow(
        'userId is required and cannot be empty',
      );
    });

    it('should validate configsLoader and throw error if not a function', async () => {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      await expect(MCPPrivateServerLoader.loadPrivateServers('user1', null as any)).rejects.toThrow(
        'configsLoader must be a function',
      );

      await expect(
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        MCPPrivateServerLoader.loadPrivateServers('user1', 'not-a-function' as any),
      ).rejects.toThrow('configsLoader must be a function');
    });

    it('should skip loading if user servers are already loaded', async () => {
      const configsLoader = jest.fn();
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(true);

      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);

      expect(loadStatusCache.isLoaded).toHaveBeenCalledWith('user1');
      expect(configsLoader).not.toHaveBeenCalled();
      expect(loadStatusCache.acquireLoadLock).not.toHaveBeenCalled();
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][PrivateServerLoader] User user1 private servers already loaded',
      );
    });

    it('should load private servers for a user successfully', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
      (loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);
      (registry.privateServersCache.get as jest.Mock).mockResolvedValue(undefined);

      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);

      expect(loadStatusCache.acquireLoadLock).toHaveBeenCalledWith('user1');
      expect(configsLoader).toHaveBeenCalledWith('user1');
      expect(registry.privateServersCache.add).toHaveBeenCalledWith('user1', 'server1', {
        ...mockConfig1,
        dbId: 'mock-id-1',
      });
      expect(registry.privateServersCache.add).toHaveBeenCalledWith('user1', 'server2', {
        ...mockConfig2,
        dbId: 'mock-id-2',
      });
      expect(loadStatusCache.setLoaded).toHaveBeenCalledWith('user1', 3600_000);
      expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServerLoader] Loading private servers for user user1',
      );
    });

    it('should skip servers that already exist in cache', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
        {
          _id: 'mock-id-2',
          mcp_id: 'server2',
          config: mockConfig2,
        },
      ];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
      (loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);
      (registry.privateServersCache.get as jest.Mock)
        .mockResolvedValueOnce(mockConfig1) // server1 exists
        .mockResolvedValueOnce(undefined); // server2 doesn't exist

      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);

      expect(registry.privateServersCache.add).toHaveBeenCalledTimes(1);
      expect(registry.privateServersCache.add).toHaveBeenCalledWith('user1', 'server2', {
        ...mockConfig2,
        dbId: 'mock-id-2',
      });
      expect(loadStatusCache.setLoaded).toHaveBeenCalledWith('user1', 3600_000);
      expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Private server already exists for user user1',
      );
    });

    it('should throw error if configsLoader fails', async () => {
      const configsLoader = jest.fn().mockRejectedValue(new Error('DB connection failed'));
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
      (loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);

      await expect(
        MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader),
      ).rejects.toThrow('DB connection failed');

      expect(logger.error).toHaveBeenCalledWith(
        '[MCP][PrivateServerLoader] Loading private servers for user user1 failed.',
        expect.any(Error),
      );
      expect(loadStatusCache.setLoaded).not.toHaveBeenCalled();
      expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
    });

    it('should throw error if cache.add fails', async () => {
      const mockConfigs: MCPServerDB[] = [
        {
          _id: 'mock-id-1',
          mcp_id: 'server1',
          config: mockConfig1,
        },
      ];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
      (loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);
      (registry.privateServersCache.get as jest.Mock).mockResolvedValue(undefined);
      (registry.privateServersCache.add as jest.Mock).mockRejectedValue(
        new Error('Cache write failed'),
      );

      await expect(
        MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader),
      ).rejects.toThrow('Cache write failed');
      expect(loadStatusCache.setLoaded).not.toHaveBeenCalled();
      expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
    });

    it('should handle empty configs gracefully', async () => {
      const mockConfigs: MCPServerDB[] = [];

      const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
      (loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
      (loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);

      await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);

      expect(registry.privateServersCache.add).not.toHaveBeenCalled();
      expect(loadStatusCache.setLoaded).toHaveBeenCalledWith('user1', 3600_000);
      expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
    });

it('should prevent partial loads after crash - loaded flag not set on failure', async () => {
|
||||||
|
const mockConfigs: MCPServerDB[] = [
|
||||||
|
{
|
||||||
|
_id: 'mock-id-1',
|
||||||
|
mcp_id: 'server1',
|
||||||
|
config: mockConfig1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
_id: 'mock-id-2',
|
||||||
|
mcp_id: 'server2',
|
||||||
|
config: mockConfig2,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
|
||||||
|
(loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
|
||||||
|
(loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);
|
||||||
|
(registry.privateServersCache.get as jest.Mock).mockResolvedValue(undefined);
|
||||||
|
|
||||||
|
// Simulate crash after loading first server
|
||||||
|
(registry.privateServersCache.add as jest.Mock)
|
||||||
|
.mockResolvedValueOnce(undefined) // server1 succeeds
|
||||||
|
.mockRejectedValueOnce(new Error('Process crashed')); // server2 fails
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader),
|
||||||
|
).rejects.toThrow('Process crashed');
|
||||||
|
|
||||||
|
// Loaded flag should NOT be set
|
||||||
|
expect(loadStatusCache.setLoaded).not.toHaveBeenCalled();
|
||||||
|
// Lock should be released even on error
|
||||||
|
expect(loadStatusCache.releaseLoadLock).toHaveBeenCalledWith('user1');
|
||||||
|
|
||||||
|
// On next call, should retry full load
|
||||||
|
jest.clearAllMocks();
|
||||||
|
(loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
|
||||||
|
(loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(true);
|
||||||
|
(registry.privateServersCache.add as jest.Mock).mockResolvedValue(undefined);
|
||||||
|
await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);
|
||||||
|
|
||||||
|
// Now flag should be set
|
||||||
|
expect(loadStatusCache.setLoaded).toHaveBeenCalledWith('user1', 3600_000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should wait for another process when lock is already held', async () => {
|
||||||
|
const configsLoader = jest.fn();
|
||||||
|
(loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
|
||||||
|
(loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(false); // Lock held
|
||||||
|
(loadStatusCache.waitForLoad as jest.Mock).mockResolvedValue(true); // Wait completes
|
||||||
|
|
||||||
|
await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);
|
||||||
|
|
||||||
|
expect(loadStatusCache.acquireLoadLock).toHaveBeenCalledWith('user1');
|
||||||
|
expect(loadStatusCache.waitForLoad).toHaveBeenCalledWith('user1');
|
||||||
|
expect(configsLoader).not.toHaveBeenCalled(); // Didn't load, waited instead
|
||||||
|
expect(logger.debug).toHaveBeenCalledWith(
|
||||||
|
'[MCP][PrivateServerLoader] Another process is loading user user1, waiting...',
|
||||||
|
);
|
||||||
|
expect(logger.debug).toHaveBeenCalledWith(
|
||||||
|
'[MCP][PrivateServerLoader] User user1 loaded by another process',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should retry lock acquisition after wait timeout', async () => {
|
||||||
|
const mockConfigs: MCPServerDB[] = [
|
||||||
|
{
|
||||||
|
_id: 'mock-id-1',
|
||||||
|
mcp_id: 'server1',
|
||||||
|
config: mockConfig1,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
const configsLoader = jest.fn().mockResolvedValue(mockConfigs);
|
||||||
|
(loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
|
||||||
|
(loadStatusCache.acquireLoadLock as jest.Mock)
|
||||||
|
.mockResolvedValueOnce(false) // First attempt: lock held
|
||||||
|
.mockResolvedValueOnce(true); // Retry after timeout: success
|
||||||
|
(loadStatusCache.waitForLoad as jest.Mock).mockResolvedValue(false); // Wait times out
|
||||||
|
(registry.privateServersCache.get as jest.Mock).mockResolvedValue(undefined);
|
||||||
|
|
||||||
|
await MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader);
|
||||||
|
|
||||||
|
expect(loadStatusCache.acquireLoadLock).toHaveBeenCalledTimes(2);
|
||||||
|
expect(loadStatusCache.waitForLoad).toHaveBeenCalledWith('user1');
|
||||||
|
expect(configsLoader).toHaveBeenCalled(); // Loaded after retry
|
||||||
|
expect(logger.warn).toHaveBeenCalledWith(
|
||||||
|
'[MCP][PrivateServerLoader] Timeout waiting for user user1, retrying lock acquisition',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw error if retry lock acquisition fails', async () => {
|
||||||
|
const configsLoader = jest.fn();
|
||||||
|
(loadStatusCache.isLoaded as jest.Mock).mockResolvedValue(false);
|
||||||
|
(loadStatusCache.acquireLoadLock as jest.Mock).mockResolvedValue(false); // Both attempts fail
|
||||||
|
(loadStatusCache.waitForLoad as jest.Mock).mockResolvedValue(false); // Wait times out
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
MCPPrivateServerLoader.loadPrivateServers('user1', configsLoader),
|
||||||
|
).rejects.toThrow('Failed to acquire load lock for user user1');
|
||||||
|
|
||||||
|
expect(loadStatusCache.acquireLoadLock).toHaveBeenCalledTimes(2);
|
||||||
|
expect(configsLoader).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
});
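The tests above pin down a double-checked, lock-guarded load sequence. A minimal sketch of the flow they imply is shown below, assuming the same loadStatusCache and registry collaborators the spec mocks and the MCPServerDB shape used above; the control flow itself (single wait-and-retry, one-hour TTL, lock release in finally) is an inference from the assertions, not a copy of the actual implementation.

// Hypothetical sketch of MCPPrivateServerLoader.loadPrivateServers, inferred from the tests above
async function loadPrivateServersSketch(
  userId: string,
  configsLoader: () => Promise<MCPServerDB[]>,
): Promise<void> {
  if (await loadStatusCache.isLoaded(userId)) {
    return; // already loaded by this or another process
  }
  let locked = await loadStatusCache.acquireLoadLock(userId);
  if (!locked) {
    // Another process is loading; wait for it, then retry the lock once.
    if (await loadStatusCache.waitForLoad(userId)) {
      return;
    }
    locked = await loadStatusCache.acquireLoadLock(userId);
    if (!locked) {
      throw new Error(`Failed to acquire load lock for user ${userId}`);
    }
  }
  try {
    const rows = await configsLoader();
    for (const row of rows) {
      const existing = await registry.privateServersCache.get(userId, row.mcp_id);
      if (existing) {
        continue; // already cached for this user, skip
      }
      // Attach the DB id so later edits can be traced back to the document.
      await registry.privateServersCache.add(userId, row.mcp_id, { ...row.config, dbId: row._id });
    }
    await loadStatusCache.setLoaded(userId, 3600_000); // only after a complete load
  } finally {
    await loadStatusCache.releaseLoadLock(userId); // released on success and on failure
  }
}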

  describe('updatePrivateServer()', () => {
    it('should propagate metadata update to all users when server is not promoted', async () => {
      (registry.getServerConfig as jest.Mock).mockResolvedValue(undefined);

      await MCPPrivateServerLoader.updatePrivateServer('server1', mockConfig2);

      expect(registry.getServerConfig).toHaveBeenCalledWith('server1');
      expect(registry.privateServersCache.updateServerConfigIfExists).toHaveBeenCalledWith(
        'server1',
        mockConfig2,
      );
      expect(registry.removeServer).not.toHaveBeenCalled();
      expect(registry.addSharedServer).not.toHaveBeenCalled();
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Propagating metadata update to all users',
      );
    });

    it('should handle promoted server by updating shared registry', async () => {
      const sharedConfig: t.ParsedServerConfig = {
        command: 'node',
        args: ['shared.js'],
        env: { SHARED: 'true' },
        lastUpdatedAt: Date.now(),
      };

      (registry.getServerConfig as jest.Mock).mockResolvedValue(sharedConfig);

      await MCPPrivateServerLoader.updatePrivateServer('server1', mockConfig2);

      expect(registry.getServerConfig).toHaveBeenCalledWith('server1');
      expect(registry.removeServer).toHaveBeenCalledWith('server1');
      expect(registry.addSharedServer).toHaveBeenCalledWith('server1', mockConfig2);
      expect(registry.privateServersCache.updateServerConfigIfExists).not.toHaveBeenCalled();
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Promoted private server update',
      );
    });

    it('should throw error if updateServerConfigIfExists fails', async () => {
      (registry.getServerConfig as jest.Mock).mockResolvedValue(undefined);
      (registry.privateServersCache.updateServerConfigIfExists as jest.Mock).mockRejectedValue(
        new Error('Redis update failed'),
      );

      await expect(
        MCPPrivateServerLoader.updatePrivateServer('server1', mockConfig2),
      ).rejects.toThrow('Redis update failed');
    });

    it('should throw error if removeServer fails for promoted server', async () => {
      (registry.getServerConfig as jest.Mock).mockResolvedValue(mockConfig1);
      (registry.removeServer as jest.Mock).mockRejectedValue(new Error('Remove server failed'));

      await expect(
        MCPPrivateServerLoader.updatePrivateServer('server1', mockConfig2),
      ).rejects.toThrow('Remove server failed');
    });

    it('should throw error if addSharedServer fails for promoted server', async () => {
      (registry.getServerConfig as jest.Mock).mockResolvedValue(mockConfig1);
      (registry.removeServer as jest.Mock).mockResolvedValue(undefined); // Ensure removeServer succeeds
      (registry.addSharedServer as jest.Mock).mockRejectedValue(
        new Error('Add shared server failed'),
      );

      await expect(
        MCPPrivateServerLoader.updatePrivateServer('server1', mockConfig2),
      ).rejects.toThrow('Add shared server failed');
    });
  });
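Taken together, these cases describe a branch on whether the edited server has meanwhile been promoted into the shared registry. A rough sketch of that decision, using the registry methods the tests mock; the branch structure is an inference from the assertions, not the actual source:

// Hypothetical sketch of updatePrivateServer, inferred from the expectations above
async function updatePrivateServerSketch(serverName: string, config: t.ParsedServerConfig) {
  const sharedConfig = await registry.getServerConfig(serverName);
  if (sharedConfig) {
    // The server was promoted: refresh it in the shared registry instead of per-user caches.
    await registry.removeServer(serverName);
    await registry.addSharedServer(serverName, config);
    return;
  }
  // Still private: push the new metadata into every user cache that already holds it.
  await registry.privateServersCache.updateServerConfigIfExists(serverName, config);
}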

  describe('updatePrivateServerAccess()', () => {
    it('should revoke access from all users when allowedUserIds is empty', async () => {
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([
        'user1',
        'user2',
        'user3',
      ]);

      await MCPPrivateServerLoader.updatePrivateServerAccess('server1', [], mockConfig1);

      expect(registry.privateServersCache.findUsersWithServer).toHaveBeenCalledWith('server1');
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).toHaveBeenCalledWith(
        ['user1', 'user2', 'user3'],
        'server1',
      );
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Revoking access from all users',
      );
    });

    it('should grant access to new users and revoke from removed users', async () => {
      const allowedUserIds = ['user1', 'user2', 'user4'];
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([
        'user1',
        'user2',
        'user3',
      ]);

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // Should revoke from user3 (no longer in allowed list)
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).toHaveBeenCalledWith(
        ['user3'],
        'server1',
      );

      // Should grant to all allowed users (includes existing and new)
      expect(registry.privateServersCache.addServerConfigIfCacheExists).toHaveBeenCalledWith(
        allowedUserIds,
        'server1',
        mockConfig1,
      );

      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Updating access for 3 users',
      );
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Revoking access from 1 users',
      );
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Granting access to 3 users',
      );
    });

    it('should only grant access when no users currently have the server', async () => {
      const allowedUserIds = ['user1', 'user2'];
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([]);

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      expect(registry.privateServersCache.removeServerConfigIfCacheExists).not.toHaveBeenCalled();
      expect(registry.privateServersCache.addServerConfigIfCacheExists).toHaveBeenCalledWith(
        allowedUserIds,
        'server1',
        mockConfig1,
      );
    });

    it('should only revoke access when granting to users who already have it', async () => {
      const allowedUserIds = ['user1', 'user2'];
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([
        'user1',
        'user2',
      ]);

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // No one to revoke
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).not.toHaveBeenCalled();

      // Still grant (idempotent)
      expect(registry.privateServersCache.addServerConfigIfCacheExists).toHaveBeenCalledWith(
        allowedUserIds,
        'server1',
        mockConfig1,
      );
    });

    it('should handle completely new access list', async () => {
      const allowedUserIds = ['user4', 'user5', 'user6'];
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([
        'user1',
        'user2',
        'user3',
      ]);

      await MCPPrivateServerLoader.updatePrivateServerAccess(
        'server1',
        allowedUserIds,
        mockConfig1,
      );

      // Revoke from all current users
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).toHaveBeenCalledWith(
        ['user1', 'user2', 'user3'],
        'server1',
      );

      // Grant to all new users
      expect(registry.privateServersCache.addServerConfigIfCacheExists).toHaveBeenCalledWith(
        allowedUserIds,
        'server1',
        mockConfig1,
      );
    });

    it('should throw error if findUsersWithServer fails', async () => {
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockRejectedValue(
        new Error('Redis scan failed'),
      );

      await expect(
        MCPPrivateServerLoader.updatePrivateServerAccess('server1', [], mockConfig1),
      ).rejects.toThrow('Redis scan failed');
    });
  });
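The access update is effectively a set difference between the users that currently hold the server and the newly allowed list: revoke only from the removed users, grant (idempotently) to everyone on the new list. A small sketch of that computation, again an inference from the expectations above rather than the real code:

// Hypothetical sketch of the grant/revoke split in updatePrivateServerAccess
async function updateAccessSketch(serverName: string, allowedUserIds: string[], config: t.ParsedServerConfig) {
  const currentUserIds = await registry.privateServersCache.findUsersWithServer(serverName);
  const allowed = new Set(allowedUserIds);
  const toRevoke = currentUserIds.filter((id) => !allowed.has(id));
  if (toRevoke.length > 0) {
    await registry.privateServersCache.removeServerConfigIfCacheExists(toRevoke, serverName);
  }
  if (allowedUserIds.length > 0) {
    // Granting to users who already have the server is a no-op, so the full list is passed.
    await registry.privateServersCache.addServerConfigIfCacheExists(allowedUserIds, serverName, config);
  }
}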

  describe('promoteToSharedServer()', () => {
    it('should promote private server to shared registry and remove from private caches', async () => {
      (registry.addSharedServer as jest.Mock).mockResolvedValue(undefined);
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([
        'user1',
        'user2',
        'user3',
      ]);

      await MCPPrivateServerLoader.promoteToSharedServer('server1', mockConfig1);

      // Should add to shared registry
      expect(registry.addSharedServer).toHaveBeenCalledWith('server1', mockConfig1);

      // Should remove from all private user caches
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).toHaveBeenCalledWith(
        ['user1', 'user2', 'user3'],
        'server1',
      );

      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Promoting to shared server',
      );
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Successfully promoted to shared server',
      );
    });

    it('should handle promoting when no users have the server privately', async () => {
      (registry.addSharedServer as jest.Mock).mockResolvedValue(undefined);
      (registry.privateServersCache.findUsersWithServer as jest.Mock).mockResolvedValue([]);

      await MCPPrivateServerLoader.promoteToSharedServer('server1', mockConfig1);

      expect(registry.addSharedServer).toHaveBeenCalledWith('server1', mockConfig1);
      expect(registry.privateServersCache.removeServerConfigIfCacheExists).not.toHaveBeenCalled();
    });

    it('should throw error if addSharedServer fails', async () => {
      (registry.addSharedServer as jest.Mock).mockRejectedValue(
        new Error('Failed to add to shared registry'),
      );

      await expect(
        MCPPrivateServerLoader.promoteToSharedServer('server1', mockConfig1),
      ).rejects.toThrow('Failed to add to shared registry');
    });
  });
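Promotion moves a server one way: into the shared registry first, then out of every per-user private cache. A compact sketch of that ordering, inferred from the assertions above and assuming the same registry collaborator:

// Hypothetical sketch of promoteToSharedServer
async function promoteSketch(serverName: string, config: t.ParsedServerConfig) {
  await registry.addSharedServer(serverName, config); // fail fast before touching user caches
  const userIds = await registry.privateServersCache.findUsersWithServer(serverName);
  if (userIds.length > 0) {
    await registry.privateServersCache.removeServerConfigIfCacheExists(userIds, serverName);
  }
}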

  describe('demoteToPrivateServer()', () => {
    it('should demote shared server to private caches for specified users', async () => {
      const allowedUserIds = ['user1', 'user2', 'user3'];
      (registry.removeServer as jest.Mock).mockResolvedValue(undefined);

      await MCPPrivateServerLoader.demoteToPrivateServer('server1', allowedUserIds, mockConfig1);

      // Should remove from shared registry
      expect(registry.removeServer).toHaveBeenCalledWith('server1');

      // Should add to private caches for allowed users
      expect(registry.privateServersCache.addServerConfigIfCacheExists).toHaveBeenCalledWith(
        allowedUserIds,
        'server1',
        mockConfig1,
      );

      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Demoting to private server',
      );
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] Successfully demoted to private server',
      );
    });

    it('should handle demoting with empty user list', async () => {
      (registry.removeServer as jest.Mock).mockResolvedValue(undefined);

      await MCPPrivateServerLoader.demoteToPrivateServer('server1', [], mockConfig1);

      expect(registry.removeServer).toHaveBeenCalledWith('server1');
      expect(registry.privateServersCache.addServerConfigIfCacheExists).not.toHaveBeenCalled();
    });

    it('should throw error if removeServer fails', async () => {
      (registry.removeServer as jest.Mock).mockRejectedValue(
        new Error('Server not found in shared registry'),
      );

      await expect(
        MCPPrivateServerLoader.demoteToPrivateServer('server1', ['user1'], mockConfig1),
      ).rejects.toThrow('Server not found in shared registry');
    });
  });
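Demotion is the mirror image: drop the server from the shared registry, then seed it into the private caches of the users who keep access. Again a hedged sketch rather than the actual implementation:

// Hypothetical sketch of demoteToPrivateServer
async function demoteSketch(serverName: string, allowedUserIds: string[], config: t.ParsedServerConfig) {
  await registry.removeServer(serverName); // a failure here aborts before any private cache is touched
  if (allowedUserIds.length > 0) {
    await registry.privateServersCache.addServerConfigIfCacheExists(allowedUserIds, serverName, config);
  }
}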

  describe('addPrivateServer()', () => {
    it('should add private server for a user', async () => {
      await MCPPrivateServerLoader.addPrivateServer('user1', 'server1', mockConfig1);

      expect(registry.privateServersCache.add).toHaveBeenCalledWith(
        'user1',
        'server1',
        mockConfig1,
      );
      expect(logger.info).toHaveBeenCalledWith(
        '[MCP][PrivateServer][server1] add private server to user with Id user1',
      );
    });

    it('should add private server with different configs for different users', async () => {
      await MCPPrivateServerLoader.addPrivateServer('user1', 'server1', mockConfig1);
      await MCPPrivateServerLoader.addPrivateServer('user2', 'server2', mockConfig2);

      expect(registry.privateServersCache.add).toHaveBeenCalledWith(
        'user1',
        'server1',
        mockConfig1,
      );
      expect(registry.privateServersCache.add).toHaveBeenCalledWith(
        'user2',
        'server2',
        mockConfig2,
      );
    });

    it('should throw error if cache add fails', async () => {
      (registry.privateServersCache.add as jest.Mock).mockRejectedValue(
        new Error('Cache write failed'),
      );

      await expect(
        MCPPrivateServerLoader.addPrivateServer('user1', 'server1', mockConfig1),
      ).rejects.toThrow('Cache write failed');
    });

    it('should handle adding server with complex config', async () => {
      const complexConfig: t.ParsedServerConfig = {
        url: 'https://api.example.com',
        requiresOAuth: true,
        lastUpdatedAt: Date.now(),
      };

      // Reset the mock to ensure it succeeds for this test
      (registry.privateServersCache.add as jest.Mock).mockResolvedValue(undefined);

      await MCPPrivateServerLoader.addPrivateServer('user1', 'complex-server', complexConfig);

      expect(registry.privateServersCache.add).toHaveBeenCalledWith(
        'user1',
        'complex-server',
        complexConfig,
      );
    });
  });
});
@@ -1,4 +1,3 @@
-import { expect } from '@playwright/test';
 import type * as t from '~/mcp/types';
 import type { MCPConnection } from '~/mcp/connection';

@@ -27,7 +26,7 @@ describe('MCPServersInitializer Redis Integration Tests', () => {
     },
     oauth_server: {
       type: 'streamable-http',
-      url: 'https://api.example.com/mcp',
+      url: 'https://api.example.com/mcp-oauth',
     },
     file_tools_server: {
       type: 'stdio',

@@ -51,7 +50,7 @@ describe('MCPServersInitializer Redis Integration Tests', () => {
     },
     oauth_server: {
       type: 'streamable-http',
-      url: 'https://api.example.com/mcp',
+      url: 'https://api.example.com/mcp-oauth',
       requiresOAuth: true,
     },
     file_tools_server: {

@@ -92,14 +91,30 @@ describe('MCPServersInitializer Redis Integration Tests', () => {
         },
       },
     },
+    remote_no_oauth_server: {
+      type: 'streamable-http',
+      url: 'https://api.example.com/mcp-no-auth',
+      requiresOAuth: false,
+    },
   };

+  // Helper to determine requiresOAuth based on URL pattern
+  // URLs ending with '-oauth' require OAuth, others don't
+  const determineRequiresOAuth = (config: t.MCPOptions): boolean => {
+    if ('url' in config && config.url) {
+      return config.url.endsWith('-oauth');
+    }
+    return false;
+  };
+
   beforeAll(async () => {
     // Set up environment variables for Redis (only if not already set)
     process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
     process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
+    // Use a unique prefix for each test run to avoid conflicts with parallel test executions
     process.env.REDIS_KEY_PREFIX =
-      process.env.REDIS_KEY_PREFIX ?? 'MCPServersInitializer-IntegrationTest';
+      process.env.REDIS_KEY_PREFIX ??
+      `MCPServersInitializer-IntegrationTest-${Date.now()}-${Math.random().toString(36).substring(7)}`;

     // Import modules after setting env vars
     const initializerModule = await import('../MCPServersInitializer');

@@ -127,20 +142,34 @@ describe('MCPServersInitializer Redis Integration Tests', () => {
     // Become leader so we can perform write operations
     leaderInstance = new LeaderElection();
     const isLeader = await leaderInstance.isLeader();
+    // eslint-disable-next-line jest/no-standalone-expect
     expect(isLeader).toBe(true);
   });

   beforeEach(async () => {
+    jest.resetModules();
+
     // Ensure we're still the leader
     const isLeader = await leaderInstance.isLeader();
     if (!isLeader) {
       throw new Error('Lost leader status before test');
     }

+    // Reset caches first to ensure clean state
+    await registryStatusCache.reset();
+    await registry.reset();
+
     // Mock MCPServerInspector.inspect to return parsed config
-    jest.spyOn(MCPServerInspector, 'inspect').mockImplementation(async (serverName: string) => {
+    // This mock inspects the actual rawConfig to determine requiresOAuth dynamically
+    jest
+      .spyOn(MCPServerInspector, 'inspect')
+      .mockImplementation(async (serverName: string, rawConfig: t.MCPOptions) => {
+        const baseConfig = testParsedConfigs[serverName] || {};
         return {
-          ...testParsedConfigs[serverName],
+          ...baseConfig,
+          ...rawConfig,
+          // Override requiresOAuth based on the actual config being inspected
+          requiresOAuth: determineRequiresOAuth(rawConfig),
           _processedByInspector: true,
         } as unknown as t.ParsedServerConfig;
       });

@@ -152,20 +181,22 @@ describe('MCPServersInitializer Redis Integration Tests', () => {

     // Mock MCPConnectionFactory
     jest.spyOn(MCPConnectionFactory, 'create').mockResolvedValue(mockConnection);

-    // Reset caches before each test
-    await registryStatusCache.reset();
-    await registry.reset();
   });

   afterEach(async () => {
     // Clean up: clear all test keys from Redis
-    if (keyvRedisClient) {
+    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
       const pattern = '*MCPServersInitializer-IntegrationTest*';
-      if ('scanIterator' in keyvRedisClient) {
+      const keysToDelete: string[] = [];
+
+      // Collect all keys first
       for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
-        await keyvRedisClient.del(key);
+        keysToDelete.push(key);
       }
+
+      // Delete in parallel for cluster mode efficiency
+      if (keysToDelete.length > 0) {
+        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
       }
     }
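The afterEach change above is the "cluster mode batch delete workaround" mentioned in the commit description: keys matching the test prefix are first collected from the SCAN iterator and then removed with one DEL per key, issued in parallel, because a single multi-key DEL can fail with a CROSSSLOT error on a Redis Cluster when the keys hash to different slots. A small stand-alone helper in the same spirit; it mirrors the scanIterator/del usage in the test above (real node-redis APIs), while the helper itself and the exact iterator typing across client versions are assumptions:

// Illustrative helper: remove all keys matching a pattern, one DEL per key (cluster-safe)
import { createClient } from 'redis';

async function deleteByPattern(client: ReturnType<typeof createClient>, pattern: string): Promise<number> {
  const keys: string[] = [];
  for await (const key of client.scanIterator({ MATCH: pattern })) {
    keys.push(key); // collect first so deletions don't interfere with the scan cursor
  }
  // Per-key deletes avoid cross-slot errors on a cluster and can still run concurrently.
  await Promise.all(keys.map((k) => client.del(k)));
  return keys.length;
}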
@@ -6,6 +6,9 @@ import { MCPConnection } from '~/mcp/connection';
 import { registryStatusCache } from '~/mcp/registry/cache/RegistryStatusCache';
 import { MCPServerInspector } from '~/mcp/registry/MCPServerInspector';
 import { mcpServersRegistry as registry } from '~/mcp/registry/MCPServersRegistry';
+const FIXED_TIME = 1699564800000;
+const originalDateNow = Date.now;
+Date.now = jest.fn(() => FIXED_TIME);

 // Mock external dependencies
 jest.mock('../../MCPConnectionFactory');

@@ -31,6 +34,10 @@ const mockInspect = MCPServerInspector.inspect as jest.MockedFunction<
 describe('MCPServersInitializer', () => {
   let mockConnection: jest.Mocked<MCPConnection>;

+  afterAll(() => {
+    Date.now = originalDateNow;
+  });
+
   const testConfigs: t.MCPServers = {
     disabled_server: {
       type: 'stdio',

@@ -40,7 +47,7 @@ describe('MCPServersInitializer', () => {
     },
     oauth_server: {
       type: 'streamable-http',
-      url: 'https://api.example.com/mcp',
+      url: 'https://api.example.com/mcp-oauth',
     },
     file_tools_server: {
       type: 'stdio',

@@ -52,6 +59,10 @@ describe('MCPServersInitializer', () => {
       command: 'node',
       args: ['instructions.js'],
     },
+    remote_no_oauth_server: {
+      type: 'streamable-http',
+      url: 'https://api.example.com/mcp-no-auth',
+    },
   };

   const testParsedConfigs: Record<string, t.ParsedServerConfig> = {

@@ -64,7 +75,7 @@ describe('MCPServersInitializer', () => {
     },
     oauth_server: {
       type: 'streamable-http',
-      url: 'https://api.example.com/mcp',
+      url: 'https://api.example.com/mcp-oauth',
       requiresOAuth: true,
     },
     file_tools_server: {

@@ -105,6 +116,21 @@ describe('MCPServersInitializer', () => {
         },
       },
     },
+    remote_no_oauth_server: {
+      type: 'streamable-http',
+      url: 'https://api.example.com/mcp-no-auth',
+      requiresOAuth: false,
+    },
   };
+
+  // Helper to determine requiresOAuth based on URL pattern
+  // URLs ending with '-oauth' require OAuth, others don't
+  const determineRequiresOAuth = (config: t.MCPOptions): boolean => {
+    if ('url' in config && config.url) {
+      // If URL ends with '-oauth', requires OAuth
+      return config.url.endsWith('-oauth');
+    }
+    return false;
+  };

   beforeEach(async () => {

@@ -117,9 +143,14 @@ describe('MCPServersInitializer', () => {
     (MCPConnectionFactory.create as jest.Mock).mockResolvedValue(mockConnection);

     // Mock MCPServerInspector.inspect to return parsed config
-    mockInspect.mockImplementation(async (serverName: string) => {
+    // This mock inspects the actual rawConfig to determine requiresOAuth dynamically
+    mockInspect.mockImplementation(async (serverName: string, rawConfig: t.MCPOptions) => {
+      const baseConfig = testParsedConfigs[serverName] || {};
       return {
-        ...testParsedConfigs[serverName],
+        ...baseConfig,
+        ...rawConfig,
+        // Override requiresOAuth based on the actual config being inspected
+        requiresOAuth: determineRequiresOAuth(rawConfig),
         _processedByInspector: true,
       } as unknown as t.ParsedServerConfig;
     });

@@ -170,7 +201,7 @@ describe('MCPServersInitializer', () => {
       await MCPServersInitializer.initialize(testConfigs);

       // Verify all configs were processed by inspector (without connection parameter)
-      expect(mockInspect).toHaveBeenCalledTimes(4);
+      expect(mockInspect).toHaveBeenCalledTimes(5);
       expect(mockInspect).toHaveBeenCalledWith('disabled_server', testConfigs.disabled_server);
       expect(mockInspect).toHaveBeenCalledWith('oauth_server', testConfigs.oauth_server);
       expect(mockInspect).toHaveBeenCalledWith('file_tools_server', testConfigs.file_tools_server);

@@ -178,6 +209,10 @@ describe('MCPServersInitializer', () => {
         'search_tools_server',
         testConfigs.search_tools_server,
       );
+      expect(mockInspect).toHaveBeenCalledWith(
+        'remote_no_oauth_server',
+        testConfigs.remote_no_oauth_server,
+      );
     });

     it('should add disabled servers to sharedUserServers', async () => {

@@ -232,12 +267,15 @@ describe('MCPServersInitializer', () => {

     it('should handle inspection failures gracefully', async () => {
       // Mock inspection failure for one server
-      mockInspect.mockImplementation(async (serverName: string) => {
+      mockInspect.mockImplementation(async (serverName: string, rawConfig: t.MCPOptions) => {
         if (serverName === 'file_tools_server') {
           throw new Error('Inspection failed');
         }
+        const baseConfig = testParsedConfigs[serverName] || {};
         return {
-          ...testParsedConfigs[serverName],
+          ...rawConfig,
+          ...baseConfig,
+          requiresOAuth: determineRequiresOAuth(rawConfig),
           _processedByInspector: true,
         } as unknown as t.ParsedServerConfig;
       });
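Both spec files for the initializer and registry pin the clock so that the lastUpdatedAt stamp written into parsed configs is deterministic: the hunks above swap Date.now for a jest.fn and restore it in afterAll, and the registry unit spec later in this diff additionally installs fake timers. A compact version of that pattern, using the FIXED_TIME value from above and standard Jest APIs (the useRealTimers call is an addition for completeness, not part of the diff):

// Deterministic time in a spec file: pin Date.now, restore it when the suite ends
const FIXED_TIME = 1699564800000;
const originalDateNow = Date.now;

beforeAll(() => {
  jest.useFakeTimers();
  jest.setSystemTime(new Date(FIXED_TIME));
  Date.now = jest.fn(() => FIXED_TIME);
});

afterAll(() => {
  jest.useRealTimers();
  Date.now = originalDateNow;
});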
@@ -36,7 +36,8 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
     process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
     process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
     process.env.REDIS_KEY_PREFIX =
-      process.env.REDIS_KEY_PREFIX ?? 'MCPServersRegistry-IntegrationTest';
+      process.env.REDIS_KEY_PREFIX ??
+      `MCPServersRegistry-IntegrationTest-${Date.now()}-${Math.random().toString(36).substring(7)}`;

     // Import modules after setting env vars
     const registryModule = await import('../MCPServersRegistry');

@@ -64,12 +65,18 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
     await registry.reset();

     // Also clean up any remaining test keys from Redis
-    if (keyvRedisClient) {
+    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
       const pattern = '*MCPServersRegistry-IntegrationTest*';
-      if ('scanIterator' in keyvRedisClient) {
+      const keysToDelete: string[] = [];
+
+      // Collect all keys first
       for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
-        await keyvRedisClient.del(key);
+        keysToDelete.push(key);
       }
+
+      // Delete in parallel for cluster mode efficiency
+      if (keysToDelete.length > 0) {
+        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
       }
     }
   });

@@ -88,14 +95,14 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
       const serverName = 'private_server';

       // Add private user server
-      await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
+      await registry.privateServersCache.add(userId, serverName, testParsedConfig);

       // Verify server was added
       const retrievedConfig = await registry.getServerConfig(serverName, userId);
-      expect(retrievedConfig).toEqual(testParsedConfig);
+      expect(retrievedConfig).toMatchObject(testParsedConfig);

       // Remove private user server
-      await registry.removePrivateUserServer(userId, serverName);
+      await registry.privateServersCache.remove(userId, serverName);

       // Verify server was removed
       const configAfterRemoval = await registry.getServerConfig(serverName, userId);

@@ -106,9 +113,9 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
       const userId = 'user123';
       const serverName = 'private_server';

-      await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
+      await registry.privateServersCache.add(userId, serverName, testParsedConfig);
       await expect(
-        registry.addPrivateUserServer(userId, serverName, testParsedConfig),
+        registry.privateServersCache.add(userId, serverName, testParsedConfig),
       ).rejects.toThrow(
         'Server "private_server" already exists in cache. Use update() to modify existing configs.',
       );

@@ -125,14 +132,14 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
       };

       // Add private user server
-      await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
+      await registry.privateServersCache.add(userId, serverName, testParsedConfig);

       // Update the server config
-      await registry.updatePrivateUserServer(userId, serverName, updatedConfig);
+      await registry.privateServersCache.update(userId, serverName, updatedConfig);

       // Verify server was updated
       const retrievedConfig = await registry.getServerConfig(serverName, userId);
-      expect(retrievedConfig).toEqual(updatedConfig);
+      expect(retrievedConfig).toMatchObject(updatedConfig);
     });

     it('should throw error when updating non-existent server', async () => {

@@ -140,22 +147,106 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
       const serverName = 'private_server';

       // Add a user cache first
-      await registry.addPrivateUserServer(userId, 'other_server', testParsedConfig);
+      await registry.privateServersCache.add(userId, 'other_server', testParsedConfig);

       await expect(
-        registry.updatePrivateUserServer(userId, serverName, testParsedConfig),
+        registry.privateServersCache.update(userId, serverName, testParsedConfig),
       ).rejects.toThrow(
         'Server "private_server" does not exist in cache. Use add() to create new configs.',
       );
     });

-    it('should throw error when updating server for non-existent user', async () => {
+    it('should throw error when updating non-existent server (lazy-loads cache)', async () => {
       const userId = 'nonexistent_user';
       const serverName = 'private_server';

+      // With lazy-loading, cache is created but server doesn't exist in it
       await expect(
-        registry.updatePrivateUserServer(userId, serverName, testParsedConfig),
-      ).rejects.toThrow('No private servers found for user "nonexistent_user".');
+        registry.privateServersCache.update(userId, serverName, testParsedConfig),
+      ).rejects.toThrow(
+        'Server "private_server" does not exist in cache. Use add() to create new configs.',
+      );
+    });
+  });
+
+  describe('getPrivateServerConfig', () => {
+    it('should retrieve private server config for a specific user', async () => {
+      const userId = 'user123';
+      const serverName = 'private_server';
+
+      await registry.privateServersCache.add(userId, serverName, testParsedConfig);
+
+      const retrievedConfig = await registry.privateServersCache.get(userId, serverName);
+      expect(retrievedConfig).toMatchObject(testParsedConfig);
+    });
+
+    it('should return undefined if server does not exist in user private cache', async () => {
+      const userId = 'user123';
+
+      // Create a cache for this user with a different server
+      await registry.privateServersCache.add(userId, 'other_server', testParsedConfig);
+
+      // Try to get a server that doesn't exist
+      const retrievedConfig = await registry.privateServersCache.get(userId, 'nonexistent_server');
+      expect(retrievedConfig).toBeUndefined();
+    });
+
+    it('should return undefined when user has no private servers (lazy-loads cache)', async () => {
+      const userId = 'user_with_no_cache';
+
+      // With lazy-loading, cache is created but is empty
+      const config = await registry.privateServersCache.get(userId, 'server_name');
+      expect(config).toBeUndefined();
+    });
+
+    it('should isolate private servers between different users', async () => {
+      const user1 = 'user1';
+      const user2 = 'user2';
+      const serverName = 'shared_name_server';
+
+      const config1: t.ParsedServerConfig = {
+        ...testParsedConfig,
+        args: ['user1.js'],
+      };
+      const config2: t.ParsedServerConfig = {
+        ...testParsedConfig,
+        args: ['user2.js'],
+      };
+
+      await registry.privateServersCache.add(user1, serverName, config1);
+      await registry.privateServersCache.add(user2, serverName, config2);
+
+      const user1Config = await registry.privateServersCache.get(user1, serverName);
+      const user2Config = await registry.privateServersCache.get(user2, serverName);
+
+      // Verify each user gets their own config
+      expect(user1Config).toBeDefined();
+      expect(user2Config).toBeDefined();
+      if (user1Config && 'args' in user1Config) {
+        expect(user1Config.args).toEqual(['user1.js']);
+      }
+      if (user2Config && 'args' in user2Config) {
+        expect(user2Config.args).toEqual(['user2.js']);
+      }
+    });
+
+    it('should not retrieve shared servers through privateServersCache.get', async () => {
+      const userId = 'user123';
+
+      // Add servers to shared caches
+      await registry.sharedAppServers.add('app_server', testParsedConfig);
+      await registry.sharedUserServers.add('user_server', testParsedConfig);
+
+      // Create a private cache for the user (but don't add these servers to it)
+      await registry.privateServersCache.add(userId, 'private_server', testParsedConfig);
+
+      // Try to get shared servers using privateServersCache.get - should return undefined
+      // because privateServersCache.get only looks at private cache, not shared caches
+      const appServerConfig = await registry.privateServersCache.get(userId, 'app_server');
+      const userServerConfig = await registry.privateServersCache.get(userId, 'user_server');
+
+      expect(appServerConfig).toBeUndefined();
+      expect(userServerConfig).toBeUndefined();
     });
   });

@@ -164,8 +255,8 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
     // Add servers to all three caches
     await registry.sharedAppServers.add('app_server', testParsedConfig);
     await registry.sharedUserServers.add('user_server', testParsedConfig);
-    await registry.addPrivateUserServer('abc', 'abc_private_server', testParsedConfig);
-    await registry.addPrivateUserServer('xyz', 'xyz_private_server', testParsedConfig);
+    await registry.privateServersCache.add('abc', 'abc_private_server', testParsedConfig);
+    await registry.privateServersCache.add('xyz', 'xyz_private_server', testParsedConfig);

     // Without userId: should return only shared app + shared user servers
     const configsNoUser = await registry.getAllServerConfigs();

@@ -196,17 +287,17 @@ describe('MCPServersRegistry Redis Integration Tests', () => {
     // Add servers to all three caches
     await registry.sharedAppServers.add('app_server', testParsedConfig);
     await registry.sharedUserServers.add('user_server', testParsedConfig);
-    await registry.addPrivateUserServer(userId, 'private_server', testParsedConfig);
+    await registry.privateServersCache.add(userId, 'private_server', testParsedConfig);

     // Verify all servers are accessible before reset
     const appConfigBefore = await registry.getServerConfig('app_server');
-    const userConfigBefore = await registry.getServerConfig('user_server');
+    const userConfigBefore = await registry.getServerConfig('user_server', userId);
     const privateConfigBefore = await registry.getServerConfig('private_server', userId);
     const allConfigsBefore = await registry.getAllServerConfigs(userId);

-    expect(appConfigBefore).toEqual(testParsedConfig);
-    expect(userConfigBefore).toEqual(testParsedConfig);
-    expect(privateConfigBefore).toEqual(testParsedConfig);
+    expect(appConfigBefore).toMatchObject(testParsedConfig);
+    expect(userConfigBefore).toMatchObject(testParsedConfig);
+    expect(privateConfigBefore).toMatchObject(testParsedConfig);
     expect(Object.keys(allConfigsBefore)).toHaveLength(3);

     // Reset everything
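A detail worth noting in these registry hunks: assertions move from toEqual to toMatchObject, presumably because the registry now stamps stored configs with lastUpdatedAt (the field the connection-staleness check compares against), so an exact-equality match against the original fixture would fail. The pattern in isolation, as a hedged illustration:

// toMatchObject tolerates extra fields such as a lastUpdatedAt stamp added by the registry
const stored = { command: 'node', args: ['test.js'], lastUpdatedAt: Date.now() };
const fixture = { command: 'node', args: ['test.js'] };
expect(stored).toMatchObject(fixture); // passes
// expect(stored).toEqual(fixture);    // would fail: lastUpdatedAt is not in the fixture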
@ -1,6 +1,8 @@
|
||||||
import * as t from '~/mcp/types';
|
import * as t from '~/mcp/types';
|
||||||
import { mcpServersRegistry as registry } from '~/mcp/registry/MCPServersRegistry';
|
import { mcpServersRegistry as registry } from '~/mcp/registry/MCPServersRegistry';
|
||||||
|
const FIXED_TIME = 1699564800000;
|
||||||
|
const originalDateNow = Date.now;
|
||||||
|
Date.now = jest.fn(() => FIXED_TIME);
|
||||||
/**
|
/**
|
||||||
* Unit tests for MCPServersRegistry using in-memory cache.
|
* Unit tests for MCPServersRegistry using in-memory cache.
|
||||||
* For integration tests using Redis-backed cache, see MCPServersRegistry.cache_integration.spec.ts
|
* For integration tests using Redis-backed cache, see MCPServersRegistry.cache_integration.spec.ts
|
||||||
|
|
@ -24,8 +26,15 @@ describe('MCPServersRegistry', () => {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
lastUpdatedAt: FIXED_TIME,
|
||||||
};
|
};
|
||||||
|
beforeAll(() => {
|
||||||
|
jest.useFakeTimers();
|
||||||
|
jest.setSystemTime(new Date(FIXED_TIME));
|
||||||
|
});
|
||||||
|
afterAll(() => {
|
||||||
|
Date.now = originalDateNow;
|
||||||
|
});
|
||||||
beforeEach(async () => {
|
beforeEach(async () => {
|
||||||
await registry.reset();
|
await registry.reset();
|
||||||
});
|
});
|
||||||
|
|
@ -36,14 +45,14 @@ describe('MCPServersRegistry', () => {
|
||||||
const serverName = 'private_server';
|
const serverName = 'private_server';
|
||||||
|
|
||||||
// Add private user server
|
// Add private user server
|
||||||
await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
|
await registry.privateServersCache.add(userId, serverName, testParsedConfig);
|
||||||
|
|
||||||
// Verify server was added
|
// Verify server was added
|
||||||
const retrievedConfig = await registry.getServerConfig(serverName, userId);
|
const retrievedConfig = await registry.getServerConfig(serverName, userId);
|
||||||
expect(retrievedConfig).toEqual(testParsedConfig);
|
expect(retrievedConfig).toEqual(testParsedConfig);
|
||||||
|
|
||||||
// Remove private user server
|
// Remove private user server
|
||||||
await registry.removePrivateUserServer(userId, serverName);
|
await registry.privateServersCache.remove(userId, serverName);
|
||||||
|
|
||||||
// Verify server was removed
|
// Verify server was removed
|
||||||
const configAfterRemoval = await registry.getServerConfig(serverName, userId);
|
const configAfterRemoval = await registry.getServerConfig(serverName, userId);
|
||||||
|
|
@ -54,9 +63,9 @@ describe('MCPServersRegistry', () => {
|
||||||
const userId = 'user123';
|
const userId = 'user123';
|
||||||
const serverName = 'private_server';
|
const serverName = 'private_server';
|
||||||
|
|
||||||
await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
|
await registry.privateServersCache.add(userId, serverName, testParsedConfig);
|
||||||
await expect(
|
await expect(
|
||||||
registry.addPrivateUserServer(userId, serverName, testParsedConfig),
|
registry.privateServersCache.add(userId, serverName, testParsedConfig),
|
||||||
).rejects.toThrow(
|
).rejects.toThrow(
|
||||||
'Server "private_server" already exists in cache. Use update() to modify existing configs.',
|
'Server "private_server" already exists in cache. Use update() to modify existing configs.',
|
||||||
);
|
);
|
||||||
|
|
@ -70,13 +79,14 @@ describe('MCPServersRegistry', () => {
|
||||||
command: 'python',
|
command: 'python',
|
||||||
args: ['updated.py'],
|
args: ['updated.py'],
|
||||||
requiresOAuth: true,
|
requiresOAuth: true,
|
||||||
|
lastUpdatedAt: FIXED_TIME,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Add private user server
|
// Add private user server
|
||||||
await registry.addPrivateUserServer(userId, serverName, testParsedConfig);
|
await registry.privateServersCache.add(userId, serverName, testParsedConfig);
|
||||||
|
|
||||||
// Update the server config
|
// Update the server config
|
||||||
await registry.updatePrivateUserServer(userId, serverName, updatedConfig);
|
await registry.privateServersCache.update(userId, serverName, updatedConfig);
|
||||||
|
|
||||||
// Verify server was updated
|
// Verify server was updated
|
||||||
const retrievedConfig = await registry.getServerConfig(serverName, userId);
|
const retrievedConfig = await registry.getServerConfig(serverName, userId);
|
||||||
|
|
@@ -88,22 +98,106 @@ describe('MCPServersRegistry', () => {
       const serverName = 'private_server';

       // Add a user cache first
-      await registry.addPrivateUserServer(userId, 'other_server', testParsedConfig);
+      await registry.privateServersCache.add(userId, 'other_server', testParsedConfig);

       await expect(
-        registry.updatePrivateUserServer(userId, serverName, testParsedConfig),
+        registry.privateServersCache.update(userId, serverName, testParsedConfig),
       ).rejects.toThrow(
         'Server "private_server" does not exist in cache. Use add() to create new configs.',
       );
     });

-    it('should throw error when updating server for non-existent user', async () => {
+    it('should throw error when updating non-existent server (lazy-loads cache)', async () => {
       const userId = 'nonexistent_user';
       const serverName = 'private_server';

+      // With lazy-loading, cache is created but server doesn't exist in it
       await expect(
-        registry.updatePrivateUserServer(userId, serverName, testParsedConfig),
-      ).rejects.toThrow('No private servers found for user "nonexistent_user".');
+        registry.privateServersCache.update(userId, serverName, testParsedConfig),
+      ).rejects.toThrow(
+        'Server "private_server" does not exist in cache. Use add() to create new configs.',
+      );
+    });
+  });

+  describe('getPrivateServerConfig', () => {
+    it('should retrieve private server config for a specific user', async () => {
+      const userId = 'user123';
+      const serverName = 'private_server';
+
+      await registry.privateServersCache.add(userId, serverName, testParsedConfig);
+
+      const retrievedConfig = await registry.privateServersCache.get(userId, serverName);
+      expect(retrievedConfig).toEqual(testParsedConfig);
+    });
+
+    it('should return undefined if server does not exist in user private cache', async () => {
+      const userId = 'user123';
+
+      // Create a cache for this user with a different server
+      await registry.privateServersCache.add(userId, 'other_server', testParsedConfig);
+
+      // Try to get a server that doesn't exist
+      const retrievedConfig = await registry.privateServersCache.get(userId, 'nonexistent_server');
+      expect(retrievedConfig).toBeUndefined();
+    });
+
+    it('should return undefined when user has no private servers (lazy-loads cache)', async () => {
+      const userId = 'user_with_no_cache';
+
+      // With lazy-loading, cache is created but is empty
+      const config = await registry.privateServersCache.get(userId, 'server_name');
+      expect(config).toBeUndefined();
+    });
+
+    it('should isolate private servers between different users', async () => {
+      const user1 = 'user1';
+      const user2 = 'user2';
+      const serverName = 'shared_name_server';
+
+      const config1: t.ParsedServerConfig = {
+        ...testParsedConfig,
+        args: ['user1.js'],
+      };
+      const config2: t.ParsedServerConfig = {
+        ...testParsedConfig,
+        args: ['user2.js'],
+      };
+
+      await registry.privateServersCache.add(user1, serverName, config1);
+      await registry.privateServersCache.add(user2, serverName, config2);
+
+      const user1Config = await registry.privateServersCache.get(user1, serverName);
+      const user2Config = await registry.privateServersCache.get(user2, serverName);
+
+      // Verify each user gets their own config
+      expect(user1Config).toBeDefined();
+      expect(user2Config).toBeDefined();
+      if (user1Config && 'args' in user1Config) {
+        expect(user1Config.args).toEqual(['user1.js']);
+      }
+      if (user2Config && 'args' in user2Config) {
+        expect(user2Config.args).toEqual(['user2.js']);
+      }
+    });
+
+    it('should not retrieve shared servers through privateServersCache.get', async () => {
+      const userId = 'user123';
+
+      // Add servers to shared caches
+      await registry.sharedAppServers.add('app_server', testParsedConfig);
+      await registry.sharedUserServers.add('user_server', testParsedConfig);
+
+      // Create a private cache for the user (but don't add these servers to it)
+      await registry.privateServersCache.add(userId, 'private_server', testParsedConfig);
+
+      // Try to get shared servers using privateServersCache.get - should return undefined
+      // because privateServersCache.get only looks at private cache, not shared caches
+      const appServerConfig = await registry.privateServersCache.get(userId, 'app_server');
+      const userServerConfig = await registry.privateServersCache.get(userId, 'user_server');
+
+      expect(appServerConfig).toBeUndefined();
+      expect(userServerConfig).toBeUndefined();
     });
   });

@@ -112,8 +206,8 @@ describe('MCPServersRegistry', () => {
       // Add servers to all three caches
       await registry.sharedAppServers.add('app_server', testParsedConfig);
       await registry.sharedUserServers.add('user_server', testParsedConfig);
-      await registry.addPrivateUserServer('abc', 'abc_private_server', testParsedConfig);
-      await registry.addPrivateUserServer('xyz', 'xyz_private_server', testParsedConfig);
+      await registry.privateServersCache.add('abc', 'abc_private_server', testParsedConfig);
+      await registry.privateServersCache.add('xyz', 'xyz_private_server', testParsedConfig);

       // Without userId: should return only shared app + shared user servers
       const configsNoUser = await registry.getAllServerConfigs();

@@ -144,11 +238,11 @@ describe('MCPServersRegistry', () => {
       // Add servers to all three caches
       await registry.sharedAppServers.add('app_server', testParsedConfig);
       await registry.sharedUserServers.add('user_server', testParsedConfig);
-      await registry.addPrivateUserServer(userId, 'private_server', testParsedConfig);
+      await registry.privateServersCache.add(userId, 'private_server', testParsedConfig);

       // Verify all servers are accessible before reset
       const appConfigBefore = await registry.getServerConfig('app_server');
-      const userConfigBefore = await registry.getServerConfig('user_server');
+      const userConfigBefore = await registry.getServerConfig('user_server', userId);
       const privateConfigBefore = await registry.getServerConfig('private_server', userId);
       const allConfigsBefore = await registry.getAllServerConfigs(userId);

@@ -9,6 +9,11 @@ import { isLeader } from '~/cluster';
 export abstract class BaseRegistryCache {
   protected readonly PREFIX = 'MCP::ServersRegistry';
   protected abstract readonly cache: Keyv;
+  protected readonly leaderOnly: boolean;
+
+  constructor(leaderOnly?: boolean) {
+    this.leaderOnly = leaderOnly ?? false;
+  }

   protected async leaderCheck(action: string): Promise<void> {
     if (!(await isLeader())) throw new Error(`Only leader can ${action}.`);

@@ -20,7 +25,9 @@ export abstract class BaseRegistryCache {
   }

   public async reset(): Promise<void> {
+    if (this.leaderOnly) {
       await this.leaderCheck(`reset ${this.cache.namespace} cache`);
+    }
     await this.cache.clear();
   }
 }

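For context, a minimal sketch of how a subclass is expected to opt into leader-gated writes through the new base constructor flag. The subclass name, namespace suffix, and markReady method below are hypothetical; only BaseRegistryCache, leaderCheck, reset, and standardCache come from the changes above.

import { standardCache } from '~/cache';
import { BaseRegistryCache } from './BaseRegistryCache';

// Hypothetical subclass, for illustration only: passing leaderOnly=true means
// the inherited reset() refuses to run on follower nodes, and any other write
// can opt into the same gate via leaderCheck().
class ExampleLeaderGatedCache extends BaseRegistryCache {
  protected readonly cache = standardCache(`${this.PREFIX}::Example`);

  constructor() {
    super(true); // leaderOnly
  }

  public async markReady(): Promise<void> {
    await this.leaderCheck('mark example cache ready'); // throws on non-leader nodes
    await this.cache.set('READY', true);
  }
}
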
packages/api/src/mcp/registry/cache/PrivateServerConfigs/PrivateServerConfigsCacheBase.ts (new file, 115 lines)
@@ -0,0 +1,115 @@
import type * as t from '~/mcp/types';
import { ServerConfigsCache, ServerConfigsCacheFactory } from '../ServerConfigsCacheFactory';
import { logger } from '@librechat/data-schemas';

export abstract class PrivateServerConfigsCacheBase {
  protected readonly PREFIX = 'MCP::ServersRegistry::Servers::Private';
  protected caches: Map<string, ServerConfigsCache> = new Map();

  public async add(
    userId: string,
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void> {
    const userCache = this.getOrCreatePrivateUserCache(userId);
    await userCache.add(serverName, config);
  }

  public async update(
    userId: string,
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void> {
    const userCache = this.getOrCreatePrivateUserCache(userId);
    await userCache.update(serverName, config);
  }

  /**
   * Get a specific server config from a user's cache.
   */
  public async get(userId: string, serverName: string): Promise<t.ParsedServerConfig | undefined> {
    const cache = this.getOrCreatePrivateUserCache(userId);
    return await cache.get(serverName);
  }

  /**
   * Get all server configs for a user.
   */
  public async getAll(userId: string): Promise<Record<string, t.ParsedServerConfig>> {
    const cache = this.getOrCreatePrivateUserCache(userId);
    return await cache.getAll();
  }

  /**
   * Check if a user has a cache instance loaded.
   */
  public abstract has(userId: string): Promise<boolean>;

  public async remove(userId: string, serverName: string): Promise<void> {
    const userCache = this.getOrCreatePrivateUserCache(userId);
    await userCache.remove(serverName);
  }

  public async reset(userId: string): Promise<void> {
    const cache = this.getOrCreatePrivateUserCache(userId);
    return cache.reset();
  }

  // ============= BATCH OPERATION PRIMITIVES =============
  // Simple primitives for MCPPrivateServerLoader orchestration - no business logic

  /**
   * Update server config in ALL user caches that already have it.
   * Efficient: Uses pattern-based scan, skips users who don't have it.
   * Use case: Metadata changed (command, args, env)
   */
  public abstract updateServerConfigIfExists(
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void>;

  /**
   * Add server config ONLY to users whose caches are already initialized.
   * Skips users without initialized caches (doesn't create new caches).
   * Use case: Granting access to existing users
   */
  public abstract addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: t.ParsedServerConfig,
  ): Promise<void>;

  /**
   * Remove server config ONLY from users whose caches exist.
   * Ignores users without initialized caches.
   * Use case: Revoking access from users
   */
  public abstract removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void>;

  /**
   * Find all users who have this server in their cache.
   * Primitive for determining affected users.
   */
  public abstract findUsersWithServer(serverName: string): Promise<string[]>;

  /**
   * Clear all private server configs for all users (nuclear option).
   * Use sparingly - typically only for testing or full reset.
   */
  public abstract resetAll(): Promise<void>;

  protected getOrCreatePrivateUserCache(userId: string): ServerConfigsCache {
    if (!userId) {
      logger.error('userId is required to get or create private user cache');
      throw new Error('userId is required to get or create private user cache');
    }
    if (!this.caches.has(userId)) {
      const cache = ServerConfigsCacheFactory.create(userId, 'Private', false);
      this.caches.set(userId, cache);
    }
    return this.caches.get(userId)!;
  }
}

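The batch primitives above carry no business logic by design; a hedged sketch of how an orchestrator such as MCPPrivateServerLoader might combine them. The propagatePrivateServerChange function is illustrative and not part of the diff; it only calls methods defined on PrivateServerConfigsCacheBase.

import type * as t from '~/mcp/types';
import { PrivateServerConfigsCacheBase } from './PrivateServerConfigsCacheBase';

// Illustrative orchestration: when a private server definition changes,
// push the new config only to users who already have it cached, and report
// which users were affected.
async function propagatePrivateServerChange(
  privateServers: PrivateServerConfigsCacheBase,
  serverName: string,
  config: t.ParsedServerConfig,
): Promise<string[]> {
  const affectedUsers = await privateServers.findUsersWithServer(serverName);
  if (affectedUsers.length > 0) {
    // Updates every initialized user cache that already holds this server;
    // users without an initialized cache simply reload the fresh config later.
    await privateServers.updateServerConfigIfExists(serverName, config);
  }
  return affectedUsers;
}
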
packages/api/src/mcp/registry/cache/PrivateServerConfigs/PrivateServerConfigsCacheFactory.ts (new file, 32 lines)
@@ -0,0 +1,32 @@
import { cacheConfig } from '~/cache';

import { PrivateServerConfigsCacheInMemory } from './PrivateServerConfigsCacheInMemory';
import { PrivateServerConfigsCacheRedis } from './PrivateServerConfigsCacheRedis';

export type PrivateServerConfigsCache =
  | PrivateServerConfigsCacheInMemory
  | PrivateServerConfigsCacheRedis;

/**
 * Factory for creating the appropriate PrivateServerConfigsCache implementation based on deployment mode.
 * Automatically selects between in-memory and Redis-backed storage depending on USE_REDIS config.
 * In single-instance mode (USE_REDIS=false), returns lightweight in-memory cache.
 * In cluster mode (USE_REDIS=true), returns Redis-backed cache with distributed coordination.
 * Provides a unified interface regardless of the underlying storage mechanism.
 */
export class PrivateServerConfigsCacheFactory {
  /**
   * Create a PrivateServerConfigsCache instance.
   * Returns the Redis implementation if Redis is configured, otherwise the in-memory implementation.
   *
   * @returns PrivateServerConfigsCache instance
   */
  static create(): PrivateServerConfigsCache {
    if (cacheConfig.USE_REDIS) {
      return new PrivateServerConfigsCacheRedis();
    }

    // In-memory mode uses a simple Map - doesn't need owner/namespace
    return new PrivateServerConfigsCacheInMemory();
  }
}

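A minimal usage sketch of the factory and the per-user API it returns, assuming the classes above; the config literal is illustrative, and the exact required fields come from ParsedServerConfig in ~/mcp/types.

import type { ParsedServerConfig } from '~/mcp/types';
import { PrivateServerConfigsCacheFactory } from './PrivateServerConfigsCacheFactory';

async function example(): Promise<void> {
  // Returns the Redis-backed implementation when USE_REDIS is set,
  // otherwise the in-memory Map-based implementation.
  const privateServers = PrivateServerConfigsCacheFactory.create();

  // Illustrative config; real configs are parsed from librechat.yaml / DB.
  const config = { command: 'node', args: ['server.js'] } as ParsedServerConfig;

  await privateServers.add('user123', 'my_server', config);
  const found = await privateServers.get('user123', 'my_server'); // the stored config
  const missing = await privateServers.get('user123', 'other_server'); // undefined
  console.log(Boolean(found), missing === undefined);
}
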
packages/api/src/mcp/registry/cache/PrivateServerConfigs/PrivateServerConfigsCacheInMemory.ts (new file, 105 lines)
@@ -0,0 +1,105 @@
import { ParsedServerConfig } from '~/mcp/types';
import { PrivateServerConfigsCacheBase } from './PrivateServerConfigsCacheBase';
import { logger } from '@librechat/data-schemas';
import { ServerConfigsCacheInMemory } from '../ServerConfigsCacheInMemory';

export class PrivateServerConfigsCacheInMemory extends PrivateServerConfigsCacheBase {
  public async has(userId: string): Promise<boolean> {
    return this.caches.has(userId);
  }

  public async updateServerConfigIfExists(
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    let updatedCount = 0;

    for (const [userId, userCache] of this.caches.entries()) {
      const existing = await userCache.get(serverName);
      if (existing) {
        const inMemoryCache = userCache as ServerConfigsCacheInMemory;
        await inMemoryCache.set(serverName, config);
        updatedCount++;
        logger.debug(`[MCP][PrivateServers][InMemory] Updated "${serverName}" for user ${userId}`);
      }
    }

    logger.info(
      `[MCP][PrivateServers][InMemory] Propagated config update for "${serverName}" to ${updatedCount} users`,
    );
  }

  public async addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    let addedCount = 0;

    for (const userId of userIds) {
      if (this.caches.has(userId)) {
        // Only if cache initialized
        const userCache = this.getOrCreatePrivateUserCache(userId);
        const inMemoryCache = userCache as ServerConfigsCacheInMemory;
        await inMemoryCache.set(serverName, config);
        addedCount++;
        logger.debug(`[MCP][PrivateServers][InMemory] Added "${serverName}" to user ${userId}`);
      }
    }

    logger.info(
      `[MCP][PrivateServers][InMemory] Granted access to "${serverName}" for ${addedCount}/${userIds.length} initialized users`,
    );
  }

  public async removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void> {
    let removedCount = 0;

    for (const userId of userIds) {
      if (this.caches.has(userId)) {
        try {
          const userCache = this.getOrCreatePrivateUserCache(userId);
          await userCache.remove(serverName);
          removedCount++;
          logger.debug(
            `[MCP][PrivateServers][InMemory] Removed "${serverName}" from user ${userId}`,
          );
        } catch (error) {
          // Ignore - server might not exist for this user
          logger.debug(
            `[MCP][PrivateServers][InMemory] Server "${serverName}" not found for user ${userId}`,
            error,
          );
        }
      }
    }

    logger.info(
      `[MCP][PrivateServers][InMemory] Revoked access to "${serverName}" from ${removedCount}/${userIds.length} users`,
    );
  }

  public async findUsersWithServer(serverName: string): Promise<string[]> {
    const userIds: string[] = [];

    for (const [userId, userCache] of this.caches.entries()) {
      const config = await userCache.get(serverName);
      if (config) {
        userIds.push(userId);
      }
    }

    return userIds;
  }

  /**
   * Clear ALL servers from ALL user caches (nuclear option).
   */
  public async resetAll(): Promise<void> {
    this.caches.clear();
    logger.info(`[MCP][PrivateServers][InMemory] Cleared ALL user caches`);
  }
}

packages/api/src/mcp/registry/cache/PrivateServerConfigs/PrivateServerConfigsCacheRedis.ts (new file, 284 lines)
@@ -0,0 +1,284 @@
import { ParsedServerConfig } from '~/mcp/types';
import { keyvRedisClient } from '~/cache';
import { PrivateServerConfigsCacheBase } from './PrivateServerConfigsCacheBase';
import { logger } from '@librechat/data-schemas';
import { cacheConfig } from '~/cache/cacheConfig';
import { batchDeleteKeys, scanKeys } from '~/cache/redisUtils';

export class PrivateServerConfigsCacheRedis extends PrivateServerConfigsCacheBase {
  /**
   * Detect if Redis is running in cluster mode.
   * In cluster mode, we need to avoid CROSSSLOT errors by using pipelines instead of multi() transactions.
   */
  private isClusterMode(): boolean {
    return cacheConfig.USE_REDIS_CLUSTER;
  }

  public async has(userId: string): Promise<boolean> {
    if (!userId || !keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      return false;
    }

    const pattern = `*${this.PREFIX}::${userId}:*`;

    for await (const _key of keyvRedisClient.scanIterator({
      MATCH: pattern,
      COUNT: 1,
    })) {
      return true;
    }

    return false; // No keys found - cache not initialized
  }

  public async updateServerConfigIfExists(
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      logger.warn('[MCP][PrivateServers][Redis] Redis SCAN not available');
      return;
    }

    const pattern = this.generateScanKeyPattern(serverName);

    try {
      // Efficient: Pattern-based scan for specific serverName
      // All cache keys that have the serverName
      const keysToUpdate = await scanKeys(keyvRedisClient, pattern);

      if (keysToUpdate.length > 0) {
        const updatedConfig = { ...config, lastUpdatedAt: Date.now() };
        const keyvFormat = { value: updatedConfig, expires: null };
        const serializedConfig = JSON.stringify(keyvFormat);

        const chunkSize = cacheConfig.REDIS_UPDATE_CHUNK_SIZE;

        if (this.isClusterMode()) {
          // Cluster mode: Use individual commands in parallel (no atomicity, but works across slots)
          for (let i = 0; i < keysToUpdate.length; i += chunkSize) {
            const chunk = keysToUpdate.slice(i, i + chunkSize);
            await Promise.all(
              chunk.map((key) => keyvRedisClient!.set(key, serializedConfig, { XX: true })),
            );
          }
        } else {
          // Single-node mode: Use multi() for atomic transactions
          for (let i = 0; i < keysToUpdate.length; i += chunkSize) {
            const chunk = keysToUpdate.slice(i, i + chunkSize);
            const multi = keyvRedisClient.multi();
            for (const key of chunk) {
              multi.set(key, serializedConfig, { XX: true });
            }
            await multi.exec();
          }
        }

        logger.info(
          `[MCP][PrivateServers][Redis] Propagated config update for "${serverName}" to ${keysToUpdate.length} users`,
        );
      } else {
        logger.debug(`[MCP][PrivateServers][Redis] No users have "${serverName}"`);
      }
    } catch (error) {
      logger.error(`[MCP][PrivateServers][Redis] Error updating "${serverName}"`, error);
      throw error;
    }
  }

  public async addServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
    config: ParsedServerConfig,
  ): Promise<void> {
    if (!keyvRedisClient) return;

    // Optimized: Single SCAN to get all users with initialized caches
    const allUsersWithCaches = await this.getAllUserIds();

    // Filter to only users with initialized caches
    const eligibleUserIds = userIds.filter((id) => allUsersWithCaches.has(id));

    if (eligibleUserIds.length === 0) {
      logger.info(
        `[MCP][PrivateServers][Redis] No initialized users to grant access to "${serverName}"`,
      );
      return;
    }

    // Batch add using pipeline with NX (only set if key doesn't exist)
    const updatedConfig = { ...config, lastUpdatedAt: Date.now() };
    const keyvFormat = { value: updatedConfig, expires: null };
    const serializedConfig = JSON.stringify(keyvFormat);

    const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
    const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;

    const chunkSize = cacheConfig.REDIS_UPDATE_CHUNK_SIZE;

    if (this.isClusterMode()) {
      // Cluster mode: Use individual commands in parallel (no atomicity, but works across slots)
      for (let i = 0; i < eligibleUserIds.length; i += chunkSize) {
        const chunk = eligibleUserIds.slice(i, i + chunkSize);
        await Promise.all(
          chunk.map((userId) => {
            const namespace = `${this.PREFIX}::${userId}`;
            const fullKey = globalPrefix
              ? `${globalPrefix}${separator}${namespace}:${serverName}`
              : `${namespace}:${serverName}`;
            return keyvRedisClient!.set(fullKey, serializedConfig, { NX: true });
          }),
        );
      }
    } else {
      // Single-node mode: Use multi() for atomic transactions
      for (let i = 0; i < eligibleUserIds.length; i += chunkSize) {
        const chunk = eligibleUserIds.slice(i, i + chunkSize);
        const multi = keyvRedisClient.multi();
        for (const userId of chunk) {
          const namespace = `${this.PREFIX}::${userId}`;
          const fullKey = globalPrefix
            ? `${globalPrefix}${separator}${namespace}:${serverName}`
            : `${namespace}:${serverName}`;
          multi.set(fullKey, serializedConfig, { NX: true });
        }
        await multi.exec();
      }
    }

    logger.info(
      `[MCP][PrivateServers][Redis] Granted access to "${serverName}" for ${eligibleUserIds.length}/${userIds.length} initialized users`,
    );
  }

  public async removeServerConfigIfCacheExists(
    userIds: string[],
    serverName: string,
  ): Promise<void> {
    if (!keyvRedisClient) return;

    // Optimized: Direct key construction - no SCAN needed!
    // Build full Redis keys directly since we know userId and serverName
    const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
    const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;
    const keysToDelete: string[] = [];

    for (const userId of userIds) {
      // Construct the full Redis key
      const namespace = `${this.PREFIX}::${userId}`;
      const fullKey = globalPrefix
        ? `${globalPrefix}${separator}${namespace}:${serverName}`
        : `${namespace}:${serverName}`;
      keysToDelete.push(fullKey);
    }

    if (keysToDelete.length > 0) {
      // Use utility function for efficient parallel deletion
      const removedCount = await batchDeleteKeys(keyvRedisClient, keysToDelete);

      logger.info(
        `[MCP][PrivateServers][Redis] Revoked access to "${serverName}" from ${removedCount}/${userIds.length} users`,
      );
    }
  }

  public async findUsersWithServer(serverName: string): Promise<string[]> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      return [];
    }

    const pattern = this.generateScanKeyPattern(serverName);

    try {
      const keys = await scanKeys(keyvRedisClient, pattern);
      const userIds: string[] = [];

      for (const key of keys) {
        const userId = this.extractUserIdFromKey(key);
        if (userId) {
          userIds.push(userId);
        }
      }

      return userIds;
    } catch (error) {
      logger.error(`[MCP][PrivateServers][Redis] Error finding users with "${serverName}"`, error);
      return [];
    }
  }

  /**
   * Scans Redis to find all unique userIds that have private server configs.
   * This method is used for efficient batch operations (add/update/delete) across all users.
   *
   * Performance note: This scans all private server config keys in Redis.
   * Use sparingly as it can be expensive with many users.
   */
  private async getAllUserIds(): Promise<Set<string>> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) {
      logger.warn('[MCP][PrivateServerConfigs][Redis] Redis SCAN not available');
      return new Set();
    }

    const userIds = new Set<string>();
    // Pattern to match all private server configs: MCP::ServersRegistry::Servers::*:*
    const pattern = `*${this.PREFIX}::*:*`;

    try {
      const keys = await scanKeys(keyvRedisClient, pattern);

      for (const key of keys) {
        const userId = this.extractUserIdFromKey(key);
        if (userId) {
          userIds.add(userId);
        }
      }
    } catch (error) {
      logger.error('[MCP][PrivateServerConfigs][Redis] Error scanning for userIds', error);
      throw error;
    }

    return userIds;
  }

  /**
   * Extract userId from a Redis key.
   * Key format: MCP::ServersRegistry::Servers::userId:serverName
   */
  private extractUserIdFromKey(key: string): string | null {
    // Remove any global prefix, then extract userId
    const keyWithoutGlobalPrefix = key.includes(this.PREFIX)
      ? key.substring(key.indexOf(this.PREFIX))
      : key;

    const withoutPrefix = keyWithoutGlobalPrefix.replace(`${this.PREFIX}::`, '');
    const lastColonIndex = withoutPrefix.lastIndexOf(':');
    if (lastColonIndex === -1) return null;

    return withoutPrefix.substring(0, lastColonIndex);
  }

  /**
   * Clear ALL servers from ALL user caches (nuclear option).
   */
  public async resetAll(): Promise<void> {
    if (!keyvRedisClient || !('scanIterator' in keyvRedisClient)) return;

    // Pattern to match all private user server configs
    // Format: MCP::ServersRegistry::Servers::userId:serverName
    const pattern = `*${this.PREFIX}::*:*`;

    // Use utility functions for efficient scan and parallel deletion
    const keysToDelete = await scanKeys(keyvRedisClient, pattern);

    if (keysToDelete.length > 0) {
      await batchDeleteKeys(keyvRedisClient, keysToDelete);
    }

    logger.info(`[MCP][Cache][Redis] Cleared all user caches: ${keysToDelete.length} entries`);
  }

  private generateScanKeyPattern(serverName: string): string {
    return `*${this.PREFIX}::*:${serverName}`;
  }
}

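To make the key layout concrete, a standalone sketch of how full Redis keys are built and parsed, written to match generateScanKeyPattern and extractUserIdFromKey above. The prefix and separator literals in the usage comment are example values only; the real ones come from cacheConfig.

// Standalone illustration of the Redis key layout the class above relies on.
const PRIVATE_PREFIX = 'MCP::ServersRegistry::Servers::Private';

function buildPrivateKey(
  globalPrefix: string,
  separator: string,
  userId: string,
  serverName: string,
): string {
  const namespace = `${PRIVATE_PREFIX}::${userId}`;
  return globalPrefix
    ? `${globalPrefix}${separator}${namespace}:${serverName}`
    : `${namespace}:${serverName}`;
}

function extractUserId(key: string): string | null {
  const start = key.indexOf(PRIVATE_PREFIX);
  const withoutPrefix = (start === -1 ? key : key.substring(start)).replace(
    `${PRIVATE_PREFIX}::`,
    '',
  );
  const lastColon = withoutPrefix.lastIndexOf(':');
  return lastColon === -1 ? null : withoutPrefix.substring(0, lastColon);
}

// Example (prefix/separator values are illustrative):
// buildPrivateKey('LibreChat', '::', 'user123', 'my_server')
//   -> 'LibreChat::MCP::ServersRegistry::Servers::Private::user123:my_server'
// extractUserId of that key -> 'user123'
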
packages/api/src/mcp/registry/cache/PrivateServersLoadStatusCache.ts (new file, 166 lines)
@@ -0,0 +1,166 @@
import { standardCache, keyvRedisClient } from '~/cache';
import { cacheConfig } from '~/cache/cacheConfig';
import { BaseRegistryCache } from './BaseRegistryCache';
import { logger } from '@librechat/data-schemas';

const LOADED_KEY_PREFIX = 'USER_PRIVATE_SERVERS_LOADED';
const LOCK_KEY_PREFIX = 'USER_PRIVATE_SERVERS_LOAD_LOCK';

// Default TTL values (in milliseconds)
const DEFAULT_LOADED_TTL = 3600 * 1000; // 1 hour - should match cache entry TTL
const DEFAULT_LOCK_TTL = 30 * 1000; // 30 seconds - lock timeout
const DEFAULT_WAIT_INTERVAL = 100; // 100ms between checks

/**
 * Dedicated cache for managing private server loading status with TTL synchronization.
 * Solves three critical issues:
 * 1. TTL Synchronization: Loaded flags expire in sync with cache entries
 * 2. Cache Eviction Detection: When cache expires, flag expires too
 * 3. Race Condition Prevention: Distributed locking prevents concurrent loads
 *
 * Design:
 * - Loaded flags have same TTL as cache entries (prevents desync)
 * - Distributed locks prevent multiple processes loading same user
 * - Wait mechanism allows processes to wait for ongoing loads
 * - Works correctly for users with 0 servers (trusts TTL, no cache verification)
 */
class PrivateServersLoadStatusCache extends BaseRegistryCache {
  protected readonly cache = standardCache(`${this.PREFIX}::PrivateServersLoadStatus`);

  /**
   * Check if user's private servers are fully loaded.
   * If false, servers need to be loaded from DB.
   *
   * @param userId - User ID
   * @returns true if user's private servers are fully loaded
   */
  public async isLoaded(userId: string): Promise<boolean> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    return (await this.cache.get(key)) === true;
  }

  /**
   * Mark user's private servers as fully loaded with TTL.
   * TTL MUST match the cache entry TTL to prevent desync.
   *
   * @param userId - User ID
   * @param ttl - Time to live in milliseconds (default: 1 hour)
   */
  public async setLoaded(userId: string, ttl: number = DEFAULT_LOADED_TTL): Promise<void> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    const success = await this.cache.set(key, true, ttl);
    this.successCheck(`set loaded status for user ${userId}`, success);
    logger.debug(`[MCP][LoadStatusCache] Marked user ${userId} as loaded (TTL: ${ttl}ms)`);
  }

  /**
   * Acquire a distributed lock for loading a user's private servers.
   * Prevents concurrent processes from loading the same user's servers.
   *
   * Uses atomic Redis SET NX PX for race-free locking.
   * Returns true immediately if Redis is not available (no locking needed in single-process mode).
   *
   * @param userId - User ID
   * @param ttl - Lock timeout in milliseconds (default: 30s)
   * @returns true if lock acquired, false if already locked
   */
  public async acquireLoadLock(userId: string, ttl: number = DEFAULT_LOCK_TTL): Promise<boolean> {
    const key = `${LOCK_KEY_PREFIX}::${userId}`;

    // Distributed locking only needed when Redis is available (multi-instance mode)
    if (!keyvRedisClient || !('set' in keyvRedisClient)) {
      logger.debug(`[MCP][LoadStatusCache] Redis not available, skipping lock for user ${userId}`);
      return true;
    }

    try {
      // Build the full Redis key (accounting for namespace and global prefix)
      const namespace = `${this.PREFIX}::PrivateServersLoadStatus`;
      const globalPrefix = cacheConfig.REDIS_KEY_PREFIX;
      const separator = cacheConfig.GLOBAL_PREFIX_SEPARATOR;
      const fullKey = globalPrefix
        ? `${globalPrefix}${separator}${namespace}:${key}`
        : `${namespace}:${key}`;

      // Redis SET with NX (only if not exists) and PX (millisecond expiry) - ATOMIC!
      const result = await keyvRedisClient.set(fullKey, Date.now().toString(), {
        NX: true, // Only set if key doesn't exist
        PX: ttl, // Expiry in milliseconds
      });

      const acquired = result === 'OK';

      if (acquired) {
        logger.debug(
          `[MCP][LoadStatusCache] Acquired load lock for user ${userId} (TTL: ${ttl}ms)`,
        );
      } else {
        logger.debug(`[MCP][LoadStatusCache] Load lock already held for user ${userId}`);
      }

      return acquired;
    } catch (error) {
      logger.error(`[MCP][LoadStatusCache] Error acquiring lock for user ${userId}:`, error);
      return false;
    }
  }

  /**
   * Release the distributed lock for a user's private server loading.
   * Should be called in a finally block to ensure lock is always released.
   *
   * @param userId - User ID
   */
  public async releaseLoadLock(userId: string): Promise<void> {
    const key = `${LOCK_KEY_PREFIX}::${userId}`;
    await this.cache.delete(key);
    logger.debug(`[MCP][LoadStatusCache] Released load lock for user ${userId}`);
  }

  /**
   * Wait for another process to finish loading a user's private servers.
   * Used when a lock is already held by another process.
   *
   * @param userId - User ID
   * @param maxWaitTime - Maximum time to wait in milliseconds (default: 5s)
   * @param checkInterval - Interval between checks in milliseconds (default: 100ms)
   * @returns true if loading completed within maxWaitTime, false if timeout
   */
  public async waitForLoad(
    userId: string,
    maxWaitTime: number = 5000,
    checkInterval: number = DEFAULT_WAIT_INTERVAL,
  ): Promise<boolean> {
    const startTime = Date.now();

    while (Date.now() - startTime < maxWaitTime) {
      const loaded = await this.isLoaded(userId);
      if (loaded) {
        logger.debug(`[MCP][LoadStatusCache] User ${userId} loading completed by another process`);
        return true;
      }

      // Wait before checking again
      await new Promise((resolve) => setTimeout(resolve, checkInterval));
    }

    logger.warn(
      `[MCP][LoadStatusCache] Timeout waiting for user ${userId} loading (waited ${maxWaitTime}ms)`,
    );
    return false;
  }

  /**
   * Clear loaded status for a user.
   * Used for testing or manual cache invalidation.
   *
   * @param userId - User ID
   */
  public async clearLoaded(userId: string): Promise<void> {
    const key = `${LOADED_KEY_PREFIX}::${userId}`;
    await this.cache.delete(key);
    logger.debug(`[MCP][LoadStatusCache] Cleared loaded status for user ${userId}`);
  }
}

export const privateServersLoadStatusCache = new PrivateServersLoadStatusCache();

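A hedged sketch of the load flow these primitives are designed for, following the class's own guidance (check the flag, take the lock, release in finally). The ensureUserServersLoaded wrapper and loadUserServersFromDb loader are hypothetical; only the lock/flag methods come from the file above.

import { privateServersLoadStatusCache } from './PrivateServersLoadStatusCache';

declare function loadUserServersFromDb(userId: string): Promise<void>; // hypothetical loader

async function ensureUserServersLoaded(userId: string): Promise<void> {
  if (await privateServersLoadStatusCache.isLoaded(userId)) return;

  const acquired = await privateServersLoadStatusCache.acquireLoadLock(userId);
  if (!acquired) {
    // Another instance is loading this user; wait instead of duplicating the work.
    await privateServersLoadStatusCache.waitForLoad(userId);
    return;
  }

  try {
    await loadUserServersFromDb(userId);
    await privateServersLoadStatusCache.setLoaded(userId);
  } finally {
    // Always release, as the class docs recommend.
    await privateServersLoadStatusCache.releaseLoadLock(userId);
  }
}
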
@@ -5,11 +5,15 @@ import { BaseRegistryCache } from './BaseRegistryCache';
 const INITIALIZED = 'INITIALIZED';

 /**
- * Cache for tracking MCP Servers Registry metadata and status across distributed instances.
+ * Cache for tracking MCP Servers Registry global metadata and status across distributed instances.
  * Uses Redis-backed storage to coordinate state between leader and follower nodes.
- * Currently, tracks initialization status to ensure only the leader performs initialization
- * while followers wait for completion. Designed to be extended with additional registry
- * metadata as needed (e.g., last update timestamps, version info, health status).
+ * Tracks global initialization status for the registry.
+ *
+ * Note: Per-user private server loading status is tracked separately in PrivateServersLoadStatusCache
+ * to enable TTL synchronization and distributed locking.
+ *
+ * Designed to be extended with additional global registry metadata as needed
+ * (e.g., last update timestamps, version info, health status).
  * This cache is only meant to be used internally by registry management components.
  */
 class RegistryStatusCache extends BaseRegistryCache {

@@ -20,9 +20,13 @@ export class ServerConfigsCacheFactory {
   * @param leaderOnly - Whether operations should only be performed by the leader (only applies to Redis)
   * @returns ServerConfigsCache instance
   */
-  static create(owner: string, leaderOnly: boolean): ServerConfigsCache {
+  static create(
+    owner: string,
+    scope: 'Shared' | 'Private',
+    leaderOnly: boolean,
+  ): ServerConfigsCache {
     if (cacheConfig.USE_REDIS) {
-      return new ServerConfigsCacheRedis(owner, leaderOnly);
+      return new ServerConfigsCacheRedis(owner, scope, leaderOnly);
     }

     // In-memory mode uses a simple Map - doesn't need owner/namespace

@@ -15,7 +15,7 @@ export class ServerConfigsCacheInMemory {
       throw new Error(
         `Server "${serverName}" already exists in cache. Use update() to modify existing configs.`,
       );
-    this.cache.set(serverName, config);
+    this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
   }

   public async update(serverName: string, config: ParsedServerConfig): Promise<void> {
@@ -23,7 +23,15 @@ export class ServerConfigsCacheInMemory {
       throw new Error(
         `Server "${serverName}" does not exist in cache. Use add() to create new configs.`,
       );
-    this.cache.set(serverName, config);
+    this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
+  }
+
+  /**
+   * Sets a server config without checking if it exists (upsert operation).
+   * Use this for bulk operations where you want to add or update without error handling.
+   */
+  public async set(serverName: string, config: ParsedServerConfig): Promise<void> {
+    this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
   }

   public async remove(serverName: string): Promise<void> {
@@ -43,4 +51,12 @@ export class ServerConfigsCacheInMemory {
   public async reset(): Promise<void> {
     this.cache.clear();
   }
+
+  /**
+   * Returns a placeholder namespace for consistency with Redis implementation.
+   * In-memory cache doesn't use namespaces, so this always returns empty string.
+   */
+  public getNamespace(): string {
+    return '';
+  }
 }

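With the new upsert path, the three write methods differ only in their existence checks; a hedged sketch showing them side by side (the import paths are assumptions based on the file locations in this diff, and the config is passed in rather than constructed here):

import type { ParsedServerConfig } from '~/mcp/types';
import type { ServerConfigsCacheInMemory } from '~/mcp/registry/cache/ServerConfigsCacheInMemory';

// Sketch only: demonstrates the write-path semantics of the cache above.
async function writePaths(cache: ServerConfigsCacheInMemory, cfg: ParsedServerConfig) {
  await cache.add('my_server', cfg); // throws if "my_server" already exists
  await cache.update('my_server', cfg); // throws if "my_server" does not exist
  await cache.set('my_server', cfg); // upsert: no existence check (added in this change)
  // Every write path stamps lastUpdatedAt on the stored config.
}
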
@@ -14,13 +14,11 @@ import { BaseRegistryCache } from './BaseRegistryCache';
 export class ServerConfigsCacheRedis extends BaseRegistryCache {
   protected readonly cache: Keyv;
   private readonly owner: string;
-  private readonly leaderOnly: boolean;

-  constructor(owner: string, leaderOnly: boolean) {
-    super();
+  constructor(owner: string, scope: 'Shared' | 'Private', leaderOnly: boolean) {
+    super(leaderOnly);
     this.owner = owner;
-    this.leaderOnly = leaderOnly;
-    this.cache = standardCache(`${this.PREFIX}::Servers::${owner}`);
+    this.cache = standardCache(`${this.PREFIX}::Servers::${scope}::${owner}`);
   }

   public async add(serverName: string, config: ParsedServerConfig): Promise<void> {
@@ -30,7 +28,7 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
       throw new Error(
         `Server "${serverName}" already exists in cache. Use update() to modify existing configs.`,
       );
-    const success = await this.cache.set(serverName, config);
+    const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
     this.successCheck(`add ${this.owner} server "${serverName}"`, success);
   }

@@ -41,10 +39,21 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
       throw new Error(
         `Server "${serverName}" does not exist in cache. Use add() to create new configs.`,
       );
-    const success = await this.cache.set(serverName, config);
+    const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
     this.successCheck(`update ${this.owner} server "${serverName}"`, success);
   }

+  /**
+   * Sets a server config without checking if it exists (upsert operation).
+   * Use this for bulk operations where you want to add or update without error handling.
+   * Note: Respects leaderOnly flag if set.
+   */
+  public async set(serverName: string, config: ParsedServerConfig): Promise<void> {
+    if (this.leaderOnly) await this.leaderCheck(`set ${this.owner} MCP servers`);
+    const success = await this.cache.set(serverName, { ...config, lastUpdatedAt: Date.now() });
+    this.successCheck(`set ${this.owner} server "${serverName}"`, success);
+  }
+
   public async remove(serverName: string): Promise<void> {
     if (this.leaderOnly) await this.leaderCheck(`remove ${this.owner} MCP servers`);
     const success = await this.cache.delete(serverName);
@@ -68,9 +77,9 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {
       // Full key format: "prefix::namespace:keyName"
       const lastColonIndex = key.lastIndexOf(':');
       const keyName = key.substring(lastColonIndex + 1);
-      const value = await this.cache.get(keyName);
-      if (value) {
-        entries.push([keyName, value as ParsedServerConfig]);
+      const config = (await this.cache.get(keyName)) as ParsedServerConfig | undefined;
+      if (config) {
+        entries.push([keyName, config]);
       }
     }
   } else {
@@ -79,4 +88,12 @@ export class ServerConfigsCacheRedis extends BaseRegistryCache {

     return fromPairs(entries);
   }
+
+  /**
+   * Returns the Redis namespace for this cache instance.
+   * Used for constructing full Redis keys when needed for batch operations.
+   */
+  public getNamespace(): string {
+    return this.cache.namespace ?? '';
+  }
 }

packages/api/src/mcp/registry/cache/__tests__/PrivateServerConfigsCacheFactory.test.ts (new file, 71 lines)
@@ -0,0 +1,71 @@
import { PrivateServerConfigsCacheFactory } from '../PrivateServerConfigs/PrivateServerConfigsCacheFactory';
import { PrivateServerConfigsCacheInMemory } from '../PrivateServerConfigs/PrivateServerConfigsCacheInMemory';
import { PrivateServerConfigsCacheRedis } from '../PrivateServerConfigs/PrivateServerConfigsCacheRedis';
import { cacheConfig } from '~/cache';

// Mock the cache implementations
jest.mock('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory');
jest.mock('../PrivateServerConfigs/PrivateServerConfigsCacheRedis');

// Mock the cache config module
jest.mock('~/cache', () => ({
  cacheConfig: {
    USE_REDIS: false,
  },
}));

describe('PrivateServerConfigsCacheFactory', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('create()', () => {
    it('should return PrivateServerConfigsCacheRedis when USE_REDIS is true', () => {
      // Arrange
      cacheConfig.USE_REDIS = true;

      // Act
      const cache = PrivateServerConfigsCacheFactory.create();

      // Assert
      expect(cache).toBeInstanceOf(PrivateServerConfigsCacheRedis);
      expect(PrivateServerConfigsCacheRedis).toHaveBeenCalled();
    });

    it('should return PrivateServerConfigsCacheInMemory when USE_REDIS is false', () => {
      // Arrange
      cacheConfig.USE_REDIS = false;

      // Act
      const cache = PrivateServerConfigsCacheFactory.create();

      // Assert
      expect(cache).toBeInstanceOf(PrivateServerConfigsCacheInMemory);
      expect(PrivateServerConfigsCacheInMemory).toHaveBeenCalled();
    });

    it('should create PrivateServerConfigsCacheInMemory without parameters when USE_REDIS is false', () => {
      // Arrange
      cacheConfig.USE_REDIS = false;

      // Act
      PrivateServerConfigsCacheFactory.create();

      // Assert
      // Private cache doesn't use any parameters
      expect(PrivateServerConfigsCacheInMemory).toHaveBeenCalledWith();
    });

    it('should create PrivateServerConfigsCacheRedis without parameters when USE_REDIS is true', () => {
      // Arrange
      cacheConfig.USE_REDIS = true;

      // Act
      PrivateServerConfigsCacheFactory.create();

      // Assert
      // Private cache doesn't use any parameters
      expect(PrivateServerConfigsCacheRedis).toHaveBeenCalledWith();
    });
  });
});

packages/api/src/mcp/registry/cache/__tests__/PrivateServerConfigsCacheInMemory.test.ts (new file, 346 lines; truncated here)
@@ -0,0 +1,346 @@
import { expect } from '@playwright/test';
import { ParsedServerConfig } from '~/mcp/types';
const FIXED_TIME = 1699564800000;
const originalDateNow = Date.now;
Date.now = jest.fn(() => FIXED_TIME);

describe('PrivateServerConfigsCacheInMemory Tests', () => {
  let PrivateServerConfigsCacheInMemory: typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory').PrivateServerConfigsCacheInMemory;
  let cache: InstanceType<
    typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory').PrivateServerConfigsCacheInMemory
  >;

  // Test data
  const mockConfig1: ParsedServerConfig = {
    command: 'node',
    args: ['server1.js'],
    env: { TEST: 'value1' },
    lastUpdatedAt: FIXED_TIME,
  };

  const mockConfig2: ParsedServerConfig = {
    command: 'python',
    args: ['server2.py'],
    env: { TEST: 'value2' },
    lastUpdatedAt: FIXED_TIME,
  };

  const mockConfig3: ParsedServerConfig = {
    command: 'node',
    args: ['server3.js'],
    url: 'http://localhost:3000',
    requiresOAuth: true,
    lastUpdatedAt: FIXED_TIME,
  };

  beforeAll(async () => {
    // Import modules
    const cacheModule = await import('../PrivateServerConfigs/PrivateServerConfigsCacheInMemory');
    PrivateServerConfigsCacheInMemory = cacheModule.PrivateServerConfigsCacheInMemory;
  });

  afterAll(() => {
    Date.now = originalDateNow;
  });

  beforeEach(() => {
    // Create a fresh instance for each test
    cache = new PrivateServerConfigsCacheInMemory();
  });

  describe('add and get operations', () => {
    it('should add and retrieve a server config for a user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig1);
    });

    it('should return undefined for non-existent server', async () => {
      const result = await cache.get('user1', 'non-existent');
      expect(result).toBeUndefined();
    });

    it('should throw error when adding duplicate server for same user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await expect(cache.add('user1', 'server1', mockConfig2)).rejects.toThrow(
        'Server "server1" already exists in cache. Use update() to modify existing configs.',
      );
    });

    it('should handle multiple server configs for a single user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user1', 'server3', mockConfig3);

      const result1 = await cache.get('user1', 'server1');
      const result2 = await cache.get('user1', 'server2');
      const result3 = await cache.get('user1', 'server3');

      expect(result1).toEqual(mockConfig1);
      expect(result2).toEqual(mockConfig2);
      expect(result3).toEqual(mockConfig3);
    });

    it('should isolate server configs between different users', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);

      const user1Result = await cache.get('user1', 'server1');
      const user2Result = await cache.get('user2', 'server1');

      expect(user1Result).toEqual(mockConfig1);
      expect(user2Result).toEqual(mockConfig2);
    });
  });

  describe('getAll operation', () => {
    it('should return empty object when user has no servers', async () => {
      const result = await cache.getAll('user1');
      expect(result).toEqual({});
    });

    it('should return all server configs for a user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user1', 'server3', mockConfig3);

      const result = await cache.getAll('user1');
      expect(result).toEqual({
        server1: mockConfig1,
        server2: mockConfig2,
        server3: mockConfig3,
      });
    });

    it('should only return configs for specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server3', mockConfig3);

      const user1Result = await cache.getAll('user1');
      const user2Result = await cache.getAll('user2');

      expect(Object.keys(user1Result).length).toBe(2);
      expect(Object.keys(user2Result).length).toBe(1);
      expect(user1Result.server3).toBeUndefined();
      expect(user2Result.server1).toBeUndefined();
    });
  });

  describe('update operation', () => {
    it('should update an existing server config', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig1);

      await cache.update('user1', 'server1', mockConfig2);
      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig2);
    });

    it('should throw error when updating non-existent server', async () => {
      await expect(cache.update('user1', 'non-existent', mockConfig1)).rejects.toThrow(
        'Server "non-existent" does not exist in cache. Use add() to create new configs.',
      );
    });

    it('should only update for specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);

      await cache.update('user1', 'server1', mockConfig3);

      expect(await cache.get('user1', 'server1')).toEqual(mockConfig3);
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
    });
  });

  describe('remove operation', () => {
    it('should remove an existing server config', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.get('user1', 'server1')).toEqual(mockConfig1);

      await cache.remove('user1', 'server1');
      expect(await cache.get('user1', 'server1')).toBeUndefined();
    });

    it('should throw error when removing non-existent server', async () => {
      await expect(cache.remove('user1', 'non-existent')).rejects.toThrow(
        'Failed to remove server "non-existent" in cache.',
      );
    });

    it('should only remove from specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig2);

      await cache.remove('user1', 'server1');

      expect(await cache.get('user1', 'server1')).toBeUndefined();
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
    });

    it('should allow re-adding a removed server', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.remove('user1', 'server1');
      await cache.add('user1', 'server1', mockConfig3);

      const result = await cache.get('user1', 'server1');
      expect(result).toEqual(mockConfig3);
    });
  });

  describe('reset operation', () => {
    it('should clear all servers for a specific user', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user1', 'server2', mockConfig2);
      await cache.add('user2', 'server3', mockConfig3);

      await cache.reset('user1');

      const user1Result = await cache.getAll('user1');
      const user2Result = await cache.getAll('user2');

      expect(Object.keys(user1Result).length).toBe(0);
      expect(Object.keys(user2Result).length).toBe(1);
    });
  });

  describe('has operation', () => {
    it('should return true for users with loaded cache', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      expect(await cache.has('user1')).toBe(true);
    });

    it('should return false for users without loaded cache', async () => {
      expect(await cache.has('user1')).toBe(false);
    });
  });

  describe('updateServerConfigIfExists operation', () => {
    it('should update server config for all users who have it', async () => {
      await cache.add('user1', 'server1', mockConfig1);
      await cache.add('user2', 'server1', mockConfig1);
      await cache.add('user3', 'server2', mockConfig2);

      await cache.updateServerConfigIfExists('server1', mockConfig3);

      expect(await cache.get('user1', 'server1')).toEqual(mockConfig3);
      expect(await cache.get('user2', 'server1')).toEqual(mockConfig3);
|
||||||
|
expect(await cache.get('user3', 'server1')).toBeUndefined();
|
||||||
|
expect(await cache.get('user3', 'server2')).toEqual(mockConfig2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when no users have the server', async () => {
|
||||||
|
await cache.add('user1', 'server2', mockConfig2);
|
||||||
|
await cache.add('user2', 'server3', mockConfig3);
|
||||||
|
|
||||||
|
await expect(cache.updateServerConfigIfExists('server1', mockConfig1)).resolves.not.toThrow();
|
||||||
|
|
||||||
|
expect(await cache.get('user1', 'server2')).toEqual(mockConfig2);
|
||||||
|
expect(await cache.get('user2', 'server3')).toEqual(mockConfig3);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case with no loaded user caches', async () => {
|
||||||
|
await expect(cache.updateServerConfigIfExists('server1', mockConfig1)).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('addServerConfigIfCacheExists operation', () => {
|
||||||
|
it('should add server to specified users with initialized caches', async () => {
|
||||||
|
await cache.add('user1', 'other', mockConfig1);
|
||||||
|
await cache.add('user2', 'other', mockConfig1);
|
||||||
|
|
||||||
|
await cache.addServerConfigIfCacheExists(['user1', 'user2', 'user3'], 'server1', mockConfig2);
|
||||||
|
|
||||||
|
expect(await cache.get('user1', 'server1')).toEqual(mockConfig2);
|
||||||
|
expect(await cache.get('user2', 'server1')).toEqual(mockConfig2);
|
||||||
|
expect(await cache.get('user3', 'server1')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not add to users without initialized caches', async () => {
|
||||||
|
await cache.addServerConfigIfCacheExists(['user1', 'user2'], 'server1', mockConfig1);
|
||||||
|
|
||||||
|
expect(await cache.get('user1', 'server1')).toBeUndefined();
|
||||||
|
expect(await cache.get('user2', 'server1')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty userIds array', async () => {
|
||||||
|
await expect(
|
||||||
|
cache.addServerConfigIfCacheExists([], 'server1', mockConfig1),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('removeServerConfigIfCacheExists operation', () => {
|
||||||
|
it('should remove server from specified users', async () => {
|
||||||
|
await cache.add('user1', 'server1', mockConfig1);
|
||||||
|
await cache.add('user2', 'server1', mockConfig1);
|
||||||
|
await cache.add('user3', 'server1', mockConfig1);
|
||||||
|
|
||||||
|
await cache.removeServerConfigIfCacheExists(['user1', 'user3'], 'server1');
|
||||||
|
|
||||||
|
expect(await cache.get('user1', 'server1')).toBeUndefined();
|
||||||
|
expect(await cache.get('user2', 'server1')).toEqual(mockConfig1);
|
||||||
|
expect(await cache.get('user3', 'server1')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle users who do not have the server', async () => {
|
||||||
|
await cache.add('user1', 'server1', mockConfig1);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.removeServerConfigIfCacheExists(['user1', 'user2'], 'server1'),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
|
||||||
|
expect(await cache.get('user1', 'server1')).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty userIds array', async () => {
|
||||||
|
await expect(cache.removeServerConfigIfCacheExists([], 'server1')).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('findUsersWithServer operation', () => {
|
||||||
|
it('should return all users who have the server', async () => {
|
||||||
|
await cache.add('user1', 'server1', mockConfig1);
|
||||||
|
await cache.add('user2', 'server1', mockConfig1);
|
||||||
|
await cache.add('user3', 'other', mockConfig2);
|
||||||
|
|
||||||
|
const users = await cache.findUsersWithServer('server1');
|
||||||
|
|
||||||
|
expect(users.sort()).toEqual(['user1', 'user2'].sort());
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array if no users have the server', async () => {
|
||||||
|
await cache.add('user1', 'other', mockConfig1);
|
||||||
|
|
||||||
|
const users = await cache.findUsersWithServer('server1');
|
||||||
|
|
||||||
|
expect(users).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array with no loaded user caches', async () => {
|
||||||
|
const users = await cache.findUsersWithServer('server1');
|
||||||
|
|
||||||
|
expect(users).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resetAll operation', () => {
|
||||||
|
it('should clear all servers for all users', async () => {
|
||||||
|
await cache.add('user1', 'server1', mockConfig1);
|
||||||
|
await cache.add('user1', 'server2', mockConfig2);
|
||||||
|
await cache.add('user2', 'server1', mockConfig1);
|
||||||
|
await cache.add('user2', 'server3', mockConfig3);
|
||||||
|
|
||||||
|
await cache.resetAll();
|
||||||
|
|
||||||
|
expect(await cache.has('user1')).toBe(false);
|
||||||
|
expect(await cache.has('user2')).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case with no loaded user caches', async () => {
|
||||||
|
// Should not throw
|
||||||
|
await expect(cache.resetAll()).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
@ -0,0 +1,606 @@
|
||||||
|
import { expect } from '@playwright/test';
|
||||||
|
import { ParsedServerConfig } from '~/mcp/types';
|
||||||
|
|
||||||
|
describe('PrivateServerConfigsCacheRedis Integration Tests', () => {
|
||||||
|
let PrivateServerConfigsCacheRedis: typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis').PrivateServerConfigsCacheRedis;
|
||||||
|
let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
|
||||||
|
let cache: InstanceType<
|
||||||
|
typeof import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis').PrivateServerConfigsCacheRedis
|
||||||
|
>;
|
||||||
|
|
||||||
|
// Test data
|
||||||
|
const mockConfig1: ParsedServerConfig = {
|
||||||
|
command: 'node',
|
||||||
|
args: ['server1.js'],
|
||||||
|
env: { TEST: 'value1' },
|
||||||
|
};
|
||||||
|
|
||||||
|
const mockConfig2: ParsedServerConfig = {
|
||||||
|
command: 'python',
|
||||||
|
args: ['server2.py'],
|
||||||
|
env: { TEST: 'value2' },
|
||||||
|
};
|
||||||
|
|
||||||
|
const mockConfig3: ParsedServerConfig = {
|
||||||
|
command: 'node',
|
||||||
|
args: ['server3.js'],
|
||||||
|
url: 'http://localhost:3000',
|
||||||
|
requiresOAuth: true,
|
||||||
|
};
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
// Set up environment variables for Redis (only if not already set)
|
||||||
|
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
|
||||||
|
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
|
||||||
|
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
|
||||||
|
console.log('USING CLUSETER....', process.env.USE_REDIS_CLUSTER);
|
||||||
|
|
||||||
|
process.env.REDIS_KEY_PREFIX =
|
||||||
|
process.env.REDIS_KEY_PREFIX ?? 'PrivateServerConfigsCacheRedis-IntegrationTest';
|
||||||
|
|
||||||
|
// Import modules after setting env vars
|
||||||
|
const cacheModule = await import('../PrivateServerConfigs/PrivateServerConfigsCacheRedis');
|
||||||
|
const redisClients = await import('~/cache/redisClients');
|
||||||
|
|
||||||
|
PrivateServerConfigsCacheRedis = cacheModule.PrivateServerConfigsCacheRedis;
|
||||||
|
keyvRedisClient = redisClients.keyvRedisClient;
|
||||||
|
|
||||||
|
// Ensure Redis is connected
|
||||||
|
if (!keyvRedisClient) throw new Error('Redis client is not initialized');
|
||||||
|
|
||||||
|
// Wait for connection and topology discovery to complete
|
||||||
|
await redisClients.keyvRedisClientReady;
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
// Create a fresh instance for each test
|
||||||
|
cache = new PrivateServerConfigsCacheRedis();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
// Clean up: clear all test keys from Redis
|
||||||
|
if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
|
||||||
|
const pattern = '*PrivateServerConfigsCacheRedis-IntegrationTest*';
|
||||||
|
const keysToDelete: string[] = [];
|
||||||
|
|
||||||
|
// Collect all keys first
|
||||||
|
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
|
||||||
|
keysToDelete.push(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete in parallel for cluster mode efficiency
|
||||||
|
if (keysToDelete.length > 0) {
|
||||||
|
await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
afterAll(async () => {
|
||||||
|
// Close Redis connection
|
||||||
|
if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('add and get operations', () => {
|
||||||
|
it('should add and retrieve a server config for a user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
expect(result).toMatchObject(mockConfig1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined for non-existent server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const result = await cache.get(`user1-${randonPrefix}`, 'non-existent');
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw error when adding duplicate server for same user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await expect(
|
||||||
|
cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2),
|
||||||
|
).rejects.toThrow(
|
||||||
|
`Server "server1-${randonPrefix}" already exists in cache. Use update() to modify existing configs.`,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple server configs for a single user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
const result1 = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
const result2 = await cache.get(`user1-${randonPrefix}`, `server2-${randonPrefix}`);
|
||||||
|
const result3 = await cache.get(`user1-${randonPrefix}`, `server3-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(result1).toMatchObject(mockConfig1);
|
||||||
|
expect(result2).toMatchObject(mockConfig2);
|
||||||
|
expect(result3).toMatchObject(mockConfig3);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should isolate server configs between different users', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
|
||||||
|
|
||||||
|
const user1Result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
const user2Result = await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(user1Result).toMatchObject(mockConfig1);
|
||||||
|
expect(user2Result).toMatchObject(mockConfig2);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getAll operation', () => {
|
||||||
|
it('should return empty object when user has no servers', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const result = await cache.getAll(`user1-${randonPrefix}`);
|
||||||
|
expect(result).toMatchObject({});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return all server configs for a user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
const result = await cache.getAll(`user1-${randonPrefix}`);
|
||||||
|
expect(result).toMatchObject({
|
||||||
|
[`server1-${randonPrefix}`]: mockConfig1,
|
||||||
|
[`server2-${randonPrefix}`]: mockConfig2,
|
||||||
|
[`server3-${randonPrefix}`]: mockConfig3,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only return configs for specific user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
const user1Result = await cache.getAll(`user1-${randonPrefix}`);
|
||||||
|
const user2Result = await cache.getAll(`user2-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(Object.keys(user1Result).length).toBe(2);
|
||||||
|
expect(Object.keys(user2Result).length).toBe(1);
|
||||||
|
expect(user1Result.server3).toBeUndefined();
|
||||||
|
expect(user2Result.server1).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('update operation', () => {
|
||||||
|
it('should update an existing server config', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig1,
|
||||||
|
);
|
||||||
|
|
||||||
|
await cache.update(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
|
||||||
|
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
expect(result).toMatchObject(mockConfig2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw error when updating non-existent server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.update(`user1-${randonPrefix}`, 'non-existent', mockConfig1),
|
||||||
|
).rejects.toThrow(
|
||||||
|
'Server "non-existent" does not exist in cache. Use add() to create new configs.',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only update for specific user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
|
||||||
|
|
||||||
|
await cache.update(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('remove operation', () => {
|
||||||
|
it('should remove an existing server config', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig1,
|
||||||
|
);
|
||||||
|
|
||||||
|
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should throw error when removing non-existent server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await expect(cache.remove(`user1-${randonPrefix}`, 'non-existent')).rejects.toThrow(
|
||||||
|
`Failed to remove user1-${randonPrefix} server "non-existent" in cache.`,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should only remove from specific user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig2);
|
||||||
|
|
||||||
|
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should allow re-adding a removed server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.remove(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
const result = await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`);
|
||||||
|
expect(result).toMatchObject(mockConfig3);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('reset operation', () => {
|
||||||
|
it('should clear all servers for a specific user', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
await cache.reset(`user1-${randonPrefix}`);
|
||||||
|
|
||||||
|
const user1Result = await cache.getAll(`user1-${randonPrefix}`);
|
||||||
|
const user2Result = await cache.getAll(`user2-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(Object.keys(user1Result).length).toBe(0);
|
||||||
|
expect(Object.keys(user2Result).length).toBe(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('has operation', () => {
|
||||||
|
it('should return true for users with loaded cache', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
console.log('check');
|
||||||
|
expect(await cache.has(`user1-${randonPrefix}`)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for users without loaded cache', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
expect(await cache.has(`user1-${randonPrefix}`)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('updateServerConfigIfExists operation', () => {
|
||||||
|
it('should update server config for all users who have it', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user3-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
|
||||||
|
await cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user3-${randonPrefix}`, `server2-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should update lastUpdatedAt timestamp', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, 'server1-share', mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, 'server1-share', mockConfig1);
|
||||||
|
|
||||||
|
const timeBeforeUpdate = Date.now();
|
||||||
|
await new Promise((r) => setTimeout(() => r(true), 100));
|
||||||
|
await cache.updateServerConfigIfExists('server1-share', mockConfig2);
|
||||||
|
|
||||||
|
const user1Result = await cache.get(`user1-${randonPrefix}`, 'server1-share');
|
||||||
|
const user2Result = await cache.get(`user2-${randonPrefix}`, 'server1-share');
|
||||||
|
expect(user1Result).toBeDefined();
|
||||||
|
expect(user1Result!.lastUpdatedAt! - timeBeforeUpdate).toBeGreaterThan(0);
|
||||||
|
expect(user2Result!.lastUpdatedAt! - timeBeforeUpdate).toBeGreaterThan(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case when no users have the server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig1),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server2-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server3-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle case with no user caches', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig1),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should work across multiple cache instances (distributed scenario)', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const cache1 = new PrivateServerConfigsCacheRedis();
|
||||||
|
const cache2 = new PrivateServerConfigsCacheRedis();
|
||||||
|
|
||||||
|
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache1.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
|
||||||
|
await cache2.updateServerConfigIfExists(`server1-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig3,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('addServerConfigIfCacheExists operation', () => {
|
||||||
|
it('should add server to specified users with initialized caches', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, 'other', mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, 'other', mockConfig1);
|
||||||
|
|
||||||
|
await cache.addServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user2-${randonPrefix}`, `user3-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not add to users without initialized caches', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.addServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
mockConfig1,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty userIds array', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.addServerConfigIfCacheExists([], `server1-${randonPrefix}`, mockConfig1),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should work across multiple cache instances (distributed scenario)', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const cache1 = new PrivateServerConfigsCacheRedis();
|
||||||
|
const cache2 = new PrivateServerConfigsCacheRedis();
|
||||||
|
|
||||||
|
await cache1.add(`user1-${randonPrefix}`, 'other', mockConfig1);
|
||||||
|
await cache1.add(`user2-${randonPrefix}`, 'other', mockConfig1);
|
||||||
|
|
||||||
|
await cache2.addServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user2-${randonPrefix}`, `user3-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig2,
|
||||||
|
);
|
||||||
|
expect(await cache1.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('removeServerConfigIfCacheExists operation', () => {
|
||||||
|
it('should remove server from specified users', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user3-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
|
||||||
|
await cache.removeServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user3-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toMatchObject(
|
||||||
|
mockConfig1,
|
||||||
|
);
|
||||||
|
expect(await cache.get(`user3-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle users who do not have the server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.removeServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle empty userIds array', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await expect(
|
||||||
|
cache.removeServerConfigIfCacheExists([], `server1-${randonPrefix}`),
|
||||||
|
).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should work across multiple cache instances (distributed scenario)', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const cache1 = new PrivateServerConfigsCacheRedis();
|
||||||
|
const cache2 = new PrivateServerConfigsCacheRedis();
|
||||||
|
|
||||||
|
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache1.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
|
||||||
|
await cache2.removeServerConfigIfCacheExists(
|
||||||
|
[`user1-${randonPrefix}`, `user2-${randonPrefix}`],
|
||||||
|
`server1-${randonPrefix}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache1.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('findUsersWithServer operation', () => {
|
||||||
|
it('should return all users who have the server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user3-${randonPrefix}`, 'other', mockConfig2);
|
||||||
|
|
||||||
|
const users = await cache.findUsersWithServer(`server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(users.sort()).toEqual([`user1-${randonPrefix}`, `user2-${randonPrefix}`].sort());
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array if no users have the server', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, 'other', mockConfig1);
|
||||||
|
|
||||||
|
const users = await cache.findUsersWithServer(`server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(users).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty array with no user caches', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const users = await cache.findUsersWithServer(`server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(users).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should work across multiple cache instances (distributed scenario)', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const cache1 = new PrivateServerConfigsCacheRedis();
|
||||||
|
const cache2 = new PrivateServerConfigsCacheRedis();
|
||||||
|
|
||||||
|
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache1.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache1.add(`user3-${randonPrefix}`, 'other', mockConfig2);
|
||||||
|
|
||||||
|
const users = await cache2.findUsersWithServer(`server1-${randonPrefix}`);
|
||||||
|
|
||||||
|
expect(users.sort()).toEqual([`user1-${randonPrefix}`, `user2-${randonPrefix}`].sort());
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('resetAll operation', () => {
|
||||||
|
it('should clear all servers for all users in Redis', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user1-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache.add(`user2-${randonPrefix}`, `server3-${randonPrefix}`, mockConfig3);
|
||||||
|
|
||||||
|
await cache.resetAll();
|
||||||
|
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user1-${randonPrefix}`, `server2-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache.get(`user2-${randonPrefix}`, `server3-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it.skip('should handle case with no user caches', async () => {
|
||||||
|
// const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
// Should not throw
|
||||||
|
await expect(cache.resetAll()).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should work across multiple cache instances (distributed scenario)', async () => {
|
||||||
|
const randonPrefix = Math.random().toString(36).substring(2, 8);
|
||||||
|
|
||||||
|
const cache1 = new PrivateServerConfigsCacheRedis();
|
||||||
|
const cache2 = new PrivateServerConfigsCacheRedis();
|
||||||
|
|
||||||
|
// Add servers using cache1
|
||||||
|
await cache1.add(`user1-${randonPrefix}`, `server1-${randonPrefix}`, mockConfig1);
|
||||||
|
await cache1.add(`user2-${randonPrefix}`, `server2-${randonPrefix}`, mockConfig2);
|
||||||
|
|
||||||
|
// Reset using cache2
|
||||||
|
await cache2.resetAll();
|
||||||
|
|
||||||
|
// Verify using cache1
|
||||||
|
expect(await cache1.get(`user1-${randonPrefix}`, `server1-${randonPrefix}`)).toBeUndefined();
|
||||||
|
expect(await cache1.get(`user2-${randonPrefix}`, `server2-${randonPrefix}`)).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
@ -0,0 +1,397 @@
|
||||||
|
import { expect } from '@playwright/test';
|
||||||
|
|
||||||
|
describe('PrivateServersLoadStatusCache Integration Tests', () => {
|
||||||
|
let loadStatusCache: typeof import('../PrivateServersLoadStatusCache').privateServersLoadStatusCache;
|
||||||
|
let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
|
||||||
|
let testCounter = 0;
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
// Set up environment variables for Redis (only if not already set)
|
||||||
|
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
|
||||||
|
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
|
||||||
|
process.env.REDIS_KEY_PREFIX = 'PrivateServersLoadStatusCache-IntegrationTest';
|
||||||
|
|
||||||
|
// Import modules after setting env vars
|
||||||
|
const loadStatusCacheModule = await import('../PrivateServersLoadStatusCache');
|
||||||
|
const redisClients = await import('~/cache/redisClients');
|
||||||
|
|
||||||
|
loadStatusCache = loadStatusCacheModule.privateServersLoadStatusCache;
|
||||||
|
keyvRedisClient = redisClients.keyvRedisClient;
|
||||||
|
|
||||||
|
// Ensure Redis is connected
|
||||||
|
if (!keyvRedisClient) throw new Error('Redis client is not initialized');
|
||||||
|
|
||||||
|
// Wait for Redis connection and topology discovery to complete
|
||||||
|
await redisClients.keyvRedisClientReady;
|
||||||
|
|
||||||
|
process.setMaxListeners(200);
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.resetModules();
|
||||||
|
testCounter++;
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
// Clean up: clear all test keys from Redis
|
||||||
|
if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
|
||||||
|
const pattern = '*PrivateServersLoadStatusCache-IntegrationTest*';
|
||||||
|
const keysToDelete: string[] = [];
|
||||||
|
|
||||||
|
// Collect all keys first
|
||||||
|
for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
|
||||||
|
keysToDelete.push(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete in parallel for cluster mode efficiency
|
||||||
|
if (keysToDelete.length > 0) {
|
||||||
|
await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
afterAll(async () => {
|
||||||
|
// Close Redis connection
|
||||||
|
if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isLoaded() and setLoaded() integration', () => {
|
||||||
|
it('should persist loaded status in cache', async () => {
|
||||||
|
const userId = `user-persist-${testCounter}`;
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(false);
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle multiple users independently', async () => {
|
||||||
|
const user1 = `user-multi-1-${testCounter}`;
|
||||||
|
const user2 = `user-multi-2-${testCounter}`;
|
||||||
|
const user3 = `user-multi-3-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(user1, 60000);
|
||||||
|
await loadStatusCache.setLoaded(user2, 60000);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user1)).toBe(true);
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(true);
|
||||||
|
expect(await loadStatusCache.isLoaded(user3)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should respect TTL expiration (short TTL for testing)', async () => {
|
||||||
|
const userId = `user-ttl-expire-${testCounter}`;
|
||||||
|
|
||||||
|
// Set with 1 second TTL
|
||||||
|
await loadStatusCache.setLoaded(userId, 1000);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
|
||||||
|
// Wait for TTL to expire
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 1100));
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(false);
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
it('should allow re-setting loaded status', async () => {
|
||||||
|
const userId = `user-reset-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
|
||||||
|
await loadStatusCache.clearLoaded(userId);
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(false);
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('acquireLoadLock() and releaseLoadLock() integration', () => {
|
||||||
|
it('should acquire lock successfully when available', async () => {
|
||||||
|
const userId = `user-lock-acquire-${testCounter}`;
|
||||||
|
|
||||||
|
const acquired = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
|
||||||
|
expect(acquired).toBe(true);
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should prevent concurrent lock acquisition', async () => {
|
||||||
|
const userId = `user-lock-concurrent-${testCounter}`;
|
||||||
|
|
||||||
|
const lock1 = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(lock1).toBe(true);
|
||||||
|
|
||||||
|
const lock2 = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(lock2).toBe(false);
|
||||||
|
|
||||||
|
// Release lock
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
|
||||||
|
// Should be able to acquire again
|
||||||
|
const lock3 = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(lock3).toBe(true);
|
||||||
|
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should auto-release lock after TTL expires', async () => {
|
||||||
|
const userId = `user-lock-ttl-${testCounter}`;
|
||||||
|
|
||||||
|
const acquired = await loadStatusCache.acquireLoadLock(userId, 1000); // 1 second TTL
|
||||||
|
expect(acquired).toBe(true);
|
||||||
|
|
||||||
|
// Lock should prevent acquisition
|
||||||
|
const blocked = await loadStatusCache.acquireLoadLock(userId, 1000);
|
||||||
|
expect(blocked).toBe(false);
|
||||||
|
|
||||||
|
// Wait for TTL to expire
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 1100));
|
||||||
|
|
||||||
|
// Should be able to acquire now
|
||||||
|
const reacquired = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(reacquired).toBe(true);
|
||||||
|
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
it('should handle locks for multiple users independently', async () => {
|
||||||
|
const user1 = `user-lock-multi-1-${testCounter}`;
|
||||||
|
const user2 = `user-lock-multi-2-${testCounter}`;
|
||||||
|
const user3 = `user-lock-multi-3-${testCounter}`;
|
||||||
|
|
||||||
|
const lock1 = await loadStatusCache.acquireLoadLock(user1, 10000);
|
||||||
|
const lock2 = await loadStatusCache.acquireLoadLock(user2, 10000);
|
||||||
|
const lock3 = await loadStatusCache.acquireLoadLock(user3, 10000);
|
||||||
|
|
||||||
|
expect(lock1).toBe(true);
|
||||||
|
expect(lock2).toBe(true);
|
||||||
|
expect(lock3).toBe(true);
|
||||||
|
|
||||||
|
await loadStatusCache.releaseLoadLock(user1);
|
||||||
|
await loadStatusCache.releaseLoadLock(user2);
|
||||||
|
await loadStatusCache.releaseLoadLock(user3);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should allow release of non-existent lock without error', async () => {
|
||||||
|
const userId = `user-lock-nonexist-${testCounter}`;
|
||||||
|
await expect(loadStatusCache.releaseLoadLock(userId)).resolves.not.toThrow();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('waitForLoad() integration', () => {
|
||||||
|
it('should wait and detect when loaded flag is set', async () => {
|
||||||
|
const userId = `user-wait-detect-${testCounter}`;
|
||||||
|
|
||||||
|
// Start waiting in background
|
||||||
|
const waitPromise = loadStatusCache.waitForLoad(userId, 2000, 100);
|
||||||
|
|
||||||
|
// Simulate another process setting the loaded flag after 300ms
|
||||||
|
const setLoadedPromise = new Promise<void>((resolve) => {
|
||||||
|
setTimeout(async () => {
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
// Add small delay to ensure Redis write completes
|
||||||
|
await new Promise((r) => setTimeout(r, 50));
|
||||||
|
resolve();
|
||||||
|
}, 300);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Await both in parallel - waitPromise should complete first
|
||||||
|
const [result] = await Promise.all([waitPromise, setLoadedPromise]);
|
||||||
|
|
||||||
|
expect(result).toBe(true);
|
||||||
|
}, 5000);
|
||||||
|
|
||||||
|
it('should timeout if loaded flag is never set', async () => {
|
||||||
|
const userId = `user-timeout-${testCounter}`;
|
||||||
|
|
||||||
|
const result = await loadStatusCache.waitForLoad(userId, 300, 50);
|
||||||
|
|
||||||
|
expect(result).toBe(false);
|
||||||
|
}, 1000);
|
||||||
|
|
||||||
|
it('should return immediately if already loaded', async () => {
|
||||||
|
const userId = `user-immediate-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
// Small delay to ensure Redis write completes
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||||
|
|
||||||
|
const startTime = Date.now();
|
||||||
|
const result = await loadStatusCache.waitForLoad(userId, 5000, 100);
|
||||||
|
const elapsed = Date.now() - startTime;
|
||||||
|
|
||||||
|
expect(result).toBe(true);
|
||||||
|
expect(elapsed).toBeLessThan(300); // Increased tolerance for CI environments
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Complete load workflow integration', () => {
|
||||||
|
it('should simulate distributed load coordination', async () => {
|
||||||
|
const userId = `user-distributed-${testCounter}`;
|
||||||
|
|
||||||
|
// Process 1: Acquires lock and loads
|
||||||
|
const lock1 = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(lock1).toBe(true);
|
||||||
|
|
||||||
|
// Process 2: Tries to acquire lock (should fail) and waits
|
||||||
|
const lock2 = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(lock2).toBe(false);
|
||||||
|
|
||||||
|
const waitPromise = loadStatusCache.waitForLoad(userId, 3000, 100);
|
||||||
|
|
||||||
|
// Process 1: Completes loading after 300ms
|
||||||
|
const process1Promise = new Promise<void>((resolve) => {
|
||||||
|
setTimeout(async () => {
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
await new Promise((r) => setTimeout(r, 50)); // Redis write delay
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
resolve();
|
||||||
|
}, 300);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Process 2: Should detect completion
|
||||||
|
const completed = await waitPromise;
|
||||||
|
expect(completed).toBe(true);
|
||||||
|
|
||||||
|
// Both processes should now see it as loaded
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
|
||||||
|
// Wait for process 1 to complete cleanup
|
||||||
|
await process1Promise;
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
it('should handle process crash scenario (lock timeout)', async () => {
|
||||||
|
const userId = `user-crash-${testCounter}`;
|
||||||
|
|
||||||
|
// Process 1: Acquires lock but crashes (doesn't release)
|
||||||
|
const lock1 = await loadStatusCache.acquireLoadLock(userId, 1000); // 1 second TTL
|
||||||
|
expect(lock1).toBe(true);
|
||||||
|
// (simulate crash - no releaseLoadLock call)
|
||||||
|
|
||||||
|
// Process 2: Waits for timeout
|
||||||
|
const waitResult = await loadStatusCache.waitForLoad(userId, 1500, 200);
|
||||||
|
expect(waitResult).toBe(false); // Timeout (process 1 never completed)
|
||||||
|
|
||||||
|
// After lock TTL expires, process 2 can retry
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 200));
|
||||||
|
|
||||||
|
const retryLock = await loadStatusCache.acquireLoadLock(userId, 10000);
|
||||||
|
expect(retryLock).toBe(true);
|
||||||
|
|
||||||
|
// Process 2 completes successfully
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
await loadStatusCache.releaseLoadLock(userId);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
it('should handle concurrent user loads independently', async () => {
|
||||||
|
const user1 = `user-concurrent-1-${testCounter}`;
|
||||||
|
const user2 = `user-concurrent-2-${testCounter}`;
|
||||||
|
const user3 = `user-concurrent-3-${testCounter}`;
|
||||||
|
|
||||||
|
// Simulate 3 users loading concurrently
|
||||||
|
const user1Lock = await loadStatusCache.acquireLoadLock(user1, 10000);
|
||||||
|
const user2Lock = await loadStatusCache.acquireLoadLock(user2, 10000);
|
||||||
|
const user3Lock = await loadStatusCache.acquireLoadLock(user3, 10000);
|
||||||
|
|
||||||
|
expect(user1Lock).toBe(true);
|
||||||
|
expect(user2Lock).toBe(true);
|
||||||
|
expect(user3Lock).toBe(true);
|
||||||
|
|
||||||
|
// All complete independently
|
||||||
|
await Promise.all([
|
||||||
|
(async () => {
|
||||||
|
await loadStatusCache.setLoaded(user1, 60000);
|
||||||
|
await loadStatusCache.releaseLoadLock(user1);
|
||||||
|
})(),
|
||||||
|
(async () => {
|
||||||
|
await loadStatusCache.setLoaded(user2, 60000);
|
||||||
|
await loadStatusCache.releaseLoadLock(user2);
|
||||||
|
})(),
|
||||||
|
(async () => {
|
||||||
|
await loadStatusCache.setLoaded(user3, 60000);
|
||||||
|
await loadStatusCache.releaseLoadLock(user3);
|
||||||
|
})(),
|
||||||
|
]);
|
||||||
|
|
||||||
|
// Small delay for Redis propagation
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user1)).toBe(true);
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(true);
|
||||||
|
expect(await loadStatusCache.isLoaded(user3)).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('TTL synchronization', () => {
|
||||||
|
it('should keep loaded flag and cache entry in sync via TTL', async () => {
|
||||||
|
const userId = `user-ttl-sync-${testCounter}`;
|
||||||
|
|
||||||
|
// Set loaded flag with 1 second TTL
|
||||||
|
await loadStatusCache.setLoaded(userId, 1000);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
|
||||||
|
// After TTL expires, both should be gone
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 1100));
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(false);
|
||||||
|
|
||||||
|
// This simulates cache entry and loaded flag being in sync
|
||||||
|
// In real usage, if cache entries expire, loaded flag should also expire
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
it('should allow different TTLs for different users', async () => {
|
||||||
|
const user1 = `user-ttl-diff-1-${testCounter}`;
|
||||||
|
const user2 = `user-ttl-diff-2-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(user1, 1000); // 1 second
|
||||||
|
await loadStatusCache.setLoaded(user2, 3000); // 3 seconds
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user1)).toBe(true);
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(true);
|
||||||
|
|
||||||
|
// Wait for user1 to expire
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 1100));
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user1)).toBe(false);
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(true); // Still valid
|
||||||
|
|
||||||
|
// Wait for user2 to expire
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 2000));
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(false);
|
||||||
|
}, 10000);
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('clearLoaded() integration', () => {
|
||||||
|
it('should clear loaded status immediately', async () => {
|
||||||
|
const userId = `user-clear-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(userId, 60000);
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(true);
|
||||||
|
|
||||||
|
await loadStatusCache.clearLoaded(userId);
|
||||||
|
expect(await loadStatusCache.isLoaded(userId)).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should allow clearing multiple users', async () => {
|
||||||
|
const user1 = `user-clear-multi-1-${testCounter}`;
|
||||||
|
const user2 = `user-clear-multi-2-${testCounter}`;
|
||||||
|
|
||||||
|
await loadStatusCache.setLoaded(user1, 60000);
|
||||||
|
await loadStatusCache.setLoaded(user2, 60000);
|
||||||
|
|
||||||
|
await loadStatusCache.clearLoaded(user1);
|
||||||
|
await loadStatusCache.clearLoaded(user2);
|
||||||
|
|
||||||
|
expect(await loadStatusCache.isLoaded(user1)).toBe(false);
|
||||||
|
expect(await loadStatusCache.isLoaded(user2)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
329
packages/api/src/mcp/registry/cache/__tests__/PrivateServersLoadStatusCache.test.ts
vendored
Normal file
329
packages/api/src/mcp/registry/cache/__tests__/PrivateServersLoadStatusCache.test.ts
vendored
Normal file
|
|
@ -0,0 +1,329 @@
// Mock dependencies BEFORE imports to avoid hoisting issues
const mockGet = jest.fn();
const mockSet = jest.fn();
const mockDelete = jest.fn();
const mockRedisSet = jest.fn();
const mockRedisDel = jest.fn();

jest.mock('~/cache', () => ({
  standardCache: jest.fn(() => ({
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    get: (...args: any[]) => mockGet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    set: (...args: any[]) => mockSet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    delete: (...args: any[]) => mockDelete(...args),
  })),
  keyvRedisClient: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    set: (...args: any[]) => mockRedisSet(...args),
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    del: (...args: any[]) => mockRedisDel(...args),
  },
}));

jest.mock('~/cache/cacheConfig', () => ({
  cacheConfig: {
    REDIS_KEY_PREFIX: '',
    GLOBAL_PREFIX_SEPARATOR: '::',
  },
}));

jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    info: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

jest.mock('~/cluster', () => ({
  isLeader: jest.fn().mockResolvedValue(true),
}));

import { privateServersLoadStatusCache as loadStatusCache } from '../PrivateServersLoadStatusCache';
import { logger } from '@librechat/data-schemas';

describe('PrivateServersLoadStatusCache', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('isLoaded()', () => {
    it('should return true when user servers are loaded', async () => {
      mockGet.mockResolvedValue(true);

      const result = await loadStatusCache.isLoaded('user1');

      expect(result).toBe(true);
      expect(mockGet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
    });

    it('should return false when user servers are not loaded', async () => {
      mockGet.mockResolvedValue(undefined);

      const result = await loadStatusCache.isLoaded('user1');

      expect(result).toBe(false);
      expect(mockGet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
    });

    it('should return false when loaded flag is explicitly false', async () => {
      mockGet.mockResolvedValue(false);

      const result = await loadStatusCache.isLoaded('user1');

      expect(result).toBe(false);
    });
  });

  describe('setLoaded()', () => {
    it('should set loaded flag with default TTL', async () => {
      mockSet.mockResolvedValue(true);

      await loadStatusCache.setLoaded('user1');

      expect(mockSet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1', true, 3600_000);
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Marked user user1 as loaded (TTL: 3600000ms)',
      );
    });

    it('should set loaded flag with custom TTL', async () => {
      mockSet.mockResolvedValue(true);

      await loadStatusCache.setLoaded('user1', 7200000);

      expect(mockSet).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1', true, 7200_000);
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Marked user user1 as loaded (TTL: 7200000ms)',
      );
    });

    it('should throw error if cache.set fails', async () => {
      mockSet.mockResolvedValue(false);

      await expect(loadStatusCache.setLoaded('user1')).rejects.toThrow();
    });
  });

  describe('acquireLoadLock()', () => {
    it('should acquire lock successfully when no lock exists (using Redis SET NX)', async () => {
      mockRedisSet.mockResolvedValue('OK'); // Redis SET NX returns 'OK' on success

      const result = await loadStatusCache.acquireLoadLock('user1');

      expect(result).toBe(true);
      expect(mockRedisSet).toHaveBeenCalledWith(
        'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1',
        expect.any(String), // Timestamp as string
        { NX: true, PX: 30000 },
      );
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Acquired load lock for user user1 (TTL: 30000ms)',
      );
    });

    it('should fail to acquire lock when lock already exists (Redis returns null)', async () => {
      mockRedisSet.mockResolvedValue(null); // Redis SET NX returns null if key exists

      const result = await loadStatusCache.acquireLoadLock('user1');

      expect(result).toBe(false);
      expect(mockRedisSet).toHaveBeenCalledWith(
        'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1',
        expect.any(String),
        { NX: true, PX: 30000 },
      );
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Load lock already held for user user1',
      );
    });

    it('should acquire lock with custom TTL', async () => {
      mockRedisSet.mockResolvedValue('OK');

      const result = await loadStatusCache.acquireLoadLock('user1', 60_000);

      expect(result).toBe(true);
      expect(mockRedisSet).toHaveBeenCalledWith(
        'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1',
        expect.any(String),
        { NX: true, PX: 60_000 },
      );
    });

    it('should return false if Redis SET fails with error', async () => {
      mockRedisSet.mockRejectedValue(new Error('Redis error'));

      const result = await loadStatusCache.acquireLoadLock('user1');

      expect(result).toBe(false);
      expect(logger.error).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Error acquiring lock for user user1:',
        expect.any(Error),
      );
    });
  });

  describe('releaseLoadLock()', () => {
    it('should release lock successfully', async () => {
      await loadStatusCache.releaseLoadLock('user1');

      expect(mockDelete).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOAD_LOCK::user1');
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Released load lock for user user1',
      );
    });

    it('should not throw error if lock does not exist', async () => {
      mockDelete.mockResolvedValue(undefined);

      await expect(loadStatusCache.releaseLoadLock('user1')).resolves.not.toThrow();
    });
  });

  describe('waitForLoad()', () => {
    let mockDateNow: jest.SpyInstance;
    let currentTime: number;

    beforeEach(() => {
      jest.useFakeTimers();
      currentTime = 1000000; // Starting time
      mockDateNow = jest.spyOn(Date, 'now').mockImplementation(() => currentTime);
    });

    afterEach(() => {
      jest.useRealTimers();
      mockDateNow.mockRestore();
    });

    it('should return true when loading completes within timeout', async () => {
      let checkCount = 0;
      mockGet.mockImplementation(async () => {
        checkCount++;
        return checkCount >= 3; // Return true on third check
      });

      const waitPromise = loadStatusCache.waitForLoad('user1', 500, 100);

      // Simulate time passing
      for (let i = 0; i < 3; i++) {
        currentTime += 100;
        await jest.advanceTimersByTimeAsync(100);
      }

      const result = await waitPromise;

      expect(result).toBe(true);
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] User user1 loading completed by another process',
      );
    });

    it('should return false when timeout is reached', async () => {
      mockGet.mockResolvedValue(false); // Never becomes true

      const waitPromise = loadStatusCache.waitForLoad('user1', 300, 100);

      // Advance time past the timeout
      currentTime += 400;
      await jest.advanceTimersByTimeAsync(400);

      const result = await waitPromise;

      expect(result).toBe(false);
      expect(logger.warn).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Timeout waiting for user user1 loading (waited 300ms)',
      );
    });

    it('should use default timeout and check interval', async () => {
      mockGet.mockResolvedValue(true);

      const waitPromise = loadStatusCache.waitForLoad('user1');

      currentTime += 100;
      await jest.advanceTimersByTimeAsync(100);

      const result = await waitPromise;

      expect(result).toBe(true);
    });

    it('should poll at specified intervals', async () => {
      let checkCount = 0;
      mockGet.mockImplementation(async () => {
        checkCount++;
        return checkCount >= 4; // Return true on fourth check
      });

      const waitPromise = loadStatusCache.waitForLoad('user1', 1000, 200);

      // Advance time for each poll
      for (let i = 0; i < 4; i++) {
        currentTime += 200;
        await jest.advanceTimersByTimeAsync(200);
      }

      const result = await waitPromise;

      expect(result).toBe(true);
      expect(checkCount).toBe(4);
    });
  });

  describe('clearLoaded()', () => {
    it('should clear loaded status for a user', async () => {
      await loadStatusCache.clearLoaded('user1');

      expect(mockDelete).toHaveBeenCalledWith('USER_PRIVATE_SERVERS_LOADED::user1');
      expect(logger.debug).toHaveBeenCalledWith(
        '[MCP][LoadStatusCache] Cleared loaded status for user user1',
      );
    });

    it('should not throw error if loaded status does not exist', async () => {
      mockDelete.mockResolvedValue(undefined);

      await expect(loadStatusCache.clearLoaded('user1')).resolves.not.toThrow();
    });
  });

  describe('Edge cases', () => {
    it('should handle multiple users independently', async () => {
      mockRedisSet.mockResolvedValue('OK');

      const lock1 = await loadStatusCache.acquireLoadLock('user1');
      const lock2 = await loadStatusCache.acquireLoadLock('user2');

      expect(lock1).toBe(true);
      expect(lock2).toBe(true);
      expect(mockRedisSet).toHaveBeenCalledWith(
        'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user1',
        expect.any(String),
        { NX: true, PX: 30000 },
      );
      expect(mockRedisSet).toHaveBeenCalledWith(
        'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::user2',
        expect.any(String),
        { NX: true, PX: 30000 },
      );
    });

    it('should handle concurrent operations on same user', async () => {
      mockRedisSet
        .mockResolvedValueOnce('OK') // First lock attempt succeeds
        .mockResolvedValueOnce(null); // Second lock attempt fails (key exists)

      const [lock1, lock2] = await Promise.all([
        loadStatusCache.acquireLoadLock('user1'),
        loadStatusCache.acquireLoadLock('user1'),
      ]);

      // One should succeed, one should fail (order not guaranteed)
      expect([lock1, lock2].sort()).toEqual([false, true]);
    });
  });
});
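The tests above pin down the distributed-locking contract: a per-user lock key is written with Redis SET using NX (only if absent) and PX (millisecond TTL), a reply of 'OK' means the lock was acquired, null means another process holds it, and errors are reported as "not acquired". A minimal sketch of that acquire/release pattern, assuming a node-redis v4 style client and a hypothetical key helper (this is not the actual PrivateServersLoadStatusCache implementation):

import { createClient } from 'redis';

const redis = createClient(); // redis.connect() must be awaited before use; omitted for brevity
const DEFAULT_LOCK_TTL_MS = 30_000;

// Hypothetical helper; the tests show the real key is namespaced as
// 'MCP::ServersRegistry::PrivateServersLoadStatus:USER_PRIVATE_SERVERS_LOAD_LOCK::<userId>'.
const lockKey = (userId: string) => `USER_PRIVATE_SERVERS_LOAD_LOCK::${userId}`;

/** Try to take the per-user load lock; true only if no other process currently holds it. */
async function acquireLoadLock(userId: string, ttlMs = DEFAULT_LOCK_TTL_MS): Promise<boolean> {
  try {
    // NX: set only if the key does not exist; PX: auto-expire after ttlMs.
    // node-redis returns 'OK' on success and null when the key already exists.
    const reply = await redis.set(lockKey(userId), Date.now().toString(), {
      NX: true,
      PX: ttlMs,
    });
    return reply === 'OK';
  } catch {
    // On Redis errors, fail closed: report the lock as not acquired.
    return false;
  }
}

/** Release the lock; deleting a missing key is a no-op, so this is safe to call twice. */
async function releaseLoadLock(userId: string): Promise<void> {
  await redis.del(lockKey(userId));
}

The PX expiry is what keeps a crashed loader from blocking a user forever: if the holder dies, the lock simply times out and the next waitForLoad() poller can take over.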
@@ -36,12 +36,18 @@ describe('RegistryStatusCache Integration Tests', () => {

   afterEach(async () => {
     // Clean up: clear all test keys from Redis
-    if (keyvRedisClient) {
-      const pattern = '*RegistryStatusCache-IntegrationTest*';
-      if ('scanIterator' in keyvRedisClient) {
-        for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
-          await keyvRedisClient.del(key);
-        }
-      }
-    }
+    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
+      const pattern = '*RegistryStatusCache-IntegrationTest*';
+      const keysToDelete: string[] = [];
+
+      // Collect all keys first
+      for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
+        keysToDelete.push(key);
+      }
+
+      // Delete in parallel for cluster mode efficiency
+      if (keysToDelete.length > 0) {
+        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
+      }
+    }
   });
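This cleanup now uses a two-phase pattern: collect the matching keys via SCAN, then delete them in parallel, which avoids issuing DEL while the scan is still iterating and keeps teardown fast in cluster mode where keys live on different nodes. The same pattern reappears in the ServerConfigsCacheRedis suite further below, so a shared helper along these lines could factor it out (hypothetical name, node-redis style client; sketch only):

import type { createClient } from 'redis';

type RedisClient = ReturnType<typeof createClient>;

/** Delete every key matching `pattern`: scan first, then delete in parallel. */
async function deleteKeysByPattern(client: RedisClient, pattern: string): Promise<number> {
  const keysToDelete: string[] = [];

  // Phase 1: collect keys without mutating the keyspace mid-scan.
  for await (const key of client.scanIterator({ MATCH: pattern })) {
    keysToDelete.push(key);
  }

  // Phase 2: delete in parallel; one DEL per key also works across cluster slots.
  if (keysToDelete.length > 0) {
    await Promise.all(keysToDelete.map((key) => client.del(key)));
  }
  return keysToDelete.length;
}

With such a helper, each afterEach would reduce to a single call, e.g. await deleteKeysByPattern(keyvRedisClient, '*RegistryStatusCache-IntegrationTest*');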
@@ -25,11 +25,11 @@ describe('ServerConfigsCacheFactory', () => {
       cacheConfig.USE_REDIS = true;

       // Act
-      const cache = ServerConfigsCacheFactory.create('TestOwner', true);
+      const cache = ServerConfigsCacheFactory.create('TestOwner', 'Private', true);

       // Assert
       expect(cache).toBeInstanceOf(ServerConfigsCacheRedis);
-      expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('TestOwner', true);
+      expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('TestOwner', 'Private', true);
     });

     it('should return ServerConfigsCacheInMemory when USE_REDIS is false', () => {

@@ -37,7 +37,7 @@ describe('ServerConfigsCacheFactory', () => {
       cacheConfig.USE_REDIS = false;

       // Act
-      const cache = ServerConfigsCacheFactory.create('TestOwner', false);
+      const cache = ServerConfigsCacheFactory.create('TestOwner', 'Private', false);

       // Assert
       expect(cache).toBeInstanceOf(ServerConfigsCacheInMemory);

@@ -49,10 +49,10 @@ describe('ServerConfigsCacheFactory', () => {
       cacheConfig.USE_REDIS = true;

       // Act
-      ServerConfigsCacheFactory.create('App', true);
+      ServerConfigsCacheFactory.create('App', 'Shared', true);

       // Assert
-      expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('App', true);
+      expect(ServerConfigsCacheRedis).toHaveBeenCalledWith('App', 'Shared', true);
     });

     it('should create ServerConfigsCacheInMemory without parameters when USE_REDIS is false', () => {

@@ -60,7 +60,7 @@ describe('ServerConfigsCacheFactory', () => {
       cacheConfig.USE_REDIS = false;

       // Act
-      ServerConfigsCacheFactory.create('User', false);
+      ServerConfigsCacheFactory.create('User', 'Shared', false);

       // Assert
       // In-memory cache doesn't use owner/leaderOnly parameters
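These updated call sites show the factory gaining a tier argument ('Shared' or 'Private') between the owner and the leaderOnly flag, and forwarding all three to the Redis-backed cache. A minimal sketch of the factory shape the assertions imply (import paths and the tier type name are assumptions, not the actual source):

import { cacheConfig } from '~/cache/cacheConfig';
import { ServerConfigsCacheRedis } from './ServerConfigsCacheRedis';
import { ServerConfigsCacheInMemory } from './ServerConfigsCacheInMemory';

/** Tier labels seen in the updated call sites. */
type ServerConfigTier = 'Shared' | 'Private';

export class ServerConfigsCacheFactory {
  /**
   * Pick the backing store from cacheConfig.USE_REDIS.
   * The Redis variant is namespaced by owner and tier and can restrict writes to the
   * cluster leader; the in-memory variant ignores those parameters entirely.
   */
  static create(owner: string, tier: ServerConfigTier, leaderOnly: boolean) {
    if (cacheConfig.USE_REDIS) {
      return new ServerConfigsCacheRedis(owner, tier, leaderOnly);
    }
    return new ServerConfigsCacheInMemory();
  }
}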
@@ -1,5 +1,8 @@
 import { expect } from '@playwright/test';
 import { ParsedServerConfig } from '~/mcp/types';

+const FIXED_TIME = 1699564800000;
+const originalDateNow = Date.now;
+Date.now = jest.fn(() => FIXED_TIME);
+
 describe('ServerConfigsCacheInMemory Integration Tests', () => {
   let ServerConfigsCacheInMemory: typeof import('../ServerConfigsCacheInMemory').ServerConfigsCacheInMemory;

@@ -12,12 +15,14 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
     command: 'node',
     args: ['server1.js'],
     env: { TEST: 'value1' },
+    lastUpdatedAt: FIXED_TIME,
   };

   const mockConfig2: ParsedServerConfig = {
     command: 'python',
     args: ['server2.py'],
     env: { TEST: 'value2' },
+    lastUpdatedAt: FIXED_TIME,
   };

   const mockConfig3: ParsedServerConfig = {

@@ -25,6 +30,7 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
     args: ['server3.js'],
     url: 'http://localhost:3000',
     requiresOAuth: true,
+    lastUpdatedAt: FIXED_TIME,
   };

   beforeAll(async () => {

@@ -33,6 +39,10 @@ describe('ServerConfigsCacheInMemory Integration Tests', () => {
     ServerConfigsCacheInMemory = cacheModule.ServerConfigsCacheInMemory;
   });

+  afterAll(() => {
+    Date.now = originalDateNow;
+  });
+
   beforeEach(() => {
     // Create a fresh instance for each test
     cache = new ServerConfigsCacheInMemory();
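Freezing Date.now makes the lastUpdatedAt values stamped by the cache deterministic, so the mock configs can carry the same FIXED_TIME and compare cleanly. Overriding Date.now by assignment (and restoring it in afterAll) works; an equivalent, slightly more self-cleaning variant would use a Jest spy instead, roughly:

// Alternative sketch, not the implementation used above.
const FIXED_TIME = 1699564800000;
const nowSpy = jest.spyOn(Date, 'now').mockReturnValue(FIXED_TIME);

afterAll(() => {
  nowSpy.mockRestore(); // puts the original Date.now back automatically
});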
@@ -4,8 +4,7 @@ import { ParsedServerConfig } from '~/mcp/types';
 describe('ServerConfigsCacheRedis Integration Tests', () => {
   let ServerConfigsCacheRedis: typeof import('../ServerConfigsCacheRedis').ServerConfigsCacheRedis;
   let keyvRedisClient: Awaited<typeof import('~/cache/redisClients')>['keyvRedisClient'];
-  let LeaderElection: typeof import('~/cluster/LeaderElection').LeaderElection;
-  let checkIsLeader: () => Promise<boolean>;
   let cache: InstanceType<typeof import('../ServerConfigsCacheRedis').ServerConfigsCacheRedis>;

   // Test data

@@ -41,49 +40,42 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
     // Import modules after setting env vars
     const cacheModule = await import('../ServerConfigsCacheRedis');
     const redisClients = await import('~/cache/redisClients');
-    const leaderElectionModule = await import('~/cluster/LeaderElection');
-    const clusterModule = await import('~/cluster');

     ServerConfigsCacheRedis = cacheModule.ServerConfigsCacheRedis;
     keyvRedisClient = redisClients.keyvRedisClient;
-    LeaderElection = leaderElectionModule.LeaderElection;
-    checkIsLeader = clusterModule.isLeader;

     // Ensure Redis is connected
     if (!keyvRedisClient) throw new Error('Redis client is not initialized');

     // Wait for connection and topology discovery to complete
     await redisClients.keyvRedisClientReady;
-
-    // Clear any existing leader key to ensure clean state
-    await keyvRedisClient.del(LeaderElection.LEADER_KEY);
-
-    // Become leader so we can perform write operations (using default election instance)
-    const isLeader = await checkIsLeader();
-    expect(isLeader).toBe(true);
   });

   beforeEach(() => {
     // Create a fresh instance for each test with leaderOnly=true
-    cache = new ServerConfigsCacheRedis('test-user', true);
+    jest.resetModules();
+    cache = new ServerConfigsCacheRedis('test-user', 'Shared', false);
   });

   afterEach(async () => {
     // Clean up: clear all test keys from Redis
-    if (keyvRedisClient) {
-      const pattern = '*ServerConfigsCacheRedis-IntegrationTest*';
-      if ('scanIterator' in keyvRedisClient) {
-        for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
-          await keyvRedisClient.del(key);
-        }
-      }
-    }
+    if (keyvRedisClient && 'scanIterator' in keyvRedisClient) {
+      const pattern = '*ServerConfigsCacheRedis-IntegrationTest*';
+      const keysToDelete: string[] = [];
+
+      // Collect all keys first
+      for await (const key of keyvRedisClient.scanIterator({ MATCH: pattern })) {
+        keysToDelete.push(key);
+      }
+
+      // Delete in parallel for cluster mode efficiency
+      if (keysToDelete.length > 0) {
+        await Promise.all(keysToDelete.map((key) => keyvRedisClient!.del(key)));
+      }
+    }
   });

   afterAll(async () => {
-    // Clear leader key to allow other tests to become leader
-    if (keyvRedisClient) await keyvRedisClient.del(LeaderElection.LEADER_KEY);
-
     // Close Redis connection
     if (keyvRedisClient?.isOpen) await keyvRedisClient.disconnect();
   });

@@ -92,7 +84,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
     it('should add and retrieve a server config', async () => {
       await cache.add('server1', mockConfig1);
       const result = await cache.get('server1');
-      expect(result).toEqual(mockConfig1);
+      expect(result).toMatchObject(mockConfig1);
     });

     it('should return undefined for non-existent server', async () => {

@@ -116,14 +108,14 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       const result2 = await cache.get('server2');
       const result3 = await cache.get('server3');

-      expect(result1).toEqual(mockConfig1);
-      expect(result2).toEqual(mockConfig2);
-      expect(result3).toEqual(mockConfig3);
+      expect(result1).toMatchObject(mockConfig1);
+      expect(result2).toMatchObject(mockConfig2);
+      expect(result3).toMatchObject(mockConfig3);
     });

     it('should isolate caches by owner namespace', async () => {
-      const userCache = new ServerConfigsCacheRedis('user1', true);
-      const globalCache = new ServerConfigsCacheRedis('global', true);
+      const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
+      const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);

       await userCache.add('server1', mockConfig1);
       await globalCache.add('server1', mockConfig2);

@@ -131,15 +123,15 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       const userResult = await userCache.get('server1');
       const globalResult = await globalCache.get('server1');

-      expect(userResult).toEqual(mockConfig1);
-      expect(globalResult).toEqual(mockConfig2);
+      expect(userResult).toMatchObject(mockConfig1);
+      expect(globalResult).toMatchObject(mockConfig2);
     });
   });

   describe('getAll operation', () => {
     it('should return empty object when no servers exist', async () => {
       const result = await cache.getAll();
-      expect(result).toEqual({});
+      expect(result).toMatchObject({});
     });

     it('should return all server configs', async () => {

@@ -148,7 +140,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       await cache.add('server3', mockConfig3);

       const result = await cache.getAll();
-      expect(result).toEqual({
+      expect(result).toMatchObject({
         server1: mockConfig1,
         server2: mockConfig2,
         server3: mockConfig3,

@@ -165,12 +157,12 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       await cache.add('server3', mockConfig3);
       result = await cache.getAll();
       expect(Object.keys(result).length).toBe(3);
-      expect(result.server3).toEqual(mockConfig3);
+      expect(result.server3).toMatchObject(mockConfig3);
     });

     it('should only return configs for the specific owner', async () => {
-      const userCache = new ServerConfigsCacheRedis('user1', true);
-      const globalCache = new ServerConfigsCacheRedis('global', true);
+      const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
+      const globalCache = new ServerConfigsCacheRedis('global', 'Private', false);

       await userCache.add('server1', mockConfig1);
       await userCache.add('server2', mockConfig2);

@@ -181,20 +173,20 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       expect(Object.keys(userResult).length).toBe(2);
       expect(Object.keys(globalResult).length).toBe(1);
-      expect(userResult.server1).toEqual(mockConfig1);
+      expect(userResult.server1).toMatchObject(mockConfig1);
       expect(userResult.server3).toBeUndefined();
-      expect(globalResult.server3).toEqual(mockConfig3);
+      expect(globalResult.server3).toMatchObject(mockConfig3);
     });
   });

   describe('update operation', () => {
     it('should update an existing server config', async () => {
       await cache.add('server1', mockConfig1);
-      expect(await cache.get('server1')).toEqual(mockConfig1);
+      expect(await cache.get('server1')).toMatchObject(mockConfig1);

       await cache.update('server1', mockConfig2);
       const result = await cache.get('server1');
-      expect(result).toEqual(mockConfig2);
+      expect(result).toMatchObject(mockConfig2);
     });

     it('should throw error when updating non-existent server', async () => {

@@ -209,28 +201,28 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       await cache.update('server1', mockConfig3);
       const result = await cache.getAll();
-      expect(result.server1).toEqual(mockConfig3);
-      expect(result.server2).toEqual(mockConfig2);
+      expect(result.server1).toMatchObject(mockConfig3);
+      expect(result.server2).toMatchObject(mockConfig2);
     });

     it('should only update in the specific owner namespace', async () => {
-      const userCache = new ServerConfigsCacheRedis('user1', true);
-      const globalCache = new ServerConfigsCacheRedis('global', true);
+      const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
+      const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);

       await userCache.add('server1', mockConfig1);
       await globalCache.add('server1', mockConfig2);

       await userCache.update('server1', mockConfig3);

-      expect(await userCache.get('server1')).toEqual(mockConfig3);
-      expect(await globalCache.get('server1')).toEqual(mockConfig2);
+      expect(await userCache.get('server1')).toMatchObject(mockConfig3);
+      expect(await globalCache.get('server1')).toMatchObject(mockConfig2);
     });
   });

   describe('remove operation', () => {
     it('should remove an existing server config', async () => {
       await cache.add('server1', mockConfig1);
-      expect(await cache.get('server1')).toEqual(mockConfig1);
+      expect(await cache.get('server1')).toMatchObject(mockConfig1);

       await cache.remove('server1');
       expect(await cache.get('server1')).toBeUndefined();

@@ -253,7 +245,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       result = await cache.getAll();
       expect(Object.keys(result).length).toBe(1);
       expect(result.server1).toBeUndefined();
-      expect(result.server2).toEqual(mockConfig2);
+      expect(result.server2).toMatchObject(mockConfig2);
     });

     it('should allow re-adding a removed server', async () => {

@@ -262,12 +254,12 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       await cache.add('server1', mockConfig3);

       const result = await cache.get('server1');
-      expect(result).toEqual(mockConfig3);
+      expect(result).toMatchObject(mockConfig3);
     });

     it('should only remove from the specific owner namespace', async () => {
-      const userCache = new ServerConfigsCacheRedis('user1', true);
-      const globalCache = new ServerConfigsCacheRedis('global', true);
+      const userCache = new ServerConfigsCacheRedis('user1', 'Private', false);
+      const globalCache = new ServerConfigsCacheRedis('global', 'Shared', false);

       await userCache.add('server1', mockConfig1);
       await globalCache.add('server1', mockConfig2);

@@ -275,7 +267,7 @@ describe('ServerConfigsCacheRedis Integration Tests', () => {
       await userCache.remove('server1');

       expect(await userCache.get('server1')).toBeUndefined();
-      expect(await globalCache.get('server1')).toEqual(mockConfig2);
+      expect(await globalCache.get('server1')).toMatchObject(mockConfig2);
     });
   });
 });
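Throughout this suite the assertions move from toEqual to toMatchObject, which fits once cached configs can carry extra fields stamped by the cache (such as the lastUpdatedAt field added to ParsedServerConfig in the next hunk): toMatchObject checks that the received object contains the expected properties rather than requiring exact deep equality. An illustrative contrast, with made-up values:

const stored = { command: 'node', args: ['server1.js'], lastUpdatedAt: 1699564800000 };

// Fails as an exact match: the extra lastUpdatedAt field breaks deep equality.
expect(stored).not.toEqual({ command: 'node', args: ['server1.js'] });

// Passes: subset matching tolerates fields stamped onto the stored config.
expect(stored).toMatchObject({ command: 'node', args: ['server1.js'] });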
@@ -153,6 +153,8 @@ export type ParsedServerConfig = MCPOptions & {
   tools?: string;
   toolFunctions?: LCAvailableTools;
   initDuration?: number;
+  lastUpdatedAt?: number;
+  dbId?: string;
 };

 export interface BasicConnectionOptions {
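The new lastUpdatedAt field gives consumers a cheap staleness signal: a connection established before its config's last update can be treated as stale and renewed. A minimal sketch of that comparison, with a hypothetical connection shape and function name (not the actual repository code):

import type { ParsedServerConfig } from '~/mcp/types';

// Hypothetical shape: the real connection object tracks when it was established.
interface ConnectionLike {
  createdAt: number; // epoch ms at connection time
}

/** A connection is stale if its config was updated after the connection was created. */
function isConnectionStale(connection: ConnectionLike, config: ParsedServerConfig): boolean {
  if (config.lastUpdatedAt == null) {
    return false; // no update timestamp recorded; assume the connection is still valid
  }
  return connection.createdAt < config.lastUpdatedAt;
}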
@@ -22,6 +22,7 @@ export * from './types';
 export * from './types/agents';
 export * from './types/assistants';
 export * from './types/files';
+export * from './types/mcpServers';
 export * from './types/mutations';
 export * from './types/queries';
 export * from './types/runs';
21  packages/data-provider/src/types/mcpServers.ts  (new file)

@@ -0,0 +1,21 @@
import type { MCPOptions } from '../mcp';

/**
 * Base MCP Server interface
 * Core structure shared between API and database layers
 */
export interface IMCPServerDB {
  _id?: string; // MongoDB ObjectId (used for ACL/permissions)
  mcp_id: string;
  config: MCPOptions;
  author?: string | null;
  createdAt?: Date;
  updatedAt?: Date;
}

/**
 * User-managed MCP Server (standalone, not attached to agent)
 * API type for frontend/backend communication
 * Similar to Agent type - includes populated author fields
 */
export type MCPServerDB = IMCPServerDB;
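For orientation, a quick illustration of an object satisfying the new MCPServerDB type; all values and the import path are illustrative, not taken from the repository:

// Import path is an assumption; within the package a relative './mcpServers' import works too.
import type { MCPServerDB } from 'librechat-data-provider';

const exampleServer: MCPServerDB = {
  _id: '6650f0c2a1b2c3d4e5f67890', // hypothetical ObjectId string
  mcp_id: 'my-weather-server',
  config: {
    // stdio-style MCPOptions; other transport variants are equally valid
    command: 'node',
    args: ['weather-server.js'],
  },
  author: null,
  createdAt: new Date(),
  updatedAt: new Date(),
};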