🔄 refactor: Migrate Cache Logic to TypeScript (#9771)
Some checks are pending
Docker Dev Branch Images Build / build (Dockerfile, lc-dev, node) (push) Waiting to run
Docker Dev Branch Images Build / build (Dockerfile.multi, lc-dev-api, api-build) (push) Waiting to run

* Refactor: Moved Redis cache infra logic into `packages/api`
- Moved cacheFactory and redisClients from `api/cache` into `packages/api/src/cache` so that features in `packages/api` can use cache without importing backward from the backend.
- Converted all moved files into TS with proper typing.
- Created integration tests to run against actual Redis servers for redisClients and cacheFactory.
- Added a GitHub workflow to run integration tests for the cache feature.
- Bug fix: keyvRedisClient now implements the PING feature properly.

* chore: consolidate imports in getLogStores.js

* chore: reorder imports

* chore: re-add fs-extra as dev dep.

* chore: reorder imports in cacheConfig.ts, cacheFactory.ts, and keyvMongo.ts

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
This commit is contained in:
Theo N. Truong 2025-10-02 07:33:58 -06:00 committed by GitHub
parent 341435fb25
commit 0e5bb6f98c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
38 changed files with 1552 additions and 1340 deletions

View file

@@ -0,0 +1,182 @@
// Unit tests for cacheConfig. The module validates and resolves its
// environment variables at import time, so every test resets the module
// registry (jest.resetModules) and dynamically re-imports a fresh copy
// after arranging process.env.
describe('cacheConfig', () => {
  // Snapshot of the environment, restored after every test.
  let originalEnv: NodeJS.ProcessEnv;
  beforeEach(() => {
    originalEnv = { ...process.env };
    // Clear all related env vars first
    delete process.env.REDIS_URI;
    delete process.env.REDIS_CA;
    delete process.env.REDIS_KEY_PREFIX_VAR;
    delete process.env.REDIS_KEY_PREFIX;
    delete process.env.USE_REDIS;
    delete process.env.USE_REDIS_CLUSTER;
    delete process.env.REDIS_PING_INTERVAL;
    delete process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES;
    // Clear module cache so the next import re-runs the config logic
    jest.resetModules();
  });
  afterEach(() => {
    process.env = originalEnv;
    jest.resetModules();
  });
  describe('REDIS_KEY_PREFIX validation and resolution', () => {
    test('should throw error when both REDIS_KEY_PREFIX_VAR and REDIS_KEY_PREFIX are set', async () => {
      process.env.REDIS_KEY_PREFIX_VAR = 'DEPLOYMENT_ID';
      process.env.REDIS_KEY_PREFIX = 'manual-prefix';
      // The error is thrown at module load, hence the dynamic import.
      await expect(async () => {
        await import('../cacheConfig');
      }).rejects.toThrow('Only either REDIS_KEY_PREFIX_VAR or REDIS_KEY_PREFIX can be set.');
    });
    test('should resolve REDIS_KEY_PREFIX from variable reference', async () => {
      // REDIS_KEY_PREFIX_VAR names another env var whose value is used as the prefix.
      process.env.REDIS_KEY_PREFIX_VAR = 'DEPLOYMENT_ID';
      process.env.DEPLOYMENT_ID = 'test-deployment-123';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_KEY_PREFIX).toBe('test-deployment-123');
    });
    test('should use direct REDIS_KEY_PREFIX value', async () => {
      process.env.REDIS_KEY_PREFIX = 'direct-prefix';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_KEY_PREFIX).toBe('direct-prefix');
    });
    test('should default to empty string when no prefix is configured', async () => {
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
    });
    test('should handle empty variable reference', async () => {
      // The referenced variable exists but is empty: prefix resolves to ''.
      process.env.REDIS_KEY_PREFIX_VAR = 'EMPTY_VAR';
      process.env.EMPTY_VAR = '';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
    });
    test('should handle undefined variable reference', async () => {
      // The referenced variable is not set at all: prefix resolves to ''.
      process.env.REDIS_KEY_PREFIX_VAR = 'UNDEFINED_VAR';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
    });
  });
  describe('USE_REDIS and REDIS_URI validation', () => {
    test('should throw error when USE_REDIS is enabled but REDIS_URI is not set', async () => {
      process.env.USE_REDIS = 'true';
      await expect(async () => {
        await import('../cacheConfig');
      }).rejects.toThrow('USE_REDIS is enabled but REDIS_URI is not set.');
    });
    test('should not throw error when USE_REDIS is enabled and REDIS_URI is set', async () => {
      process.env.USE_REDIS = 'true';
      process.env.REDIS_URI = 'redis://localhost:6379';
      const importModule = async () => {
        await import('../cacheConfig');
      };
      await expect(importModule()).resolves.not.toThrow();
    });
    test('should handle empty REDIS_URI when USE_REDIS is enabled', async () => {
      // An empty string URI is treated the same as unset.
      process.env.USE_REDIS = 'true';
      process.env.REDIS_URI = '';
      await expect(async () => {
        await import('../cacheConfig');
      }).rejects.toThrow('USE_REDIS is enabled but REDIS_URI is not set.');
    });
  });
  describe('USE_REDIS_CLUSTER configuration', () => {
    test('should default to false when USE_REDIS_CLUSTER is not set', async () => {
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.USE_REDIS_CLUSTER).toBe(false);
    });
    test('should be false when USE_REDIS_CLUSTER is set to false', async () => {
      process.env.USE_REDIS_CLUSTER = 'false';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.USE_REDIS_CLUSTER).toBe(false);
    });
    test('should be true when USE_REDIS_CLUSTER is set to true', async () => {
      process.env.USE_REDIS_CLUSTER = 'true';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.USE_REDIS_CLUSTER).toBe(true);
    });
    test('should work with USE_REDIS enabled and REDIS_URI set', async () => {
      process.env.USE_REDIS_CLUSTER = 'true';
      process.env.USE_REDIS = 'true';
      process.env.REDIS_URI = 'redis://localhost:6379';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.USE_REDIS_CLUSTER).toBe(true);
      expect(cacheConfig.USE_REDIS).toBe(true);
      expect(cacheConfig.REDIS_URI).toBe('redis://localhost:6379');
    });
  });
  describe('REDIS_CA file reading', () => {
    test('should be null when REDIS_CA is not set', async () => {
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_CA).toBeNull();
    });
  });
  describe('REDIS_PING_INTERVAL configuration', () => {
    test('should default to 0 when REDIS_PING_INTERVAL is not set', async () => {
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_PING_INTERVAL).toBe(0);
    });
    test('should use provided REDIS_PING_INTERVAL value', async () => {
      // The env string is parsed into a number.
      process.env.REDIS_PING_INTERVAL = '300';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.REDIS_PING_INTERVAL).toBe(300);
    });
  });
  describe('FORCED_IN_MEMORY_CACHE_NAMESPACES validation', () => {
    test('should parse comma-separated cache keys correctly', async () => {
      // Entries are trimmed of surrounding whitespace.
      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = ' ROLES, MESSAGES ';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual(['ROLES', 'MESSAGES']);
    });
    test('should throw error for invalid cache keys', async () => {
      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = 'INVALID_KEY,ROLES';
      await expect(async () => {
        await import('../cacheConfig');
      }).rejects.toThrow('Invalid cache keys in FORCED_IN_MEMORY_CACHE_NAMESPACES: INVALID_KEY');
    });
    test('should handle empty string gracefully', async () => {
      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = '';
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([]);
    });
    test('should handle undefined env var gracefully', async () => {
      const { cacheConfig } = await import('../cacheConfig');
      expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([]);
    });
  });
});

View file

@@ -0,0 +1,113 @@
import type { RedisStore } from 'rate-limit-redis';
// Integration tests for cacheFactory.limiterCache, which backs rate
// limiting with a rate-limit-redis RedisStore. cacheFactory reads its
// configuration from env vars at import time, so each test resets the
// module registry and re-imports after arranging process.env.
describe('limiterCache', () => {
  // Snapshot of the environment, restored after every test.
  let originalEnv: NodeJS.ProcessEnv;
  let testStore: RedisStore | undefined = undefined;
  beforeEach(() => {
    originalEnv = { ...process.env };
    // Clear cache-related env vars
    delete process.env.USE_REDIS;
    delete process.env.REDIS_URI;
    delete process.env.USE_REDIS_CLUSTER;
    delete process.env.REDIS_PING_INTERVAL;
    delete process.env.REDIS_KEY_PREFIX;
    // Set test configuration
    process.env.REDIS_PING_INTERVAL = '0';
    process.env.REDIS_KEY_PREFIX = 'Cache-Integration-Test';
    process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
    // Clear require cache to reload modules
    jest.resetModules();
  });
  afterEach(() => {
    // Nothing awaited here, so the hook does not need to be async.
    process.env = originalEnv;
    jest.resetModules();
  });
  test('should throw error when prefix is not provided', async () => {
    const cacheFactory = await import('../../cacheFactory');
    expect(() => cacheFactory.limiterCache('')).toThrow('prefix is required');
  });
  test('should return undefined when USE_REDIS is false', async () => {
    process.env.USE_REDIS = 'false';
    const cacheFactory = await import('../../cacheFactory');
    testStore = cacheFactory.limiterCache('test-limiter');
    expect(testStore).toBeUndefined();
  });
  test('should return RedisStore with sendCommand when USE_REDIS is true', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    testStore = cacheFactory.limiterCache('test-limiter');
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    // Verify it returns a RedisStore instance
    expect(testStore).toBeDefined();
    expect(testStore!.constructor.name).toBe('RedisStore');
    expect(testStore!.prefix).toBe('test-limiter:');
    expect(typeof testStore!.sendCommand).toBe('function');
    const testKey = 'user:123';
    // SET operation
    await testStore!.sendCommand('SET', testKey, '1', 'EX', '60');
    // Verify the key was created WITHOUT prefix using ioredis
    // Note: Using call method since get method seems to have issues in test environment
    // Type assertion for ioredis call method
    type RedisClientWithCall = typeof ioredisClient & {
      call: (command: string, key: string) => Promise<string | null>;
    };
    const directValue = await (ioredisClient as RedisClientWithCall).call('GET', testKey);
    expect(directValue).toBe('1');
    // GET operation
    const value = await testStore!.sendCommand('GET', testKey);
    expect(value).toBe('1');
    // INCR operation
    const incremented = await testStore!.sendCommand('INCR', testKey);
    expect(incremented).toBe(2);
    // Verify increment worked with ioredis
    const incrementedValue = await (ioredisClient as RedisClientWithCall).call('GET', testKey);
    expect(incrementedValue).toBe('2');
    // TTL operation
    const ttl = (await testStore!.sendCommand('TTL', testKey)) as number;
    expect(ttl).toBeGreaterThan(0);
    expect(ttl).toBeLessThanOrEqual(60);
    // DEL operation
    const deleted = await testStore!.sendCommand('DEL', testKey);
    expect(deleted).toBe(1);
    // Verify deletion
    const afterDelete = await testStore!.sendCommand('GET', testKey);
    expect(afterDelete).toBeNull();
    // Direct verification uses the call method as well, consistent with the
    // reads above (plain `get` is unreliable in this test environment).
    const directAfterDelete = await (ioredisClient as RedisClientWithCall).call('GET', testKey);
    expect(directAfterDelete).toBeNull();
    // Test error handling
    await expect(testStore!.sendCommand('INVALID_COMMAND')).rejects.toThrow();
  });
});

View file

@@ -0,0 +1,211 @@
// Minimal session payload shape used by these tests; the index signature
// keeps the type open so tests can attach arbitrary extra fields.
interface SessionData {
  [key: string]: unknown;
  cookie?: { maxAge: number };
  user?: { id: string; name: string };
  userId?: string;
}
// Callback-style subset of the session Store contract exercised by these
// tests (set/get/destroy/touch, plus the optional prefix and event hook).
interface SessionStore {
  // Key prefix (present on Redis-backed stores only).
  prefix?: string;
  set: (id: string, data: SessionData, callback?: (err?: Error) => void) => void;
  get: (id: string, callback: (err: Error | null, data?: SessionData | null) => void) => void;
  destroy: (id: string, callback?: (err?: Error) => void) => void;
  touch: (id: string, data: SessionData, callback?: (err?: Error) => void) => void;
  // Optional event registration (used by Redis-backed stores).
  on?: (event: string, handler: (...args: unknown[]) => void) => void;
}
// Integration tests for cacheFactory.sessionCache against a live Redis
// server. cacheFactory reads env vars at import time, so each test resets
// the module registry and re-imports after arranging process.env.
describe('sessionCache', () => {
  // Snapshot of the environment, restored after every test.
  let originalEnv: NodeJS.ProcessEnv;
  // Helper to make session stores async: wraps the callback-style store
  // API in promises so tests can await each operation.
  const asyncStore = (store: SessionStore) => ({
    set: (id: string, data: SessionData) =>
      new Promise<void>((resolve) => store.set(id, data, () => resolve())),
    get: (id: string) =>
      new Promise<SessionData | null | undefined>((resolve) =>
        store.get(id, (_, data) => resolve(data)),
      ),
    destroy: (id: string) => new Promise<void>((resolve) => store.destroy(id, () => resolve())),
    touch: (id: string, data: SessionData) =>
      new Promise<void>((resolve) => store.touch(id, data, () => resolve())),
  });
  beforeEach(() => {
    originalEnv = { ...process.env };
    // Clear cache-related env vars
    delete process.env.USE_REDIS;
    delete process.env.REDIS_URI;
    delete process.env.USE_REDIS_CLUSTER;
    delete process.env.REDIS_PING_INTERVAL;
    delete process.env.REDIS_KEY_PREFIX;
    // Set test configuration
    process.env.REDIS_PING_INTERVAL = '0';
    process.env.REDIS_KEY_PREFIX = 'Cache-Integration-Test';
    process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
    // Clear require cache to reload modules
    jest.resetModules();
  });
  afterEach(async () => {
    process.env = originalEnv;
    jest.resetModules();
  });
  test('should return ConnectRedis store when USE_REDIS is true', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const store = cacheFactory.sessionCache('test-sessions', 3600);
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    // Verify it returns a ConnectRedis instance
    expect(store).toBeDefined();
    expect(store.constructor.name).toBe('RedisStore');
    expect(store.prefix).toBe('test-sessions:');
    // Test session operations
    const sessionId = 'sess:123456';
    const sessionData: SessionData = {
      user: { id: 'user123', name: 'Test User' },
      cookie: { maxAge: 3600000 },
    };
    const async = asyncStore(store);
    // Set session
    await async.set(sessionId, sessionData);
    // Get session
    const retrieved = await async.get(sessionId);
    expect(retrieved).toEqual(sessionData);
    // Touch session (update expiry)
    await async.touch(sessionId, sessionData);
    // Destroy session
    await async.destroy(sessionId);
    // Verify deletion (the Redis-backed store reports a missing id as null)
    const afterDelete = await async.get(sessionId);
    expect(afterDelete).toBeNull();
  });
  test('should return MemoryStore when USE_REDIS is false', async () => {
    process.env.USE_REDIS = 'false';
    const cacheFactory = await import('../../cacheFactory');
    const store = cacheFactory.sessionCache('test-sessions', 3600);
    // Verify it returns a MemoryStore instance
    expect(store).toBeDefined();
    expect(store.constructor.name).toBe('MemoryStore');
    // Test session operations
    const sessionId = 'mem:789012';
    const sessionData: SessionData = {
      user: { id: 'user456', name: 'Memory User' },
      cookie: { maxAge: 3600000 },
    };
    const async = asyncStore(store);
    // Set session
    await async.set(sessionId, sessionData);
    // Get session
    const retrieved = await async.get(sessionId);
    expect(retrieved).toEqual(sessionData);
    // Destroy session
    await async.destroy(sessionId);
    // Verify deletion (MemoryStore reports a missing id as undefined,
    // unlike the Redis-backed store above)
    const afterDelete = await async.get(sessionId);
    expect(afterDelete).toBeUndefined();
  });
  test('should handle namespace with and without trailing colon', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const store1 = cacheFactory.sessionCache('namespace1');
    const store2 = cacheFactory.sessionCache('namespace2:');
    // Both forms normalize to a single trailing colon.
    expect(store1.prefix).toBe('namespace1:');
    expect(store2.prefix).toBe('namespace2:');
  });
  test('should register error handler for Redis connection', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    // Spy on ioredisClient.on
    const onSpy = jest.spyOn(ioredisClient!, 'on');
    // Create session store
    cacheFactory.sessionCache('error-test');
    // Verify error handler was registered
    expect(onSpy).toHaveBeenCalledWith('error', expect.any(Function));
    onSpy.mockRestore();
  });
  test('should handle session expiration with TTL', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const ttl = 1; // 1 second TTL
    const store = cacheFactory.sessionCache('ttl-sessions', ttl);
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    const sessionId = 'ttl:12345';
    const sessionData: SessionData = { userId: 'ttl-user' };
    const async = asyncStore(store);
    // Set session with short TTL
    await async.set(sessionId, sessionData);
    // Verify session exists immediately
    const immediate = await async.get(sessionId);
    expect(immediate).toEqual(sessionData);
    // Wait for TTL to expire (extra 0.5s margin for Redis expiry)
    await new Promise((resolve) => setTimeout(resolve, (ttl + 0.5) * 1000));
    // Verify session has expired
    const expired = await async.get(sessionId);
    expect(expired).toBeNull();
  });
});

View file

@@ -0,0 +1,185 @@
import type { Keyv } from 'keyv';
// Mock only GLOBAL_PREFIX_SEPARATOR (replaced with '>>') so Redis keys
// written by these tests are visibly namespaced; everything else in
// redisClients stays real (requireActual) for true integration coverage.
jest.mock('../../redisClients', () => {
  const originalModule = jest.requireActual('../../redisClients');
  return {
    ...originalModule,
    GLOBAL_PREFIX_SEPARATOR: '>>',
  };
});
// Integration tests for cacheFactory.standardCache (Keyv-based cache).
// With the GLOBAL_PREFIX_SEPARATOR mock above, keys in Redis take the
// form '<REDIS_KEY_PREFIX>>><namespace>:<key>'.
describe('standardCache', () => {
  // Snapshot of the environment, restored after every test.
  let originalEnv: NodeJS.ProcessEnv;
  // Cache under test; cleared and nulled in afterEach.
  let testCache: Keyv | null = null;
  // Helper function to verify Redis keys exist
  const expectRedisKeysExist = async (expectedKeys: string[]) => {
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    if (!ioredisClient) throw new Error('ioredisClient is null');
    const allKeys = await ioredisClient.keys('Cache-Integration-Test*');
    expectedKeys.forEach((expectedKey) => {
      expect(allKeys).toContain(expectedKey);
    });
  };
  beforeEach(() => {
    originalEnv = { ...process.env };
    // Clear cache-related env vars
    delete process.env.USE_REDIS;
    delete process.env.REDIS_URI;
    delete process.env.USE_REDIS_CLUSTER;
    delete process.env.REDIS_PING_INTERVAL;
    delete process.env.REDIS_KEY_PREFIX;
    delete process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES;
    // Set test configuration
    process.env.REDIS_PING_INTERVAL = '0';
    process.env.REDIS_KEY_PREFIX = 'Cache-Integration-Test';
    process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
    // Clear require cache to reload modules
    jest.resetModules();
  });
  afterEach(async () => {
    // Clean up test keys using prefix and test namespaces
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    if (ioredisClient && ioredisClient.status === 'ready') {
      try {
        const patterns = [
          'Cache-Integration-Test>>*',
          'Cache-Integration-Test>>test-namespace:*',
          'Cache-Integration-Test>>another-namespace:*',
        ];
        for (const pattern of patterns) {
          const keys = await ioredisClient.keys(pattern);
          if (keys.length > 0) {
            await ioredisClient.del(...keys);
          }
        }
      } catch (error: unknown) {
        // Cleanup is best-effort; log and continue so teardown still runs.
        if (error instanceof Error) {
          console.warn('Error cleaning up test keys:', error.message);
        }
      }
    }
    // Clean up cache instance
    if (testCache) {
      try {
        await testCache.clear();
      } catch (error: unknown) {
        if (error instanceof Error) {
          console.warn('Error clearing cache:', error.message);
        }
      }
      testCache = null;
    }
    process.env = originalEnv;
    jest.resetModules();
  });
  describe('when USE_REDIS is false', () => {
    test('should create in-memory cache', async () => {
      process.env.USE_REDIS = 'false';
      const cacheFactory = await import('../../cacheFactory');
      testCache = cacheFactory.standardCache('test-namespace');
      expect(testCache).toBeDefined();
      expect(testCache.constructor.name).toBe('Keyv');
    });
    test('should use fallback store when provided', async () => {
      process.env.USE_REDIS = 'false';
      const fallbackStore = new Map();
      const cacheFactory = await import('../../cacheFactory');
      testCache = cacheFactory.standardCache('test-namespace', 200, fallbackStore);
      expect(testCache).toBeDefined();
      // Type assertion to access internal options
      const cacheWithOpts = testCache as Keyv & {
        opts: { store: unknown; namespace: string; ttl: number };
      };
      expect(cacheWithOpts.opts.store).toBe(fallbackStore);
      expect(cacheWithOpts.opts.namespace).toBe('test-namespace');
      expect(cacheWithOpts.opts.ttl).toBe(200);
    });
  });
  describe('when connecting to a Redis server', () => {
    test('should handle different namespaces with correct prefixes', async () => {
      process.env.USE_REDIS = 'true';
      process.env.USE_REDIS_CLUSTER = 'false';
      process.env.REDIS_URI = 'redis://127.0.0.1:6379';
      const cacheFactory = await import('../../cacheFactory');
      const cache1 = cacheFactory.standardCache('namespace-one');
      const cache2 = cacheFactory.standardCache('namespace-two');
      await cache1.set('key1', 'value1');
      await cache2.set('key2', 'value2');
      // Verify both caches work independently
      expect(await cache1.get('key1')).toBe('value1');
      expect(await cache2.get('key2')).toBe('value2');
      expect(await cache1.get('key2')).toBeUndefined();
      expect(await cache2.get('key1')).toBeUndefined();
      // Verify Redis keys have correct prefixes for different namespaces
      await expectRedisKeysExist([
        'Cache-Integration-Test>>namespace-one:key1',
        'Cache-Integration-Test>>namespace-two:key2',
      ]);
      await cache1.clear();
      await cache2.clear();
    });
    test('should respect FORCED_IN_MEMORY_CACHE_NAMESPACES', async () => {
      process.env.USE_REDIS = 'true';
      process.env.USE_REDIS_CLUSTER = 'false';
      process.env.REDIS_URI = 'redis://127.0.0.1:6379';
      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = 'ROLES'; // Use a valid cache key
      const cacheFactory = await import('../../cacheFactory');
      // This should create an in-memory cache despite USE_REDIS being true
      testCache = cacheFactory.standardCache('ROLES', 5000);
      expect(testCache).toBeDefined();
      expect(testCache.constructor.name).toBe('Keyv');
      // Type assertion to access internal options
      const cacheWithOpts = testCache as Keyv & { opts: { namespace: string; ttl: number } };
      expect(cacheWithOpts.opts.namespace).toBe('ROLES');
      expect(cacheWithOpts.opts.ttl).toBe(5000);
    });
    test('should handle TTL correctly', async () => {
      process.env.USE_REDIS = 'true';
      process.env.USE_REDIS_CLUSTER = 'false';
      process.env.REDIS_URI = 'redis://127.0.0.1:6379';
      const cacheFactory = await import('../../cacheFactory');
      testCache = cacheFactory.standardCache('ttl-test', 1000); // 1 second TTL
      const testKey = 'ttl-key';
      const testValue = 'ttl-value';
      await testCache.set(testKey, testValue);
      expect(await testCache.get(testKey)).toBe(testValue);
      // Wait for TTL to expire (100ms margin past the 1s TTL)
      await new Promise((resolve) => setTimeout(resolve, 1100));
      expect(await testCache.get(testKey)).toBeUndefined();
    });
  });
});

View file

@@ -0,0 +1,241 @@
// Shape of the violation records stored by these tests. All fields are
// optional so individual tests can exercise different payloads, from a
// simple count/timestamp pair up to deeply nested metadata.
interface ViolationData {
  count?: number;
  timestamp?: number;
  namespace?: number;
  data?: string;
  userId?: string;
  violations?: Array<{
    type: string;
    timestamp: number;
    severity: string;
  }>;
  metadata?: {
    ip: string;
    userAgent: string;
    nested: {
      deep: {
        value: string;
      };
    };
  };
}
// Integration tests for cacheFactory.violationCache (Keyv-based cache for
// rate-limit/abuse violation records) against a live Redis server.
// cacheFactory reads env vars at import time, so each test resets the
// module registry and re-imports after arranging process.env.
describe('violationCache', () => {
  // Snapshot of the environment, restored after every test.
  let originalEnv: NodeJS.ProcessEnv;
  beforeEach(() => {
    originalEnv = { ...process.env };
    // Clear cache-related env vars
    delete process.env.USE_REDIS;
    delete process.env.REDIS_URI;
    delete process.env.USE_REDIS_CLUSTER;
    delete process.env.REDIS_PING_INTERVAL;
    delete process.env.REDIS_KEY_PREFIX;
    // Set test configuration
    process.env.REDIS_PING_INTERVAL = '0';
    process.env.REDIS_KEY_PREFIX = 'Cache-Integration-Test';
    process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
    // Clear require cache to reload modules
    jest.resetModules();
  });
  afterEach(async () => {
    process.env = originalEnv;
    jest.resetModules();
  });
  test('should create violation cache with Redis when USE_REDIS is true', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const cache = cacheFactory.violationCache('test-violations', 60000); // 60 second TTL
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    // Verify it returns a Keyv instance
    expect(cache).toBeDefined();
    expect(cache.constructor.name).toBe('Keyv');
    // Test basic cache operations
    const testKey = 'user:456:violation';
    const testValue: ViolationData = { count: 1, timestamp: Date.now() };
    // SET operation
    await cache.set(testKey, testValue);
    // GET operation
    const retrievedValue = await cache.get(testKey);
    expect(retrievedValue).toEqual(testValue);
    // DELETE operation
    const deleted = await cache.delete(testKey);
    expect(deleted).toBe(true);
    // Verify deletion
    const afterDelete = await cache.get(testKey);
    expect(afterDelete).toBeUndefined();
  });
  test('should use fallback store when USE_REDIS is false', async () => {
    process.env.USE_REDIS = 'false';
    const cacheFactory = await import('../../cacheFactory');
    const cache = cacheFactory.violationCache('test-violations');
    // Verify it returns a Keyv instance
    expect(cache).toBeDefined();
    expect(cache.constructor.name).toBe('Keyv');
    // Test basic operations with fallback store
    const testKey = 'user:789:violation';
    const testValue: ViolationData = { count: 2, timestamp: Date.now() };
    // SET operation
    await cache.set(testKey, testValue);
    // GET operation
    const retrievedValue = await cache.get(testKey);
    expect(retrievedValue).toEqual(testValue);
    // DELETE operation
    const deleted = await cache.delete(testKey);
    expect(deleted).toBe(true);
    // Verify deletion
    const afterDelete = await cache.get(testKey);
    expect(afterDelete).toBeUndefined();
  });
  test('should respect namespace prefixing', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const cache1 = cacheFactory.violationCache('namespace1');
    const cache2 = cacheFactory.violationCache('namespace2');
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    const testKey = 'shared-key';
    const value1: ViolationData = { namespace: 1 };
    const value2: ViolationData = { namespace: 2 };
    // Set same key in different namespaces
    await cache1.set(testKey, value1);
    await cache2.set(testKey, value2);
    // Verify namespace isolation: each cache sees only its own value
    const retrieved1 = await cache1.get(testKey);
    const retrieved2 = await cache2.get(testKey);
    expect(retrieved1).toEqual(value1);
    expect(retrieved2).toEqual(value2);
    // Clean up
    await cache1.delete(testKey);
    await cache2.delete(testKey);
  });
  test('should respect TTL settings', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const ttl = 1000; // 1 second TTL
    const cache = cacheFactory.violationCache('ttl-test', ttl);
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    const testKey = 'ttl-key';
    const testValue: ViolationData = { data: 'expires soon' };
    // Set value with TTL
    await cache.set(testKey, testValue);
    // Verify value exists immediately
    const immediate = await cache.get(testKey);
    expect(immediate).toEqual(testValue);
    // Wait for TTL to expire (100ms margin)
    await new Promise((resolve) => setTimeout(resolve, ttl + 100));
    // Verify value has expired
    const expired = await cache.get(testKey);
    expect(expired).toBeUndefined();
  });
  test('should handle complex violation data structures', async () => {
    process.env.USE_REDIS = 'true';
    process.env.USE_REDIS_CLUSTER = 'false';
    process.env.REDIS_URI = 'redis://127.0.0.1:6379';
    const cacheFactory = await import('../../cacheFactory');
    const redisClients = await import('../../redisClients');
    const { ioredisClient } = redisClients;
    const cache = cacheFactory.violationCache('complex-violations');
    // Wait for Redis connection to be ready
    if (ioredisClient && ioredisClient.status !== 'ready') {
      await new Promise<void>((resolve) => {
        ioredisClient.once('ready', resolve);
      });
    }
    // Nested arrays/objects exercise round-trip serialization.
    const complexData: ViolationData = {
      userId: 'user123',
      violations: [
        { type: 'rate_limit', timestamp: Date.now(), severity: 'warning' },
        { type: 'spam', timestamp: Date.now() - 1000, severity: 'critical' },
      ],
      metadata: {
        ip: '192.168.1.1',
        userAgent: 'Mozilla/5.0',
        nested: {
          deep: {
            value: 'test',
          },
        },
      },
    };
    const key = 'complex-violation-data';
    // Store complex data
    await cache.set(key, complexData);
    // Retrieve and verify
    const retrieved = await cache.get(key);
    expect(retrieved).toEqual(complexData);
    // Clean up
    await cache.delete(key);
  });
});

View file

@ -0,0 +1,168 @@
import type { Redis, Cluster } from 'ioredis';
import type { RedisClientType, RedisClusterType } from '@redis/client';
// Union of both client libraries under test: node-redis (@redis/client)
// single/cluster clients and ioredis single/cluster clients.
type RedisClient = RedisClientType | RedisClusterType | Redis | Cluster;
describe('redisClients Integration Tests', () => {
let originalEnv: NodeJS.ProcessEnv;
let ioredisClient: Redis | Cluster | null = null;
let keyvRedisClient: RedisClientType | RedisClusterType | null = null;
// Helper function to test set/get/delete operations
const testRedisOperations = async (client: RedisClient, keyPrefix: string): Promise<void> => {
// Wait cluster to fully initialize
await new Promise((resolve) => setTimeout(resolve, 1000));
const testKey = `${keyPrefix}-test-key`;
const testValue = `${keyPrefix}-test-value`;
// Test set operation
await client.set(testKey, testValue);
// Test get operation
const result = await client.get(testKey);
expect(result).toBe(testValue);
// Test delete operation
const deleteResult = await client.del(testKey);
expect(deleteResult).toBe(1);
// Verify key is deleted
const deletedResult = await client.get(testKey);
expect(deletedResult).toBeNull();
};
beforeEach(() => {
originalEnv = { ...process.env };
// Clear Redis-related env vars
delete process.env.USE_REDIS;
delete process.env.REDIS_URI;
delete process.env.USE_REDIS_CLUSTER;
delete process.env.REDIS_PING_INTERVAL;
delete process.env.REDIS_KEY_PREFIX;
// Set common test configuration
process.env.REDIS_PING_INTERVAL = '0';
process.env.REDIS_KEY_PREFIX = 'Redis-Integration-Test';
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
process.env.REDIS_PING_INTERVAL = '1000';
// Clear module cache to reload module
jest.resetModules();
});
afterEach(async () => {
// Clean up test keys using the prefix
if (ioredisClient && ioredisClient.status === 'ready') {
try {
const keys = await ioredisClient.keys('Redis-Integration-Test::*');
if (keys.length > 0) {
await ioredisClient.del(...keys);
}
} catch (error: any) {
console.warn('Error cleaning up test keys:', error.message);
}
}
// Cleanup Redis connections
if (ioredisClient) {
try {
if (ioredisClient.status === 'ready') {
ioredisClient.disconnect();
}
} catch (error: any) {
console.warn('Error disconnecting ioredis client:', error.message);
}
ioredisClient = null;
}
if (keyvRedisClient) {
try {
// Try to disconnect - keyv/redis client doesn't have an isReady property
await keyvRedisClient.disconnect();
} catch (error: any) {
console.warn('Error disconnecting keyv redis client:', error.message);
}
keyvRedisClient = null;
}
process.env = originalEnv;
jest.resetModules();
});
// Integration tests for the ioredis-based client exported by redisClients.
// The module is imported dynamically AFTER the env vars are set (and after
// jest.resetModules() in beforeEach) so module-load-time configuration applies.
describe('ioredis Client Tests', () => {
describe('when USE_REDIS is false', () => {
test('should have null client', async () => {
process.env.USE_REDIS = 'false';
const clients = await import('../redisClients');
ioredisClient = clients.ioredisClient;
expect(ioredisClient).toBeNull();
});
});
describe('when connecting to a Redis instance', () => {
// Requires a local single-node Redis server at 127.0.0.1:6379.
test('should connect and perform set/get/delete operations', async () => {
process.env.USE_REDIS = 'true';
process.env.USE_REDIS_CLUSTER = 'false';
process.env.REDIS_URI = 'redis://127.0.0.1:6379';
const clients = await import('../redisClients');
ioredisClient = clients.ioredisClient;
// Non-null assertion: USE_REDIS=true guarantees the module created a client.
await testRedisOperations(ioredisClient!, 'ioredis-single');
});
});
describe('when connecting to a Redis cluster', () => {
// Requires a local Redis cluster listening on ports 7001-7003.
test('should connect to cluster and perform set/get/delete operations', async () => {
process.env.USE_REDIS = 'true';
process.env.USE_REDIS_CLUSTER = 'true';
process.env.REDIS_URI =
'redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003';
const clients = await import('../redisClients');
ioredisClient = clients.ioredisClient;
await testRedisOperations(ioredisClient!, 'ioredis-cluster');
});
});
});
// Mirror of the ioredis suite above, exercising the @keyv/redis client instead.
// Same dynamic-import pattern: env vars must be set before the module loads.
describe('keyvRedisClient Tests', () => {
describe('when USE_REDIS is false', () => {
test('should have null client', async () => {
process.env.USE_REDIS = 'false';
const clients = await import('../redisClients');
keyvRedisClient = clients.keyvRedisClient;
expect(keyvRedisClient).toBeNull();
});
});
describe('when connecting to a Redis instance', () => {
// Requires a local single-node Redis server at 127.0.0.1:6379.
test('should connect and perform set/get/delete operations', async () => {
process.env.USE_REDIS = 'true';
process.env.USE_REDIS_CLUSTER = 'false';
process.env.REDIS_URI = 'redis://127.0.0.1:6379';
const clients = await import('../redisClients');
keyvRedisClient = clients.keyvRedisClient;
// Non-null assertion: USE_REDIS=true guarantees the module created a client.
await testRedisOperations(keyvRedisClient!, 'keyv-single');
});
});
describe('when connecting to a Redis cluster', () => {
// Requires a local Redis cluster listening on ports 7001-7003.
test('should connect to cluster and perform set/get/delete operations', async () => {
process.env.USE_REDIS = 'true';
process.env.USE_REDIS_CLUSTER = 'true';
process.env.REDIS_URI =
'redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003';
const clients = await import('../redisClients');
keyvRedisClient = clients.keyvRedisClient;
await testRedisOperations(keyvRedisClient!, 'keyv-cluster');
});
});
});
});

89
packages/api/src/cache/cacheConfig.ts vendored Normal file
View file

@ -0,0 +1,89 @@
import { readFileSync, existsSync } from 'fs';
import { logger } from '@librechat/data-schemas';
import { CacheKeys } from 'librechat-data-provider';
import { math, isEnabled } from '~/utils';
// To ensure that different deployments do not interfere with each other's cache, we use a prefix for the Redis keys.
// This prefix is usually the deployment ID, which is often passed to the container or pod as an env var.
// Set REDIS_KEY_PREFIX_VAR to the env var that contains the deployment ID.
const REDIS_KEY_PREFIX_VAR = process.env.REDIS_KEY_PREFIX_VAR;
const REDIS_KEY_PREFIX = process.env.REDIS_KEY_PREFIX;
// Fail fast at import time: the two prefix mechanisms are mutually exclusive.
if (REDIS_KEY_PREFIX_VAR && REDIS_KEY_PREFIX) {
throw new Error('Only either REDIS_KEY_PREFIX_VAR or REDIS_KEY_PREFIX can be set.');
}
const USE_REDIS = isEnabled(process.env.USE_REDIS);
// Fail fast: enabling Redis without a URI is always a misconfiguration.
if (USE_REDIS && !process.env.REDIS_URI) {
throw new Error('USE_REDIS is enabled but REDIS_URI is not set.');
}
// Comma-separated list of cache namespaces that should be forced to use in-memory storage
// even when Redis is enabled. This allows selective performance optimization for specific caches.
const FORCED_IN_MEMORY_CACHE_NAMESPACES = process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES
? process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES.split(',').map((key) => key.trim())
: [];
// Validate against CacheKeys enum
// (Typos in the env var would otherwise silently leave a namespace on Redis.)
if (FORCED_IN_MEMORY_CACHE_NAMESPACES.length > 0) {
const validKeys = Object.values(CacheKeys) as string[];
const invalidKeys = FORCED_IN_MEMORY_CACHE_NAMESPACES.filter((key) => !validKeys.includes(key));
if (invalidKeys.length > 0) {
throw new Error(
`Invalid cache keys in FORCED_IN_MEMORY_CACHE_NAMESPACES: ${invalidKeys.join(', ')}. Valid keys: ${validKeys.join(', ')}`,
);
}
}
/**
 * Safely reads the Redis CA certificate from the file referenced by the
 * REDIS_CA env var. The file is read directly and the error code inspected,
 * instead of calling existsSync first, which avoids a time-of-check /
 * time-of-use race with the filesystem.
 * @returns The contents of the CA certificate file, or null if REDIS_CA is
 * unset, the file is missing, or it cannot be read.
 */
const getRedisCA = (): string | null => {
  const caPath = process.env.REDIS_CA;
  if (!caPath) {
    return null;
  }
  try {
    return readFileSync(caPath, 'utf8');
  } catch (error) {
    // A missing file is an expected misconfiguration: warn, don't error.
    if ((error as NodeJS.ErrnoException)?.code === 'ENOENT') {
      logger.warn(`Redis CA certificate file not found: ${caPath}`);
    } else {
      logger.error(`Failed to read Redis CA certificate file '${caPath}':`, error);
    }
    return null;
  }
};
/**
 * Centralized, immutable snapshot of all cache/Redis settings, resolved once
 * from the environment at module load.
 */
const cacheConfig = {
FORCED_IN_MEMORY_CACHE_NAMESPACES,
USE_REDIS,
REDIS_URI: process.env.REDIS_URI,
REDIS_USERNAME: process.env.REDIS_USERNAME,
REDIS_PASSWORD: process.env.REDIS_PASSWORD,
REDIS_CA: getRedisCA(),
// Deployment key prefix: the env var named by REDIS_KEY_PREFIX_VAR wins over a
// direct REDIS_KEY_PREFIX value; empty string when neither is set.
REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR ?? ''] || REDIS_KEY_PREFIX || '',
REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
// NOTE(review): consumers multiply this by 1000 — presumably seconds; confirm units.
REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),
/** Max delay between reconnection attempts in ms */
REDIS_RETRY_MAX_DELAY: math(process.env.REDIS_RETRY_MAX_DELAY, 3000),
/** Max number of reconnection attempts (0 = infinite) */
REDIS_RETRY_MAX_ATTEMPTS: math(process.env.REDIS_RETRY_MAX_ATTEMPTS, 10),
/** Connection timeout in ms */
REDIS_CONNECT_TIMEOUT: math(process.env.REDIS_CONNECT_TIMEOUT, 10000),
/** Queue commands when disconnected */
REDIS_ENABLE_OFFLINE_QUEUE: isEnabled(process.env.REDIS_ENABLE_OFFLINE_QUEUE ?? 'true'),
/** flag to modify redis connection by adding dnsLookup this is required when connecting to elasticache for ioredis
 * see "Special Note: Aws Elasticache Clusters with TLS" on this webpage: https://www.npmjs.com/package/ioredis **/
REDIS_USE_ALTERNATIVE_DNS_LOOKUP: isEnabled(process.env.REDIS_USE_ALTERNATIVE_DNS_LOOKUP),
/** Enable redis cluster without the need of multiple URIs */
USE_REDIS_CLUSTER: isEnabled(process.env.USE_REDIS_CLUSTER ?? 'false'),
CI: isEnabled(process.env.CI),
DEBUG_MEMORY_CACHE: isEnabled(process.env.DEBUG_MEMORY_CACHE),
BAN_DURATION: math(process.env.BAN_DURATION, 7200000), // 2 hours
};

116
packages/api/src/cache/cacheFactory.ts vendored Normal file
View file

@ -0,0 +1,116 @@
/**
* @keyv/redis exports its default class in a non-standard way:
* module.exports = { default: KeyvRedis, ... } instead of module.exports = KeyvRedis
* This breaks ES6 imports when the module is marked as external in rollup.
* We must use require() to access the .default property directly.
*/
// eslint-disable-next-line @typescript-eslint/no-require-imports
const KeyvRedis = require('@keyv/redis').default as typeof import('@keyv/redis').default;
import { Keyv } from 'keyv';
import createMemoryStore from 'memorystore';
import { RedisStore } from 'rate-limit-redis';
import { Time } from 'librechat-data-provider';
import { logger } from '@librechat/data-schemas';
import session, { MemoryStore } from 'express-session';
import { RedisStore as ConnectRedis } from 'connect-redis';
import type { SendCommandFn } from 'rate-limit-redis';
import { keyvRedisClient, ioredisClient, GLOBAL_PREFIX_SEPARATOR } from './redisClients';
import { cacheConfig } from './cacheConfig';
import { violationFile } from './keyvFiles';
/**
* Creates a cache instance using Redis or a fallback store. Suitable for general caching needs.
* @param namespace - The cache namespace.
* @param ttl - Time to live for cache entries.
* @param fallbackStore - Optional fallback store if Redis is not used.
* @returns Cache instance.
*/
export const standardCache = (namespace: string, ttl?: number, fallbackStore?: object): Keyv => {
if (keyvRedisClient && !cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES?.includes(namespace)) {
try {
const keyvRedis = new KeyvRedis(keyvRedisClient);
const cache = new Keyv(keyvRedis, { namespace, ttl });
keyvRedis.namespace = cacheConfig.REDIS_KEY_PREFIX;
keyvRedis.keyPrefixSeparator = GLOBAL_PREFIX_SEPARATOR;
cache.on('error', (err) => {
logger.error(`Cache error in namespace ${namespace}:`, err);
});
return cache;
} catch (err) {
logger.error(`Failed to create Redis cache for namespace ${namespace}:`, err);
throw err;
}
}
if (fallbackStore) {
return new Keyv({ store: fallbackStore, namespace, ttl });
}
return new Keyv({ namespace, ttl });
};
/**
 * Builds a cache for violation data under the `violations:` namespace,
 * persisting to a JSON file when Redis is disabled.
 * @param namespace - The cache namespace for violations.
 * @param ttl - Time to live for cache entries.
 * @returns Cache instance for violations.
 */
export const violationCache = (namespace: string, ttl?: number): Keyv =>
  standardCache(`violations:${namespace}`, ttl, violationFile);
/**
 * Builds a session store: Redis-backed when USE_REDIS is enabled, otherwise
 * an express-session MemoryStore with daily stale-entry sweeps.
 * @param namespace - The session namespace (a trailing ':' is appended if missing).
 * @param ttl - Time to live for session entries.
 * @returns Session store instance.
 */
export const sessionCache = (namespace: string, ttl?: number): MemoryStore | ConnectRedis => {
  const prefix = namespace.endsWith(':') ? namespace : `${namespace}:`;
  if (!cacheConfig.USE_REDIS) {
    const InMemorySessionStore = createMemoryStore(session);
    return new InMemorySessionStore({ ttl, checkPeriod: Time.ONE_DAY });
  }
  const redisSessionStore = new ConnectRedis({ client: ioredisClient, ttl, prefix });
  if (ioredisClient) {
    ioredisClient.on('error', (err) => {
      logger.error(`Session store Redis error for namespace ${prefix}:`, err);
    });
  }
  return redisSessionStore;
};
/**
 * Builds a Redis-backed store for express-rate-limit.
 * @param prefix - The key prefix for rate limiting (a trailing ':' is appended if missing).
 * @returns RedisStore instance, or undefined if Redis is not used.
 * @throws If prefix is empty.
 */
export const limiterCache = (prefix: string): RedisStore | undefined => {
  if (!prefix) {
    throw new Error('prefix is required');
  }
  if (!cacheConfig.USE_REDIS) {
    return undefined;
  }
  // TODO: The prefix is not actually applied. Also needs to account for global prefix.
  const normalizedPrefix = prefix.endsWith(':') ? prefix : `${prefix}:`;
  try {
    // Bridge rate-limit-redis onto the shared ioredis connection.
    const sendCommand: SendCommandFn = (async (...args: string[]) => {
      if (ioredisClient == null) {
        throw new Error('Redis client not available');
      }
      const [command, ...commandArgs] = args;
      try {
        return await ioredisClient.call(command, ...commandArgs);
      } catch (err) {
        logger.error('Redis command execution failed:', err);
        throw err;
      }
    }) as SendCommandFn;
    return new RedisStore({ sendCommand, prefix: normalizedPrefix });
  } catch (err) {
    logger.error(`Failed to create Redis rate limiter for prefix ${normalizedPrefix}:`, err);
    return undefined;
  }
};

5
packages/api/src/cache/index.ts vendored Normal file
View file

@ -0,0 +1,5 @@
/* Barrel file for the cache package: config, Redis clients, file/Mongo stores,
 * and the cache factory functions. */
export * from './cacheConfig';
export * from './redisClients';
export * from './keyvFiles';
export { default as keyvMongo } from './keyvMongo';
export * from './cacheFactory';

6
packages/api/src/cache/keyvFiles.ts vendored Normal file
View file

@ -0,0 +1,6 @@
import { KeyvFile } from 'keyv-file';
/** Builds a file-backed Keyv store with a raised listener cap (many caches may
 * share one file store, which would otherwise trip MaxListeners warnings). */
const createFileStore = (filename: string) => new KeyvFile({ filename }).setMaxListeners(20);
export const logFile = createFileStore('./data/logs.json');
export const violationFile = createFileStore('./data/violations.json');

279
packages/api/src/cache/keyvMongo.ts vendored Normal file
View file

@ -0,0 +1,279 @@
import mongoose from 'mongoose';
import { EventEmitter } from 'events';
import { logger } from '@librechat/data-schemas';
import { GridFSBucket, type Db, type ReadPreference, type Collection } from 'mongodb';
/** Construction options for KeyvMongoCustom. */
interface KeyvMongoOptions {
// Present for API compatibility; the shared mongoose connection is used instead.
url?: string;
// Collection name (or GridFS bucket name when useGridFS is true).
collection?: string;
// Store values as GridFS files rather than collection documents.
useGridFS?: boolean;
readPreference?: ReadPreference;
}
/** Handles for the GridFS storage mode. */
interface GridFSClient {
bucket: GridFSBucket;
store: Collection;
db: Db;
}
/** Handles for the plain-collection storage mode. */
interface CollectionClient {
store: Collection;
db: Db;
}
type Client = GridFSClient | CollectionClient;
// Module-level cache of initialized clients, keyed by collection + mode, so all
// KeyvMongoCustom instances share the same handles.
const storeMap = new Map<string, Client>();
/**
 * Keyv-compatible MongoDB store that piggybacks on the application's shared
 * mongoose connection instead of opening its own. Supports two storage modes:
 * plain documents in a collection, or GridFS files (for large values).
 * Emits 'error' events on initialization failures.
 */
class KeyvMongoCustom extends EventEmitter {
private opts: KeyvMongoOptions;
// Advertised to Keyv: expiration is handled via the stored expiresAt field,
// not by this adapter's TTL mechanism.
public ttlSupport: boolean;
// Set by Keyv; used by clear() to scope deletion to this namespace.
public namespace?: string;
constructor(options: KeyvMongoOptions = {}) {
super();
this.opts = {
url: 'mongodb://127.0.0.1:27017',
collection: 'keyv',
...options,
};
this.ttlSupport = false;
}
// Helper to access the store WITHOUT storing a promise on the instance
private async _getClient(): Promise<Client> {
const storeKey = `${this.opts.collection}:${this.opts.useGridFS ? 'gridfs' : 'collection'}`;
// If we already have the store initialized, return it directly
if (storeMap.has(storeKey)) {
return storeMap.get(storeKey)!;
}
// Check mongoose connection state
// readyState 1 === connected; anything else means connectDb() hasn't run (or dropped).
if (mongoose.connection.readyState !== 1) {
throw new Error('Mongoose connection not ready. Ensure connectDb() is called first.');
}
try {
const db = mongoose.connection.db as unknown as Db | undefined;
if (!db) {
throw new Error('MongoDB database not available');
}
let client: Client;
if (this.opts.useGridFS) {
const bucket = new GridFSBucket(db, {
readPreference: this.opts.readPreference,
bucketName: this.opts.collection,
});
// GridFS metadata lives in the '<bucket>.files' collection.
const store = db.collection(`${this.opts.collection}.files`);
client = { bucket, store, db };
} else {
const collection = this.opts.collection || 'keyv';
const store = db.collection(collection);
client = { store, db };
}
storeMap.set(storeKey, client);
return client;
} catch (error) {
this.emit('error', error);
throw error;
}
}
/**
 * Fetches a value by key. GridFS mode streams the file contents back as a
 * UTF-8 string (and bumps metadata.lastAccessed); collection mode returns the
 * document's `value` field. Resolves undefined when the key is absent.
 */
async get(key: string): Promise<unknown> {
const client = await this._getClient();
if (this.opts.useGridFS && this.isGridFSClient(client)) {
await client.store.updateOne(
{
filename: key,
},
{
$set: {
'metadata.lastAccessed': new Date(),
},
},
);
const stream = client.bucket.openDownloadStreamByName(key);
return new Promise((resolve) => {
const resp: Buffer[] = [];
// A stream error (e.g. missing file) resolves to undefined — a cache miss, not a failure.
stream.on('error', () => {
resolve(undefined);
});
stream.on('end', () => {
const data = Buffer.concat(resp).toString('utf8');
resolve(data);
});
stream.on('data', (chunk: Buffer) => {
resp.push(chunk);
});
});
}
const document = await client.store.findOne({ key: { $eq: key } });
if (!document) {
return undefined;
}
return document.value;
}
/**
 * Fetches many keys at once, preserving input order; missing keys yield
 * undefined at their position.
 */
async getMany(keys: string[]): Promise<unknown[]> {
const client = await this._getClient();
if (this.opts.useGridFS) {
// GridFS mode: fan out to get(); allSettled keeps one failure from sinking the batch.
const promises = [];
for (const key of keys) {
promises.push(this.get(key));
}
const values = await Promise.allSettled(promises);
const data: unknown[] = [];
for (const value of values) {
data.push(value.status === 'fulfilled' ? value.value : undefined);
}
return data;
}
const values = await client.store
.find({ key: { $in: keys } })
.project({ _id: 0, value: 1, key: 1 })
.toArray();
// Re-align unordered query results with the requested key order.
const results: unknown[] = [...keys];
let i = 0;
for (const key of keys) {
const rowIndex = values.findIndex((row) => row.key === key);
results[i] = rowIndex > -1 ? values[rowIndex].value : undefined;
i++;
}
return results;
}
/**
 * Stores a value with an optional TTL (ms from now, persisted as expiresAt).
 * GridFS mode writes a new file version; collection mode upserts the document.
 */
async set(key: string, value: string, ttl?: number): Promise<unknown> {
const client = await this._getClient();
const expiresAt = typeof ttl === 'number' ? new Date(Date.now() + ttl) : null;
if (this.opts.useGridFS && this.isGridFSClient(client)) {
const stream = client.bucket.openUploadStream(key, {
metadata: {
expiresAt,
lastAccessed: new Date(),
},
});
return new Promise((resolve) => {
stream.on('finish', () => {
resolve(stream);
});
stream.end(value);
});
}
await client.store.updateOne(
{ key: { $eq: key } },
{ $set: { key, value, expiresAt } },
{ upsert: true },
);
}
/**
 * Deletes a single key. Returns true when something was deleted (GridFS mode
 * returns true even if the file was absent, false only on errors).
 */
async delete(key: string): Promise<boolean> {
const client = await this._getClient();
if (this.opts.useGridFS && this.isGridFSClient(client)) {
try {
const bucket = new GridFSBucket(client.db, {
bucketName: this.opts.collection,
});
const files = await bucket.find({ filename: key }).toArray();
if (files.length > 0) {
// NOTE(review): only the first matching file version is deleted — confirm
// older GridFS revisions are not expected to linger.
await client.bucket.delete(files[0]._id);
}
return true;
} catch {
return false;
}
}
const object = await client.store.deleteOne({ key: { $eq: key } });
return object.deletedCount > 0;
}
/** Deletes multiple keys; returns true when at least one entry was removed. */
async deleteMany(keys: string[]): Promise<boolean> {
const client = await this._getClient();
if (this.opts.useGridFS && this.isGridFSClient(client)) {
const bucket = new GridFSBucket(client.db, {
bucketName: this.opts.collection,
});
const files = await bucket.find({ filename: { $in: keys } }).toArray();
if (files.length === 0) {
return false;
}
await Promise.all(files.map(async (file) => client.bucket.delete(file._id)));
return true;
}
const object = await client.store.deleteMany({ key: { $in: keys } });
return object.deletedCount > 0;
}
/**
 * Clears entries. GridFS mode drops the whole bucket; collection mode deletes
 * keys matching the instance namespace.
 */
async clear(): Promise<void> {
const client = await this._getClient();
if (this.opts.useGridFS && this.isGridFSClient(client)) {
try {
await client.bucket.drop();
} catch (error: unknown) {
// Throw error if not "namespace not found" error
const errorCode =
error instanceof Error && 'code' in error ? (error as { code?: number }).code : undefined;
if (errorCode !== 26) {
throw error;
}
}
}
// NOTE(review): in GridFS mode this also runs after the drop, issuing a
// deleteMany against the '.files' collection — confirm that is intentional.
// NOTE(review): the regex `^${namespace}:*` matches the namespace followed by
// zero-or-more ':' characters — presumably `:.*` was intended; verify against
// the key format Keyv produces.
await client.store.deleteMany({
key: { $regex: this.namespace ? `^${this.namespace}:*` : '' },
});
}
/** Returns true when the key exists (filename in GridFS mode, key field otherwise). */
async has(key: string): Promise<boolean> {
const client = await this._getClient();
const filter = { [this.opts.useGridFS ? 'filename' : 'key']: { $eq: key } };
const document = await client.store.countDocuments(filter, { limit: 1 });
return document !== 0;
}
// No-op disconnect
async disconnect(): Promise<boolean> {
// This is a no-op since we don't want to close the shared mongoose connection
return true;
}
// Type guard discriminating the two client shapes by the presence of `bucket`.
private isGridFSClient(client: Client): client is GridFSClient {
return (client as GridFSClient).bucket != null;
}
}
/** Shared singleton store, persisting entries in the `logs` collection. */
const keyvMongo = new KeyvMongoCustom({ collection: 'logs' });
// Surface store initialization failures in the application log instead of
// crashing on an unhandled 'error' event.
keyvMongo.on('error', (err) => {
  logger.error('KeyvMongo connection error:', err);
});
export default keyvMongo;

195
packages/api/src/cache/redisClients.ts vendored Normal file
View file

@ -0,0 +1,195 @@
import IoRedis from 'ioredis';
import type { Redis, Cluster } from 'ioredis';
import { logger } from '@librechat/data-schemas';
import { createClient, createCluster } from '@keyv/redis';
import type { RedisClientType, RedisClusterType } from '@redis/client';
import { cacheConfig } from './cacheConfig';
// Separator between the deployment-level key prefix and cache keys, e.g. "<prefix>::<key>".
const GLOBAL_PREFIX_SEPARATOR = '::';
// REDIS_URI may be a comma-separated list (cluster mode). Credentials come from
// the first URI when embedded there, falling back to the dedicated env settings.
const urls = cacheConfig.REDIS_URI?.split(',').map((uri) => new URL(uri)) || [];
const username = urls?.[0]?.username || cacheConfig.REDIS_USERNAME;
const password = urls?.[0]?.password || cacheConfig.REDIS_PASSWORD;
const ca = cacheConfig.REDIS_CA;
// ioredis client: used where raw Redis commands are needed (sessions, rate
// limiting). Initialized once at module load; null when USE_REDIS is disabled.
let ioredisClient: Redis | Cluster | null = null;
if (cacheConfig.USE_REDIS) {
const redisOptions: Record<string, unknown> = {
username: username,
password: password,
tls: ca ? { ca } : undefined,
// ioredis applies this prefix transparently to every key.
keyPrefix: `${cacheConfig.REDIS_KEY_PREFIX}${GLOBAL_PREFIX_SEPARATOR}`,
maxListeners: cacheConfig.REDIS_MAX_LISTENERS,
// Linear backoff capped at REDIS_RETRY_MAX_DELAY; returning null stops retrying.
retryStrategy: (times: number) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 50, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
// READONLY errors occur after a failover when writes hit a replica; reconnecting
// routes commands back to the new master.
reconnectOnError: (err: Error) => {
const targetError = 'READONLY';
if (err.message.includes(targetError)) {
logger.warn('ioredis reconnecting due to READONLY error');
return 2; // 2 = reconnect AND resend the failed command (ioredis reconnectOnError contract; not a delay)
}
return false;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
maxRetriesPerRequest: 3,
};
// Single URI without cluster flag -> standalone client; otherwise cluster client.
ioredisClient =
urls.length === 1 && !cacheConfig.USE_REDIS_CLUSTER
? new IoRedis(cacheConfig.REDIS_URI!, redisOptions)
: new IoRedis.Cluster(
urls.map((url) => ({ host: url.hostname, port: parseInt(url.port, 10) || 6379 })),
{
// Identity dnsLookup is required for AWS ElastiCache clusters with TLS
// (see the ioredis docs note referenced in cacheConfig).
...(cacheConfig.REDIS_USE_ALTERNATIVE_DNS_LOOKUP
? {
dnsLookup: (
address: string,
callback: (err: Error | null, address: string) => void,
) => callback(null, address),
}
: {}),
redisOptions,
clusterRetryStrategy: (times: number) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis cluster giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis cluster reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
},
);
// Lifecycle logging for observability.
ioredisClient.on('error', (err) => {
logger.error('ioredis client error:', err);
});
ioredisClient.on('connect', () => {
logger.info('ioredis client connected');
});
ioredisClient.on('ready', () => {
logger.info('ioredis client ready');
});
ioredisClient.on('reconnecting', (delay: number) => {
logger.info(`ioredis client reconnecting in ${delay}ms`);
});
ioredisClient.on('close', () => {
logger.warn('ioredis client connection closed');
});
/** Ping Interval to keep the Redis server connection alive (if enabled) */
let pingInterval: NodeJS.Timeout | null = null;
const clearPingInterval = () => {
if (pingInterval) {
clearInterval(pingInterval);
pingInterval = null;
}
};
if (cacheConfig.REDIS_PING_INTERVAL > 0) {
pingInterval = setInterval(() => {
// Only ping live connections; reconnection is handled by retryStrategy.
if (ioredisClient && ioredisClient.status === 'ready') {
ioredisClient.ping().catch((err) => {
logger.error('ioredis ping failed:', err);
});
}
}, cacheConfig.REDIS_PING_INTERVAL * 1000);
// Stop the timer when the connection goes away so it cannot keep the process alive.
ioredisClient.on('close', clearPingInterval);
ioredisClient.on('end', clearPingInterval);
}
}
// @keyv/redis (node-redis) client: backs the Keyv caches created in
// cacheFactory. Null when USE_REDIS is disabled.
let keyvRedisClient: RedisClientType | RedisClusterType | null = null;
if (cacheConfig.USE_REDIS) {
/**
 * ** WARNING ** Keyv Redis client does not support Prefix like ioredis above.
 * The prefix feature will be handled by the Keyv-Redis store in cacheFactory.js
 */
const redisOptions: Record<string, unknown> = {
username,
password,
socket: {
tls: ca != null,
ca,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
// Linear backoff capped at REDIS_RETRY_MAX_DELAY; returning an Error stops retrying.
reconnectStrategy: (retries: number) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
retries > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`@keyv/redis client giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return new Error('Max reconnection attempts reached');
}
const delay = Math.min(retries * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`@keyv/redis reconnecting... attempt ${retries}, delay ${delay}ms`);
return delay;
},
},
// node-redis uses the inverse flag of ioredis' enableOfflineQueue.
disableOfflineQueue: !cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
// node-redis has built-in keep-alive pings; interval is in milliseconds.
...(cacheConfig.REDIS_PING_INTERVAL > 0
? { pingInterval: cacheConfig.REDIS_PING_INTERVAL * 1000 }
: {}),
};
// Single URI without cluster flag -> standalone client; otherwise cluster client.
keyvRedisClient =
urls.length === 1 && !cacheConfig.USE_REDIS_CLUSTER
? createClient({ url: cacheConfig.REDIS_URI, ...redisOptions })
: createCluster({
rootNodes: urls.map((url) => ({ url: url.href })),
defaults: redisOptions,
});
keyvRedisClient.setMaxListeners(cacheConfig.REDIS_MAX_LISTENERS);
// Lifecycle logging for observability.
keyvRedisClient.on('error', (err) => {
logger.error('@keyv/redis client error:', err);
});
keyvRedisClient.on('connect', () => {
logger.info('@keyv/redis client connected');
});
keyvRedisClient.on('ready', () => {
logger.info('@keyv/redis client ready');
});
keyvRedisClient.on('reconnecting', () => {
logger.info('@keyv/redis client reconnecting...');
});
keyvRedisClient.on('disconnect', () => {
logger.warn('@keyv/redis client disconnected');
});
// Connection is initiated at module load (node-redis requires an explicit connect()).
// NOTE(review): rethrowing inside .catch() yields an unhandled promise rejection,
// which is process-fatal on modern Node — confirm this fail-fast is intended.
keyvRedisClient.connect().catch((err) => {
logger.error('@keyv/redis initial connection failed:', err);
throw err;
});
}
export { ioredisClient, keyvRedisClient, GLOBAL_PREFIX_SEPARATOR };

View file

@ -35,6 +35,8 @@ export * from './files';
export * from './tools';
/* web search */
export * from './web';
/* Cache */
export * from './cache';
/* types */
export type * from './mcp/types';
export type * from './flow/types';

View file

@ -12,8 +12,8 @@
*
* @throws Throws an error if the input is not a string or number, contains invalid characters, or does not evaluate to a number.
*/
export function math(str: string | number, fallbackValue?: number): number {
const fallback = typeof fallbackValue !== 'undefined' && typeof fallbackValue === 'number';
export function math(str: string | number | undefined, fallbackValue?: number): number {
const fallback = fallbackValue != null;
if (typeof str !== 'string' && typeof str === 'number') {
return str;
} else if (typeof str !== 'string') {