mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-02-20 09:24:10 +01:00
⚛️ refactor: Redis Scalability Improvements for High-Throughput Deployments (#11840)
* fix: Redis scalability improvements for high-throughput deployments Replace INCR+check+DECR race in concurrency middleware with atomic Lua scripts. The old approach allowed 3-4 concurrent requests through a limit of 2 at 300 req/s because another request could slip between the INCR returning and the DECR executing. The Lua scripts run atomically on the Redis server, eliminating the race window entirely. Add exponential backoff with jitter to all three Redis retry strategies (ioredis single-node, cluster, keyv). Previously all instances retried at the same millisecond after an outage, causing a connection storm. Batch the RedisJobStore cleanup loop into parallel chunks of 50. With 1000 stale jobs, this reduces cleanup from ~20s of sequential calls to ~2s. Also pipeline appendChunk (xadd + expire) into a single round-trip and refresh TTL on every chunk instead of only the first, preventing TTL expiry during long-running streams. Propagate publish errors in RedisEventTransport.emitDone and emitError so callers can detect dropped completion/error events. emitChunk is left as swallow-and-log because its callers fire-and-forget without await. Add jest.config.js for the API package with babel TypeScript support and path alias resolution. Fix existing stream integration tests that were silently broken due to missing USE_REDIS_CLUSTER=false env var. * chore: Migrate Jest configuration from jest.config.js to jest.config.mjs Removed the old jest.config.js file and integrated the Jest configuration into jest.config.mjs, adding Babel TypeScript support and path alias resolution. This change streamlines the configuration for the API package. * fix: Ensure Redis retry delays do not exceed maximum configured delay Updated the delay calculation in Redis retry strategies to enforce a maximum delay defined in the configuration. This change prevents excessive delays during reconnection attempts, improving overall connection stability and performance. 
* fix: Update RedisJobStore cleanup to handle job failures gracefully Changed the cleanup process in RedisJobStore to use Promise.allSettled instead of Promise.all, allowing for individual job failures to be logged without interrupting the entire cleanup operation. This enhances error handling and provides better visibility into issues during job cleanup.
This commit is contained in:
parent
5ea59ecb2b
commit
3fa94e843c
9 changed files with 696 additions and 70 deletions
|
|
@ -0,0 +1,258 @@
|
|||
import type { Redis, Cluster } from 'ioredis';
|
||||
|
||||
/**
|
||||
* Integration tests for concurrency middleware atomic Lua scripts.
|
||||
*
|
||||
* Tests that the Lua-based check-and-increment / decrement operations
|
||||
* are truly atomic and eliminate the INCR+check+DECR race window.
|
||||
*
|
||||
 * Run with: USE_REDIS=true npx jest --config packages/api/jest.config.mjs concurrency.cache_integration
|
||||
*/
|
||||
describe('Concurrency Middleware Integration Tests', () => {
|
||||
let originalEnv: NodeJS.ProcessEnv;
|
||||
let ioredisClient: Redis | Cluster | null = null;
|
||||
let checkAndIncrementPendingRequest: (
|
||||
userId: string,
|
||||
) => Promise<{ allowed: boolean; pendingRequests: number; limit: number }>;
|
||||
let decrementPendingRequest: (userId: string) => Promise<void>;
|
||||
const testPrefix = 'Concurrency-Integration-Test';
|
||||
|
||||
beforeAll(async () => {
|
||||
originalEnv = { ...process.env };
|
||||
|
||||
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
|
||||
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
|
||||
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
|
||||
process.env.REDIS_KEY_PREFIX = testPrefix;
|
||||
process.env.REDIS_PING_INTERVAL = '0';
|
||||
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
|
||||
process.env.LIMIT_CONCURRENT_MESSAGES = 'true';
|
||||
process.env.CONCURRENT_MESSAGE_MAX = '2';
|
||||
|
||||
jest.resetModules();
|
||||
|
||||
const { ioredisClient: client } = await import('../../cache/redisClients');
|
||||
ioredisClient = client;
|
||||
|
||||
if (!ioredisClient) {
|
||||
console.warn('Redis not available, skipping integration tests');
|
||||
return;
|
||||
}
|
||||
|
||||
// Import concurrency module after Redis client is available
|
||||
const concurrency = await import('../concurrency');
|
||||
checkAndIncrementPendingRequest = concurrency.checkAndIncrementPendingRequest;
|
||||
decrementPendingRequest = concurrency.decrementPendingRequest;
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const keys = await ioredisClient.keys(`${testPrefix}*`);
|
||||
if (keys.length > 0) {
|
||||
await Promise.all(keys.map((key) => ioredisClient!.del(key)));
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Error cleaning up test keys:', error);
|
||||
}
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
if (ioredisClient) {
|
||||
try {
|
||||
await ioredisClient.quit();
|
||||
} catch {
|
||||
try {
|
||||
ioredisClient.disconnect();
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
describe('Atomic Check and Increment', () => {
|
||||
test('should allow requests within the concurrency limit', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-allow-${Date.now()}`;
|
||||
|
||||
// First request - should be allowed (count = 1, limit = 2)
|
||||
const result1 = await checkAndIncrementPendingRequest(userId);
|
||||
expect(result1.allowed).toBe(true);
|
||||
expect(result1.pendingRequests).toBe(1);
|
||||
expect(result1.limit).toBe(2);
|
||||
|
||||
// Second request - should be allowed (count = 2, limit = 2)
|
||||
const result2 = await checkAndIncrementPendingRequest(userId);
|
||||
expect(result2.allowed).toBe(true);
|
||||
expect(result2.pendingRequests).toBe(2);
|
||||
});
|
||||
|
||||
test('should reject requests over the concurrency limit', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-reject-${Date.now()}`;
|
||||
|
||||
// Fill up to the limit
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
|
||||
// Third request - should be rejected (count would be 3, limit = 2)
|
||||
const result = await checkAndIncrementPendingRequest(userId);
|
||||
expect(result.allowed).toBe(false);
|
||||
expect(result.pendingRequests).toBe(3); // Reports the count that was over-limit
|
||||
});
|
||||
|
||||
test('should not leave stale counter after rejection (atomic rollback)', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-rollback-${Date.now()}`;
|
||||
|
||||
// Fill up to the limit
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
|
||||
// Attempt over-limit (should be rejected and atomically rolled back)
|
||||
const rejected = await checkAndIncrementPendingRequest(userId);
|
||||
expect(rejected.allowed).toBe(false);
|
||||
|
||||
// The key value should still be 2, not 3 — verify the Lua script decremented back
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const rawValue = await ioredisClient.get(key);
|
||||
expect(rawValue).toBe('2');
|
||||
});
|
||||
|
||||
test('should handle concurrent requests atomically (no over-admission)', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-concurrent-${Date.now()}`;
|
||||
|
||||
// Fire 20 concurrent requests for the same user (limit = 2)
|
||||
const results = await Promise.all(
|
||||
Array.from({ length: 20 }, () => checkAndIncrementPendingRequest(userId)),
|
||||
);
|
||||
|
||||
const allowed = results.filter((r) => r.allowed);
|
||||
const rejected = results.filter((r) => !r.allowed);
|
||||
|
||||
// Exactly 2 should be allowed (the concurrency limit)
|
||||
expect(allowed.length).toBe(2);
|
||||
expect(rejected.length).toBe(18);
|
||||
|
||||
// The key value should be exactly 2 after all atomic operations
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const rawValue = await ioredisClient.get(key);
|
||||
expect(rawValue).toBe('2');
|
||||
|
||||
// Clean up
|
||||
await decrementPendingRequest(userId);
|
||||
await decrementPendingRequest(userId);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Atomic Decrement', () => {
|
||||
test('should decrement pending requests', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-decrement-${Date.now()}`;
|
||||
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
|
||||
// Decrement once
|
||||
await decrementPendingRequest(userId);
|
||||
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const rawValue = await ioredisClient.get(key);
|
||||
expect(rawValue).toBe('1');
|
||||
});
|
||||
|
||||
test('should clean up key when count reaches zero', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-cleanup-${Date.now()}`;
|
||||
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await decrementPendingRequest(userId);
|
||||
|
||||
// Key should be deleted (not left as "0")
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const exists = await ioredisClient.exists(key);
|
||||
expect(exists).toBe(0);
|
||||
});
|
||||
|
||||
test('should clean up key on double-decrement (negative protection)', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-double-decr-${Date.now()}`;
|
||||
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await decrementPendingRequest(userId);
|
||||
await decrementPendingRequest(userId); // Double-decrement
|
||||
|
||||
// Key should be deleted, not negative
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const exists = await ioredisClient.exists(key);
|
||||
expect(exists).toBe(0);
|
||||
});
|
||||
|
||||
test('should allow new requests after decrement frees a slot', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-free-slot-${Date.now()}`;
|
||||
|
||||
// Fill to limit
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
|
||||
// Verify at limit
|
||||
const atLimit = await checkAndIncrementPendingRequest(userId);
|
||||
expect(atLimit.allowed).toBe(false);
|
||||
|
||||
// Free a slot
|
||||
await decrementPendingRequest(userId);
|
||||
|
||||
// Should now be allowed again
|
||||
const allowed = await checkAndIncrementPendingRequest(userId);
|
||||
expect(allowed.allowed).toBe(true);
|
||||
expect(allowed.pendingRequests).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('TTL Behavior', () => {
|
||||
test('should set TTL on the concurrency key', async () => {
|
||||
if (!ioredisClient) {
|
||||
return;
|
||||
}
|
||||
|
||||
const userId = `user-ttl-${Date.now()}`;
|
||||
await checkAndIncrementPendingRequest(userId);
|
||||
|
||||
const key = `PENDING_REQ:${userId}`;
|
||||
const ttl = await ioredisClient.ttl(key);
|
||||
expect(ttl).toBeGreaterThan(0);
|
||||
expect(ttl).toBeLessThanOrEqual(60);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -9,6 +9,40 @@ const LIMIT_CONCURRENT_MESSAGES = process.env.LIMIT_CONCURRENT_MESSAGES;
|
|||
const CONCURRENT_MESSAGE_MAX = math(process.env.CONCURRENT_MESSAGE_MAX, 2);
|
||||
const CONCURRENT_VIOLATION_SCORE = math(process.env.CONCURRENT_VIOLATION_SCORE, 1);
|
||||
|
||||
/**
|
||||
* Lua script for atomic check-and-increment.
|
||||
* Increments the key, sets TTL, and if over limit decrements back.
|
||||
* Returns positive count if allowed, negative count if rejected.
|
||||
* Single round-trip, fully atomic — eliminates the INCR/check/DECR race window.
|
||||
*/
|
||||
const CHECK_AND_INCREMENT_SCRIPT = `
|
||||
local key = KEYS[1]
|
||||
local limit = tonumber(ARGV[1])
|
||||
local ttl = tonumber(ARGV[2])
|
||||
local current = redis.call('INCR', key)
|
||||
redis.call('EXPIRE', key, ttl)
|
||||
if current > limit then
|
||||
redis.call('DECR', key)
|
||||
return -current
|
||||
end
|
||||
return current
|
||||
`;
|
||||
|
||||
/**
|
||||
* Lua script for atomic decrement-and-cleanup.
|
||||
* Decrements the key and deletes it if the count reaches zero or below.
|
||||
* Eliminates the DECR-then-DEL race window.
|
||||
*/
|
||||
const DECREMENT_SCRIPT = `
|
||||
local key = KEYS[1]
|
||||
local current = redis.call('DECR', key)
|
||||
if current <= 0 then
|
||||
redis.call('DEL', key)
|
||||
return 0
|
||||
end
|
||||
return current
|
||||
`;
|
||||
|
||||
/** Lazily initialized cache for pending requests (used only for in-memory fallback) */
|
||||
let pendingReqCache: ReturnType<typeof standardCache> | null = null;
|
||||
|
||||
|
|
@ -80,36 +114,28 @@ export async function checkAndIncrementPendingRequest(
|
|||
return { allowed: true, pendingRequests: 0, limit };
|
||||
}
|
||||
|
||||
// Use atomic Redis INCR when available to prevent race conditions
|
||||
// Use atomic Lua script when Redis is available to prevent race conditions.
|
||||
// A single EVAL round-trip atomically increments, checks, and decrements if over-limit.
|
||||
if (USE_REDIS && ioredisClient) {
|
||||
const key = buildKey(userId);
|
||||
try {
|
||||
// Pipeline ensures INCR and EXPIRE execute atomically in one round-trip
|
||||
// This prevents edge cases where crash between operations leaves key without TTL
|
||||
const pipeline = ioredisClient.pipeline();
|
||||
pipeline.incr(key);
|
||||
pipeline.expire(key, 60);
|
||||
const results = await pipeline.exec();
|
||||
const result = (await ioredisClient.eval(
|
||||
CHECK_AND_INCREMENT_SCRIPT,
|
||||
1,
|
||||
key,
|
||||
limit,
|
||||
60,
|
||||
)) as number;
|
||||
|
||||
if (!results || results[0][0]) {
|
||||
throw new Error('Pipeline execution failed');
|
||||
if (result < 0) {
|
||||
// Negative return means over-limit (absolute value is the count before decrement)
|
||||
const count = -result;
|
||||
logger.debug(`[concurrency] User ${userId} exceeded concurrent limit: ${count}/${limit}`);
|
||||
return { allowed: false, pendingRequests: count, limit };
|
||||
}
|
||||
|
||||
const newCount = results[0][1] as number;
|
||||
|
||||
if (newCount > limit) {
|
||||
// Over limit - decrement back and reject
|
||||
await ioredisClient.decr(key);
|
||||
logger.debug(
|
||||
`[concurrency] User ${userId} exceeded concurrent limit: ${newCount}/${limit}`,
|
||||
);
|
||||
return { allowed: false, pendingRequests: newCount, limit };
|
||||
}
|
||||
|
||||
logger.debug(
|
||||
`[concurrency] User ${userId} incremented pending requests: ${newCount}/${limit}`,
|
||||
);
|
||||
return { allowed: true, pendingRequests: newCount, limit };
|
||||
logger.debug(`[concurrency] User ${userId} incremented pending requests: ${result}/${limit}`);
|
||||
return { allowed: true, pendingRequests: result, limit };
|
||||
} catch (error) {
|
||||
logger.error('[concurrency] Redis atomic increment failed:', error);
|
||||
// On Redis error, allow the request to proceed (fail-open)
|
||||
|
|
@ -164,18 +190,12 @@ export async function decrementPendingRequest(userId: string): Promise<void> {
|
|||
return;
|
||||
}
|
||||
|
||||
// Use atomic Redis DECR when available
|
||||
// Use atomic Lua script to decrement and clean up zero/negative keys in one round-trip
|
||||
if (USE_REDIS && ioredisClient) {
|
||||
const key = buildKey(userId);
|
||||
try {
|
||||
const newCount = await ioredisClient.decr(key);
|
||||
if (newCount < 0) {
|
||||
// Counter went negative - reset to 0 and delete
|
||||
await ioredisClient.del(key);
|
||||
logger.debug(`[concurrency] User ${userId} pending requests cleared (was negative)`);
|
||||
} else if (newCount === 0) {
|
||||
// Clean up zero-value keys
|
||||
await ioredisClient.del(key);
|
||||
const newCount = (await ioredisClient.eval(DECREMENT_SCRIPT, 1, key)) as number;
|
||||
if (newCount === 0) {
|
||||
logger.debug(`[concurrency] User ${userId} pending requests cleared`);
|
||||
} else {
|
||||
logger.debug(`[concurrency] User ${userId} decremented pending requests: ${newCount}`);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue