⚛️ refactor: Redis Scalability Improvements for High-Throughput Deployments (#11840)

* fix: Redis scalability improvements for high-throughput deployments

  Replace INCR+check+DECR race in concurrency middleware with atomic Lua
  scripts. The old approach allowed 3-4 concurrent requests through a
  limit of 2 at 300 req/s because another request could slip between the
  INCR returning and the DECR executing. The Lua scripts run atomically
  on the Redis server, eliminating the race window entirely.

  Add exponential backoff with jitter to all three Redis retry strategies
  (ioredis single-node, cluster, keyv). Previously all instances retried
  at the same millisecond after an outage, causing a connection storm.

  Batch the RedisJobStore cleanup loop into parallel chunks of 50. With
  1000 stale jobs, this reduces cleanup from ~20s of sequential calls to
  ~2s. Also pipeline appendChunk (xadd + expire) into a single round-trip
  and refresh TTL on every chunk instead of only the first, preventing
  TTL expiry during long-running streams.

  Propagate publish errors in RedisEventTransport.emitDone and emitError
  so callers can detect dropped completion/error events. emitChunk is left
  as swallow-and-log because its callers fire-and-forget without await.

  Add jest.config.js for the API package with babel TypeScript support and
  path alias resolution. Fix existing stream integration tests that were
  silently broken due to missing USE_REDIS_CLUSTER=false env var.

* chore: Migrate Jest configuration from jest.config.js to jest.config.mjs

Removed the old jest.config.js file and integrated the Jest configuration into jest.config.mjs, adding Babel TypeScript support and path alias resolution. This change streamlines the configuration for the API package.

* fix: Ensure Redis retry delays do not exceed maximum configured delay

Updated the delay calculation in Redis retry strategies to enforce a maximum delay defined in the configuration. This change prevents excessive delays during reconnection attempts, improving overall connection stability and performance.

* fix: Update RedisJobStore cleanup to handle job failures gracefully

Changed the cleanup process in RedisJobStore to use Promise.allSettled instead of Promise.all, allowing for individual job failures to be logged without interrupting the entire cleanup operation. This enhances error handling and provides better visibility into issues during job cleanup.
This commit is contained in:
Danny Avila 2026-02-18 00:04:33 -05:00 committed by GitHub
parent 5ea59ecb2b
commit 3fa94e843c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 696 additions and 70 deletions

View file

@ -19,8 +19,11 @@ describe('RedisEventTransport Integration Tests', () => {
originalEnv = { ...process.env };
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
process.env.REDIS_KEY_PREFIX = testPrefix;
process.env.REDIS_PING_INTERVAL = '0';
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
jest.resetModules();
@ -890,4 +893,121 @@ describe('RedisEventTransport Integration Tests', () => {
subscriber.disconnect();
});
});
// Verifies the error-propagation contract of RedisEventTransport's publish
// paths: emitChunk swallows publish failures, while emitDone/emitError rethrow
// so callers can detect dropped completion/error events.
describe('Publish Error Propagation', () => {
test('should swallow emitChunk publish errors (callers fire-and-forget)', async () => {
const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
// Publisher stub whose publish() always rejects, simulating a lost Redis connection.
const mockPublisher = {
publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
};
// Minimal subscriber stub so the transport constructor can wire up listeners.
const mockSubscriber = {
on: jest.fn(),
subscribe: jest.fn().mockResolvedValue(undefined),
unsubscribe: jest.fn().mockResolvedValue(undefined),
};
const transport = new RedisEventTransport(
mockPublisher as unknown as Redis,
mockSubscriber as unknown as Redis,
);
const streamId = `error-prop-chunk-${Date.now()}`;
// emitChunk swallows errors because callers often fire-and-forget (no await).
// Throwing would cause unhandled promise rejections.
await expect(transport.emitChunk(streamId, { data: 'test' })).resolves.toBeUndefined();
transport.destroy();
});
test('should throw when emitDone publish fails', async () => {
const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
const mockPublisher = {
publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
};
const mockSubscriber = {
on: jest.fn(),
subscribe: jest.fn().mockResolvedValue(undefined),
unsubscribe: jest.fn().mockResolvedValue(undefined),
};
const transport = new RedisEventTransport(
mockPublisher as unknown as Redis,
mockSubscriber as unknown as Redis,
);
const streamId = `error-prop-done-${Date.now()}`;
// emitDone must surface the failure so callers know the completion event was dropped.
await expect(transport.emitDone(streamId, { finished: true })).rejects.toThrow(
'Redis connection lost',
);
transport.destroy();
});
test('should throw when emitError publish fails', async () => {
const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
const mockPublisher = {
publish: jest.fn().mockRejectedValue(new Error('Redis connection lost')),
};
const mockSubscriber = {
on: jest.fn(),
subscribe: jest.fn().mockResolvedValue(undefined),
unsubscribe: jest.fn().mockResolvedValue(undefined),
};
const transport = new RedisEventTransport(
mockPublisher as unknown as Redis,
mockSubscriber as unknown as Redis,
);
const streamId = `error-prop-error-${Date.now()}`;
// emitError likewise rethrows so a dropped error event is detectable.
await expect(transport.emitError(streamId, 'some error')).rejects.toThrow(
'Redis connection lost',
);
transport.destroy();
});
// Happy-path sanity check against a live Redis: rethrow-on-failure must not
// break normal delivery when publish succeeds.
test('should still deliver events successfully when publish succeeds', async () => {
if (!ioredisClient) {
console.warn('Redis not available, skipping test');
return;
}
const { RedisEventTransport } = await import('../implementations/RedisEventTransport');
const subscriber = (ioredisClient as Redis).duplicate();
const transport = new RedisEventTransport(ioredisClient, subscriber);
const streamId = `error-prop-success-${Date.now()}`;
const receivedChunks: unknown[] = [];
let doneEvent: unknown = null;
transport.subscribe(streamId, {
onChunk: (event) => receivedChunks.push(event),
onDone: (event) => {
doneEvent = event;
},
});
// Allow the Redis subscription to become active before publishing.
await new Promise((resolve) => setTimeout(resolve, 200));
// These should NOT throw
await transport.emitChunk(streamId, { text: 'hello' });
await transport.emitDone(streamId, { finished: true });
// Give pub/sub time to deliver before asserting.
await new Promise((resolve) => setTimeout(resolve, 200));
expect(receivedChunks.length).toBe(1);
expect(doneEvent).toEqual({ finished: true });
transport.destroy();
subscriber.disconnect();
});
});
});

View file

@ -24,8 +24,11 @@ describe('RedisJobStore Integration Tests', () => {
// Set up test environment
process.env.USE_REDIS = process.env.USE_REDIS ?? 'true';
process.env.USE_REDIS_CLUSTER = process.env.USE_REDIS_CLUSTER ?? 'false';
process.env.REDIS_URI = process.env.REDIS_URI ?? 'redis://127.0.0.1:6379';
process.env.REDIS_KEY_PREFIX = testPrefix;
process.env.REDIS_PING_INTERVAL = '0';
process.env.REDIS_RETRY_MAX_ATTEMPTS = '5';
jest.resetModules();
@ -1033,4 +1036,196 @@ describe('RedisJobStore Integration Tests', () => {
await instance2.destroy();
});
});
// Exercises RedisJobStore's batched stale-job cleanup (parallel chunks of 50
// per the implementation) against a live Redis instance, including the
// must-not-delete-valid-jobs invariant.
describe('Batched Cleanup', () => {
test('should clean up many stale jobs in parallel batches', async () => {
// Skip silently when no live Redis is available in this environment.
if (!ioredisClient) {
return;
}
const { RedisJobStore } = await import('../implementations/RedisJobStore');
// Very short TTL so jobs are immediately stale
const store = new RedisJobStore(ioredisClient, { runningTtl: 1 });
await store.initialize();
const jobCount = 75; // More than one batch of 50
const veryOldTimestamp = Date.now() - 10000; // 10 seconds ago
// Create many stale jobs directly in Redis
for (let i = 0; i < jobCount; i++) {
const streamId = `batch-cleanup-${Date.now()}-${i}`;
// Hash-tagged key (`{streamId}`) matches the store's own key scheme.
const jobKey = `stream:{${streamId}}:job`;
await ioredisClient.hmset(jobKey, {
streamId,
userId: 'batch-user',
status: 'running',
createdAt: veryOldTimestamp.toString(),
syncSent: '0',
});
await ioredisClient.sadd('stream:running', streamId);
}
// Verify jobs are in the running set
const runningBefore = await ioredisClient.scard('stream:running');
expect(runningBefore).toBeGreaterThanOrEqual(jobCount);
// Run cleanup - should process in batches of 50
const cleaned = await store.cleanup();
// >= rather than === because other tests' leftovers may also be reaped.
expect(cleaned).toBeGreaterThanOrEqual(jobCount);
await store.destroy();
});
test('should not clean up valid running jobs during batch cleanup', async () => {
if (!ioredisClient) {
return;
}
const { RedisJobStore } = await import('../implementations/RedisJobStore');
// Long TTL so the freshly created job is unambiguously NOT stale.
const store = new RedisJobStore(ioredisClient, { runningTtl: 1200 });
await store.initialize();
// Create a mix of valid and stale jobs
const validStreamId = `valid-job-${Date.now()}`;
await store.createJob(validStreamId, 'user-1', validStreamId);
const staleStreamId = `stale-job-${Date.now()}`;
const jobKey = `stream:{${staleStreamId}}:job`;
await ioredisClient.hmset(jobKey, {
streamId: staleStreamId,
userId: 'user-1',
status: 'running',
createdAt: (Date.now() - 2000000).toString(), // Very old
syncSent: '0',
});
await ioredisClient.sadd('stream:running', staleStreamId);
const cleaned = await store.cleanup();
expect(cleaned).toBeGreaterThanOrEqual(1);
// Valid job should still exist
const validJob = await store.getJob(validStreamId);
expect(validJob).not.toBeNull();
expect(validJob?.status).toBe('running');
await store.destroy();
});
});
// Covers appendChunk's pipelined XADD+EXPIRE behavior: TTL is set on the chunk
// stream, refreshed on EVERY append (not just the first), and chunk content
// survives the pipelining intact.
describe('appendChunk TTL Refresh', () => {
test('should set TTL on the chunk stream', async () => {
// Skip silently when no live Redis is available in this environment.
if (!ioredisClient) {
return;
}
const { RedisJobStore } = await import('../implementations/RedisJobStore');
const store = new RedisJobStore(ioredisClient, { runningTtl: 120 });
await store.initialize();
const streamId = `append-ttl-${Date.now()}`;
await store.createJob(streamId, 'user-1', streamId);
await store.appendChunk(streamId, {
event: 'on_message_delta',
data: { id: 'step-1', type: 'text', text: 'first' },
});
const chunkKey = `stream:{${streamId}}:chunks`;
// TTL must be live (> 0) and bounded by the configured running TTL.
const ttl = await ioredisClient.ttl(chunkKey);
expect(ttl).toBeGreaterThan(0);
expect(ttl).toBeLessThanOrEqual(120);
await store.destroy();
});
test('should refresh TTL on subsequent chunks (not just first)', async () => {
if (!ioredisClient) {
return;
}
const { RedisJobStore } = await import('../implementations/RedisJobStore');
const store = new RedisJobStore(ioredisClient, { runningTtl: 120 });
await store.initialize();
const streamId = `append-refresh-${Date.now()}`;
await store.createJob(streamId, 'user-1', streamId);
// Append first chunk
await store.appendChunk(streamId, {
event: 'on_message_delta',
data: { id: 'step-1', type: 'text', text: 'first' },
});
const chunkKey = `stream:{${streamId}}:chunks`;
const ttl1 = await ioredisClient.ttl(chunkKey);
expect(ttl1).toBeGreaterThan(0);
// Manually reduce TTL to simulate time passing
await ioredisClient.expire(chunkKey, 30);
const reducedTtl = await ioredisClient.ttl(chunkKey);
expect(reducedTtl).toBeLessThanOrEqual(30);
// Append another chunk - TTL should be refreshed back to running TTL
await store.appendChunk(streamId, {
event: 'on_message_delta',
data: { id: 'step-1', type: 'text', text: 'second' },
});
const ttl2 = await ioredisClient.ttl(chunkKey);
// Should be refreshed to ~120, not still ~30
expect(ttl2).toBeGreaterThan(30);
expect(ttl2).toBeLessThanOrEqual(120);
await store.destroy();
});
test('should store chunks correctly via pipeline', async () => {
if (!ioredisClient) {
return;
}
const { RedisJobStore } = await import('../implementations/RedisJobStore');
const store = new RedisJobStore(ioredisClient);
await store.initialize();
const streamId = `append-pipeline-${Date.now()}`;
await store.createJob(streamId, 'user-1', streamId);
// Three representative events: one run step plus two text deltas.
const chunks = [
{
event: 'on_run_step',
data: {
id: 'step-1',
runId: 'run-1',
index: 0,
stepDetails: { type: 'message_creation' },
},
},
{
event: 'on_message_delta',
data: { id: 'step-1', delta: { content: { type: 'text', text: 'Hello ' } } },
},
{
event: 'on_message_delta',
data: { id: 'step-1', delta: { content: { type: 'text', text: 'world!' } } },
},
];
for (const chunk of chunks) {
await store.appendChunk(streamId, chunk);
}
// Verify all chunks were stored
const chunkKey = `stream:{${streamId}}:chunks`;
const len = await ioredisClient.xlen(chunkKey);
expect(len).toBe(3);
// Verify content can be reconstructed
const content = await store.getContentParts(streamId);
expect(content).not.toBeNull();
expect(content!.content.length).toBeGreaterThan(0);
await store.destroy();
});
});
});

View file

@ -461,6 +461,7 @@ export class RedisEventTransport implements IEventTransport {
await this.publisher.publish(channel, JSON.stringify(message));
} catch (err) {
logger.error(`[RedisEventTransport] Failed to publish done:`, err);
throw err;
}
}
@ -477,6 +478,7 @@ export class RedisEventTransport implements IEventTransport {
await this.publisher.publish(channel, JSON.stringify(message));
} catch (err) {
logger.error(`[RedisEventTransport] Failed to publish error:`, err);
throw err;
}
}

View file

@ -302,32 +302,46 @@ export class RedisJobStore implements IJobStore {
}
}
for (const streamId of streamIds) {
const job = await this.getJob(streamId);
// Process in batches of 50 to avoid sequential per-job round-trips
const BATCH_SIZE = 50;
for (let i = 0; i < streamIds.length; i += BATCH_SIZE) {
const batch = streamIds.slice(i, i + BATCH_SIZE);
const results = await Promise.allSettled(
batch.map(async (streamId) => {
const job = await this.getJob(streamId);
// Job no longer exists (TTL expired) - remove from set
if (!job) {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
// Job no longer exists (TTL expired) - remove from set
if (!job) {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
return 1;
}
// Job completed but still in running set (shouldn't happen, but handle it)
if (job.status !== 'running') {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
cleaned++;
continue;
}
// Job completed but still in running set (shouldn't happen, but handle it)
if (job.status !== 'running') {
await this.redis.srem(KEYS.runningJobs, streamId);
this.localGraphCache.delete(streamId);
this.localCollectedUsageCache.delete(streamId);
return 1;
}
// Stale running job (failsafe - running for > configured TTL)
if (now - job.createdAt > this.ttl.running * 1000) {
logger.warn(`[RedisJobStore] Cleaning up stale job: ${streamId}`);
await this.deleteJob(streamId);
cleaned++;
// Stale running job (failsafe - running for > configured TTL)
if (now - job.createdAt > this.ttl.running * 1000) {
logger.warn(`[RedisJobStore] Cleaning up stale job: ${streamId}`);
await this.deleteJob(streamId);
return 1;
}
return 0;
}),
);
for (const result of results) {
if (result.status === 'fulfilled') {
cleaned += result.value;
} else {
logger.warn(`[RedisJobStore] Cleanup failed for a job:`, result.reason);
}
}
}
@ -592,16 +606,14 @@ export class RedisJobStore implements IJobStore {
*/
/**
 * Appends a streaming event to the job's chunk stream in Redis.
 *
 * XADD and EXPIRE are issued through a single pipeline, so each append costs
 * one network round-trip instead of two. The TTL is refreshed on EVERY chunk
 * rather than only the first: EXPIRE is O(1) and idempotent, and refreshing
 * each time prevents the chunk stream from expiring mid-stream during
 * long-running generations (the previous first-chunk-only approach could let
 * the TTL lapse).
 *
 * @param streamId - Identifier of the stream whose chunk log is appended to.
 * @param event - Arbitrary event payload; serialized to JSON for storage.
 */
async appendChunk(streamId: string, event: unknown): Promise<void> {
  const key = KEYS.chunks(streamId);
  const pipeline = this.redis.pipeline();
  pipeline.xadd(key, '*', 'event', JSON.stringify(event));
  pipeline.expire(key, this.ttl.running);
  await pipeline.exec();
}
/**