Mirror of https://github.com/danny-avila/LibreChat.git
* feat: replace unsupported MongoDB aggregation operators for FerretDB compatibility

  Replace the $lookup, $unwind, $sample, $replaceRoot, and $addFields aggregation stages, which are unsupported on FerretDB v2.x (postgres-documentdb backend).

  - Prompt.js: replace $lookup/$unwind/$project pipelines with find().select().lean() + an attachProductionPrompts() batch helper. Replace $group/$replaceRoot/$sample in getRandomPromptGroups with distinct() + a Fisher-Yates shuffle.
  - Agent/Prompt migration scripts: replace the $lookup anti-join pattern with distinct() + $nin two-step queries for finding un-migrated resources.

  All replacement patterns verified against FerretDB v2.7.0.

  Co-authored-by: Cursor <cursoragent@cursor.com>

* fix: use $pullAll for simple array removals, fix memberIds type mismatches

  Replace $pull with $pullAll for exact-value scalar array removals. Both operators work on MongoDB and FerretDB, but $pullAll is more explicit for exact matching (no condition expressions).

  Fix critical type-mismatch bugs where ObjectId values were used against String[] memberIds arrays in Group queries:
  - config/delete-user.js: use the string uid instead of ObjectId user._id
  - e2e/setup/cleanupUser.ts: convert with userId.toString() before the query

  Harden PermissionService.bulkUpdateResourcePermissions abort handling to prevent a crash when abortTransaction is called after commitTransaction.

  All changes verified against FerretDB v2.7.0 and MongoDB Memory Server.

  Co-authored-by: Cursor <cursoragent@cursor.com>

* fix: harden transaction support probe for FerretDB compatibility

  Commit the transaction before aborting in the supportsTransactions probe, and wrap abortTransaction in a try-catch to prevent crashes when abort is called after a successful commit (observed behavior on FerretDB).

  Co-authored-by: Cursor <cursoragent@cursor.com>

* feat: add FerretDB compatibility test suite, retry utilities, and CI config

  Add a comprehensive FerretDB integration test suite covering:
  - $pullAll scalar array operations
  - $pull with subdocument conditions
  - $lookup replacement (find + manual join)
  - $sample replacement (distinct + Fisher-Yates)
  - $bit and $bitsAllSet operations
  - the migration anti-join pattern
  - multi-tenancy (useDb, scaling, write amplification)
  - a sharding proof-of-concept
  - production operations (backup/restore, schema migration, deadlock retry)

  Add a production retryWithBackoff utility for deadlock recovery during concurrent index creation on FerretDB/DocumentDB backends.

  Add UserController.spec.js tests for deleteUserController (runs in CI).

  Configure jest and eslint to isolate FerretDB tests from CI pipelines:
  - packages/data-schemas/jest.config.mjs: ignore the misc/ directory
  - eslint.config.mjs: ignore packages/data-schemas/misc/

  Include a Docker Compose config for local FerretDB v2.7 + postgres-documentdb, a dedicated jest/tsconfig for the test files, and a multi-tenancy findings doc.

  Co-authored-by: Cursor <cursoragent@cursor.com>

* style: brace formatting in aclEntry.ts modifyPermissionBits

  Co-authored-by: Cursor <cursoragent@cursor.com>

* refactor: reorganize retry utilities and update imports

  - Moved the retryWithBackoff utility to a new file, retry.ts, for better structure.
  - Updated imports in orgOperations.ferretdb.spec.ts to reflect the new location of the retry utilities.
  - Removed the old import of retryWithBackoff from index.ts to streamline exports.
* test: add $pullAll coverage for ConversationTag and PermissionService

  Add integration tests for deleteConversationTag, verifying that $pullAll removes tags from conversations correctly, and for syncUserEntraGroupMemberships, verifying that $pullAll removes the user from non-matching Entra groups while preserving local group membership.

  Co-authored-by: Cursor <cursoragent@cursor.com>

Illustrative sketches of three of the patterns these commits describe (the $sample replacement, the $pullAll removals, and the hardened transaction probe) follow this list.
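First, a minimal sketch of the $sample replacement: distinct() followed by an in-memory Fisher-Yates shuffle. The function and parameter names are placeholders, not the actual Prompt.js code; only the distinct-plus-shuffle pattern comes from the commit description.

    import type { Model } from 'mongoose';

    /** Pick up to `limit` random document ids without the $sample aggregation stage. */
    async function sampleRandomIds(model: Model<unknown>, limit: number): Promise<unknown[]> {
      // distinct() is supported by FerretDB, unlike $group/$replaceRoot/$sample.
      const ids = await model.distinct('_id');

      // Fisher-Yates shuffle, then take the first `limit` entries.
      for (let i = ids.length - 1; i > 0; i--) {
        const j = Math.floor(Math.random() * (i + 1));
        [ids[i], ids[j]] = [ids[j], ids[i]];
      }
      return ids.slice(0, limit);
    }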
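Next, a sketch of the $pullAll pattern and the memberIds type fix. The schema shapes are assumptions made for the example; the real call sites are deleteConversationTag and syncUserEntraGroupMemberships.

    import type { Model, Types } from 'mongoose';

    // Remove an exact tag value from every conversation a user owns.
    // $pullAll takes exact values rather than condition expressions, so the intent is
    // explicit, and it behaves the same on MongoDB and FerretDB.
    async function removeTagEverywhere(
      Conversation: Model<{ user: string; tags: string[] }>,
      userId: string,
      deletedTag: string,
    ): Promise<void> {
      await Conversation.updateMany({ user: userId }, { $pullAll: { tags: [deletedTag] } });
    }

    // memberIds is declared as String[], so the id must be pulled as a string,
    // not as an ObjectId (the type-mismatch fix noted above).
    async function removeUserFromGroups(
      Group: Model<{ memberIds: string[] }>,
      userId: Types.ObjectId,
    ): Promise<void> {
      await Group.updateMany(
        { memberIds: userId.toString() },
        { $pullAll: { memberIds: [userId.toString()] } },
      );
    }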
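Finally, a sketch of the hardened transaction probe. The body is assumed for illustration; only the commit-before-abort ordering and the guarded abort come from the change description.

    import type { Connection } from 'mongoose';

    async function supportsTransactions(connection: Connection): Promise<boolean> {
      const session = await connection.startSession();
      try {
        session.startTransaction();
        // Any trivial read inside the transaction would do here.
        await connection.db!.collection('_txn_probe').findOne({}, { session });
        await session.commitTransaction(); // commit first, before any abort attempt
        return true;
      } catch {
        return false;
      } finally {
        try {
          // Aborting after a successful commit has been observed to throw on FerretDB.
          await session.abortTransaction();
        } catch {
          /* ignore */
        }
        await session.endSession();
      }
    }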
675 lines · 23 KiB · TypeScript
import mongoose, { Schema, type Connection, type Model } from 'mongoose';
import {
  actionSchema,
  agentSchema,
  agentApiKeySchema,
  agentCategorySchema,
  assistantSchema,
  balanceSchema,
  bannerSchema,
  conversationTagSchema,
  convoSchema,
  fileSchema,
  keySchema,
  messageSchema,
  pluginAuthSchema,
  presetSchema,
  projectSchema,
  promptSchema,
  promptGroupSchema,
  roleSchema,
  sessionSchema,
  shareSchema,
  tokenSchema,
  toolCallSchema,
  transactionSchema,
  userSchema,
  memorySchema,
  groupSchema,
} from '~/schema';
import accessRoleSchema from '~/schema/accessRole';
import mcpServerSchema from '~/schema/mcpServer';
import aclEntrySchema from '~/schema/aclEntry';
import { initializeOrgCollections, createIndexesWithRetry, retryWithBackoff } from '~/utils/retry';

/**
 * Production operations tests for FerretDB multi-tenancy:
 * 1. Retry utility under simulated and real deadlock conditions
 * 2. Programmatic per-org backup/restore (driver-level, no mongodump)
 * 3. Schema migration across existing org databases
 *
 * Run:
 *   FERRETDB_URI="mongodb://ferretdb:ferretdb@127.0.0.1:27020/ops_test" \
 *   npx jest orgOperations.ferretdb --testTimeout=300000
 */
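/*
 * Assumed shape of the retry helper exercised below (the real definition lives in
 * ~/utils/retry). The tests call retryWithBackoff(fn, label, options?) where options
 * may include:
 *   maxAttempts                    - give up after this many tries
 *   baseDelayMs                    - delay before the first retry
 *   maxDelayMs                     - cap on the exponential backoff delay
 *   jitter                         - randomize delays when true
 *   onRetry(err, attempt, delayMs) - callback invoked before each retry sleep
 */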

const FERRETDB_URI = process.env.FERRETDB_URI;
const describeIfFerretDB = FERRETDB_URI ? describe : describe.skip;

const DB_PREFIX = 'ops_test_';

const MODEL_SCHEMAS: Record<string, Schema> = {
  User: userSchema,
  Token: tokenSchema,
  Session: sessionSchema,
  Balance: balanceSchema,
  Conversation: convoSchema,
  Message: messageSchema,
  Agent: agentSchema,
  AgentApiKey: agentApiKeySchema,
  AgentCategory: agentCategorySchema,
  MCPServer: mcpServerSchema,
  Role: roleSchema,
  Action: actionSchema,
  Assistant: assistantSchema,
  File: fileSchema,
  Banner: bannerSchema,
  Project: projectSchema,
  Key: keySchema,
  PluginAuth: pluginAuthSchema,
  Transaction: transactionSchema,
  Preset: presetSchema,
  Prompt: promptSchema,
  PromptGroup: promptGroupSchema,
  ConversationTag: conversationTagSchema,
  SharedLink: shareSchema,
  ToolCall: toolCallSchema,
  MemoryEntry: memorySchema,
  AccessRole: accessRoleSchema,
  AclEntry: aclEntrySchema,
  Group: groupSchema,
};

const MODEL_COUNT = Object.keys(MODEL_SCHEMAS).length;

function registerModels(conn: Connection): Record<string, Model<unknown>> {
  const models: Record<string, Model<unknown>> = {};
  for (const [name, schema] of Object.entries(MODEL_SCHEMAS)) {
    models[name] = conn.models[name] || conn.model(name, schema);
  }
  return models;
}

// ─── BACKUP/RESTORE UTILITIES ───────────────────────────────────────────────

interface OrgBackup {
  orgId: string;
  timestamp: Date;
  collections: Record<string, unknown[]>;
}

/** Dump all collections from an org database to an in-memory structure */
async function backupOrg(conn: Connection, orgId: string): Promise<OrgBackup> {
  const collectionNames = (await conn.db!.listCollections().toArray()).map((c) => c.name);
  const collections: Record<string, unknown[]> = {};

  for (const name of collectionNames) {
    if (name.startsWith('system.')) {
      continue;
    }
    const docs = await conn.db!.collection(name).find({}).toArray();
    collections[name] = docs;
  }

  return { orgId, timestamp: new Date(), collections };
}

/** Restore collections from a backup into a target connection */
async function restoreOrg(
  conn: Connection,
  backup: OrgBackup,
): Promise<{ collectionsRestored: number; docsRestored: number }> {
  let docsRestored = 0;

  for (const [name, docs] of Object.entries(backup.collections)) {
    if (docs.length === 0) {
      continue;
    }
    const collection = conn.db!.collection(name);
    await collection.insertMany(docs as Array<Record<string, unknown>>);
    docsRestored += docs.length;
  }

  return { collectionsRestored: Object.keys(backup.collections).length, docsRestored };
}
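
// Typical round trip with the helpers above (the org id here is hypothetical):
//
//   const src = baseConn.useDb(`${DB_PREFIX}org_acme`, { useCache: true });
//   const backup = await backupOrg(src, 'acme');
//   const dst = baseConn.useDb(`${DB_PREFIX}org_acme_copy`, { useCache: true });
//   await restoreOrg(dst, backup);
//
// insertMany writes the original documents back verbatim, so _id values and BSON
// types survive the round trip; the tests below assert exactly that.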

// ─── MIGRATION UTILITIES ────────────────────────────────────────────────────

interface MigrationResult {
  orgId: string;
  newCollections: string[];
  indexResults: Array<{ model: string; created: boolean; ms: number }>;
  totalMs: number;
}

/** Migrate a single org: ensure all collections exist and all indexes are current */
async function migrateOrg(
  conn: Connection,
  orgId: string,
  schemas: Record<string, Schema>,
): Promise<MigrationResult> {
  const t0 = Date.now();
  const models = registerModels(conn);
  const existingCollections = new Set(
    (await conn.db!.listCollections().toArray()).map((c) => c.name),
  );

  const newCollections: string[] = [];
  const indexResults: Array<{ model: string; created: boolean; ms: number }> = [];

  for (const [name, model] of Object.entries(models)) {
    const collName = model.collection.collectionName;
    const isNew = !existingCollections.has(collName);
    if (isNew) {
      newCollections.push(name);
    }

    const mt0 = Date.now();
    await model.createCollection();
    await createIndexesWithRetry(model);
    indexResults.push({ model: name, created: isNew, ms: Date.now() - mt0 });
  }

  return { orgId, newCollections, indexResults, totalMs: Date.now() - t0 };
}

/** Migrate all orgs in sequence with progress reporting */
async function migrateAllOrgs(
  baseConn: Connection,
  orgIds: string[],
  schemas: Record<string, Schema>,
  onProgress?: (completed: number, total: number, result: MigrationResult) => void,
): Promise<MigrationResult[]> {
  const results: MigrationResult[] = [];

  for (let i = 0; i < orgIds.length; i++) {
    const orgId = orgIds[i];
    const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
    const result = await migrateOrg(conn, orgId, schemas);
    results.push(result);
    if (onProgress) {
      onProgress(i + 1, orgIds.length, result);
    }
  }

  return results;
}
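
// Sketch of how a rollout could drive migrateAllOrgs (the org ids are placeholders;
// a real list would come from wherever tenants are registered):
//
//   await migrateAllOrgs(baseConn, ['org_a', 'org_b'], MODEL_SCHEMAS, (done, total, r) =>
//     console.log(`${done}/${total} ${r.orgId}: ${r.totalMs}ms`),
//   );
//
// Each org gets createCollection() plus createIndexesWithRetry() for every registered model.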

// ─── TESTS ──────────────────────────────────────────────────────────────────

describeIfFerretDB('Org Operations (Production)', () => {
  const createdDbs: string[] = [];
  let baseConn: Connection;

  beforeAll(async () => {
    baseConn = await mongoose.createConnection(FERRETDB_URI as string).asPromise();
  });

  afterAll(async () => {
    for (const db of createdDbs) {
      try {
        await baseConn.useDb(db, { useCache: false }).dropDatabase();
      } catch {
        /* best-effort */
      }
    }
    await baseConn.close();
  }, 120_000);

  // ─── RETRY UTILITY ──────────────────────────────────────────────────────

  describe('retryWithBackoff', () => {
    it('succeeds on first attempt when no error', async () => {
      let calls = 0;
      const result = await retryWithBackoff(async () => {
        calls++;
        return 'ok';
      }, 'test-op');
      expect(result).toBe('ok');
      expect(calls).toBe(1);
    });

    it('retries on deadlock error and eventually succeeds', async () => {
      let calls = 0;
      const result = await retryWithBackoff(
        async () => {
          calls++;
          if (calls < 3) {
            throw new Error('deadlock detected');
          }
          return 'recovered';
        },
        'deadlock-test',
        { baseDelayMs: 10, jitter: false },
      );

      expect(result).toBe('recovered');
      expect(calls).toBe(3);
    });

    it('does not retry on non-retryable errors', async () => {
      let calls = 0;
      await expect(
        retryWithBackoff(
          async () => {
            calls++;
            throw new Error('validation failed');
          },
          'non-retryable',
          { baseDelayMs: 10 },
        ),
      ).rejects.toThrow('validation failed');
      expect(calls).toBe(1);
    });

    it('exhausts max attempts and throws', async () => {
      let calls = 0;
      await expect(
        retryWithBackoff(
          async () => {
            calls++;
            throw new Error('deadlock detected');
          },
          'exhausted',
          { maxAttempts: 3, baseDelayMs: 10, jitter: false },
        ),
      ).rejects.toThrow('deadlock');
      expect(calls).toBe(3);
    });

    it('respects maxDelayMs cap', async () => {
      const delays: number[] = [];
      let calls = 0;

      await retryWithBackoff(
        async () => {
          calls++;
          if (calls < 4) {
            throw new Error('deadlock detected');
          }
          return 'ok';
        },
        'delay-cap',
        {
          baseDelayMs: 100,
          maxDelayMs: 250,
          jitter: false,
          onRetry: (_err, _attempt, delay) => delays.push(delay),
        },
      );

      expect(delays[0]).toBe(100);
      expect(delays[1]).toBe(200);
      expect(delays[2]).toBe(250);
    });
  });

  // ─── REAL DEADLOCK RETRY ────────────────────────────────────────────────

  describe('initializeOrgCollections with retry', () => {
    it('provisions 5 orgs sequentially using the production utility', async () => {
      const orgIds = ['retry_1', 'retry_2', 'retry_3', 'retry_4', 'retry_5'];
      const results: Array<{ orgId: string; ms: number; models: number }> = [];

      for (const orgId of orgIds) {
        const dbName = `${DB_PREFIX}org_${orgId}`;
        createdDbs.push(dbName);
        const conn = baseConn.useDb(dbName, { useCache: true });
        const models = registerModels(conn);

        const { totalMs } = await initializeOrgCollections(models, {
          baseDelayMs: 50,
          maxAttempts: 5,
        });
        results.push({ orgId, ms: totalMs, models: Object.keys(models).length });
      }

      const totalMs = results.reduce((s, r) => s + r.ms, 0);
      console.log(`[Retry] 5 orgs provisioned in ${totalMs}ms:`);
      for (const r of results) {
        console.log(` ${r.orgId}: ${r.ms}ms (${r.models} models)`);
      }

      expect(results.every((r) => r.models === MODEL_COUNT)).toBe(true);
    }, 120_000);
  });

  // ─── BACKUP/RESTORE ─────────────────────────────────────────────────────

  describe('per-org backup and restore', () => {
    const sourceOrg = 'backup_src';
    const targetOrg = 'backup_dst';

    beforeAll(async () => {
      const srcDb = `${DB_PREFIX}org_${sourceOrg}`;
      createdDbs.push(srcDb, `${DB_PREFIX}org_${targetOrg}`);
      const srcConn = baseConn.useDb(srcDb, { useCache: true });
      const models = registerModels(srcConn);
      await initializeOrgCollections(models);

      await models.User.create([
        { name: 'Alice', email: 'alice@backup.test', username: 'alice' },
        { name: 'Bob', email: 'bob@backup.test', username: 'bob' },
        { name: 'Charlie', email: 'charlie@backup.test', username: 'charlie' },
      ]);

      await models.Conversation.create([
        {
          conversationId: 'conv_1',
          user: 'alice_id',
          title: 'Test conversation 1',
          endpoint: 'openAI',
          model: 'gpt-4',
        },
        {
          conversationId: 'conv_2',
          user: 'bob_id',
          title: 'Test conversation 2',
          endpoint: 'openAI',
          model: 'gpt-4',
        },
      ]);

      await models.Message.create([
        {
          messageId: 'msg_1',
          conversationId: 'conv_1',
          user: 'alice_id',
          sender: 'user',
          text: 'Hello world',
          isCreatedByUser: true,
        },
        {
          messageId: 'msg_2',
          conversationId: 'conv_1',
          user: 'alice_id',
          sender: 'GPT-4',
          text: 'Hi there!',
          isCreatedByUser: false,
        },
      ]);

      const agentId = new mongoose.Types.ObjectId();
      await models.Agent.create({
        id: `agent_${agentId}`,
        name: 'Test Agent',
        author: new mongoose.Types.ObjectId(),
        description: 'A test agent for backup',
        provider: 'openAI',
        model: 'gpt-4',
      });
    }, 60_000);

    it('backs up all collections from the source org', async () => {
      const srcConn = baseConn.useDb(`${DB_PREFIX}org_${sourceOrg}`, { useCache: true });
      const backup = await backupOrg(srcConn, sourceOrg);

      console.log(`[Backup] ${sourceOrg}:`);
      console.log(` Timestamp: ${backup.timestamp.toISOString()}`);
      console.log(` Collections: ${Object.keys(backup.collections).length}`);
      let totalDocs = 0;
      for (const [name, docs] of Object.entries(backup.collections)) {
        if (docs.length > 0) {
          console.log(` ${name}: ${docs.length} docs`);
          totalDocs += docs.length;
        }
      }
      console.log(` Total documents: ${totalDocs}`);

      expect(Object.keys(backup.collections).length).toBeGreaterThanOrEqual(4);
      expect(backup.collections['users']?.length).toBe(3);
      expect(backup.collections['conversations']?.length).toBe(2);
      expect(backup.collections['messages']?.length).toBe(2);
    }, 30_000);

    it('restores backup to a fresh org database', async () => {
      const srcConn = baseConn.useDb(`${DB_PREFIX}org_${sourceOrg}`, { useCache: true });
      const backup = await backupOrg(srcConn, sourceOrg);

      const dstConn = baseConn.useDb(`${DB_PREFIX}org_${targetOrg}`, { useCache: true });
      const dstModels = registerModels(dstConn);
      await initializeOrgCollections(dstModels);

      const { collectionsRestored, docsRestored } = await restoreOrg(dstConn, backup);

      console.log(
        `[Restore] ${targetOrg}: ${collectionsRestored} collections, ${docsRestored} docs`,
      );

      expect(docsRestored).toBeGreaterThanOrEqual(7);
    }, 60_000);

    it('verifies restored data matches source exactly', async () => {
      const srcConn = baseConn.useDb(`${DB_PREFIX}org_${sourceOrg}`, { useCache: true });
      const dstConn = baseConn.useDb(`${DB_PREFIX}org_${targetOrg}`, { useCache: true });

      const srcUsers = await srcConn.db!.collection('users').find({}).sort({ email: 1 }).toArray();
      const dstUsers = await dstConn.db!.collection('users').find({}).sort({ email: 1 }).toArray();

      expect(dstUsers.length).toBe(srcUsers.length);
      for (let i = 0; i < srcUsers.length; i++) {
        expect(dstUsers[i].name).toBe(srcUsers[i].name);
        expect(dstUsers[i].email).toBe(srcUsers[i].email);
        expect(dstUsers[i]._id.toString()).toBe(srcUsers[i]._id.toString());
      }

      const srcMsgs = await srcConn
        .db!.collection('messages')
        .find({})
        .sort({ messageId: 1 })
        .toArray();
      const dstMsgs = await dstConn
        .db!.collection('messages')
        .find({})
        .sort({ messageId: 1 })
        .toArray();

      expect(dstMsgs.length).toBe(srcMsgs.length);
      for (let i = 0; i < srcMsgs.length; i++) {
        expect(dstMsgs[i].messageId).toBe(srcMsgs[i].messageId);
        expect(dstMsgs[i].text).toBe(srcMsgs[i].text);
        expect(dstMsgs[i]._id.toString()).toBe(srcMsgs[i]._id.toString());
      }

      const srcConvos = await srcConn
        .db!.collection('conversations')
        .find({})
        .sort({ conversationId: 1 })
        .toArray();
      const dstConvos = await dstConn
        .db!.collection('conversations')
        .find({})
        .sort({ conversationId: 1 })
        .toArray();

      expect(dstConvos.length).toBe(srcConvos.length);
      for (let i = 0; i < srcConvos.length; i++) {
        expect(dstConvos[i].conversationId).toBe(srcConvos[i].conversationId);
        expect(dstConvos[i].title).toBe(srcConvos[i].title);
      }

      console.log('[Restore] Data integrity verified: _ids, fields, and counts match exactly');
    }, 30_000);

    it('verifies BSON type preservation (ObjectId, Date, Number)', async () => {
      const dstConn = baseConn.useDb(`${DB_PREFIX}org_${targetOrg}`, { useCache: true });

      const user = await dstConn.db!.collection('users').findOne({ email: 'alice@backup.test' });
      expect(user).toBeDefined();
      expect(user!._id).toBeInstanceOf(mongoose.Types.ObjectId);
      expect(user!.createdAt).toBeInstanceOf(Date);

      const agent = await dstConn.db!.collection('agents').findOne({});
      expect(agent).toBeDefined();
      expect(agent!._id).toBeInstanceOf(mongoose.Types.ObjectId);
      expect(typeof agent!.name).toBe('string');

      console.log('[Restore] BSON types preserved: ObjectId, Date, String all correct');
    });

    it('measures backup and restore performance', async () => {
      const srcConn = baseConn.useDb(`${DB_PREFIX}org_${sourceOrg}`, { useCache: true });

      const backupStart = Date.now();
      const backup = await backupOrg(srcConn, sourceOrg);
      const backupMs = Date.now() - backupStart;

      const freshDb = `${DB_PREFIX}org_perf_restore`;
      createdDbs.push(freshDb);
      const freshConn = baseConn.useDb(freshDb, { useCache: false });
      const freshModels = registerModels(freshConn);
      await initializeOrgCollections(freshModels);

      const restoreStart = Date.now();
      await restoreOrg(freshConn, backup);
      const restoreMs = Date.now() - restoreStart;

      const totalDocs = Object.values(backup.collections).reduce((s, d) => s + d.length, 0);
      console.log(
        `[Perf] Backup: ${backupMs}ms (${totalDocs} docs across ${Object.keys(backup.collections).length} collections)`,
      );
      console.log(`[Perf] Restore: ${restoreMs}ms`);

      expect(backupMs).toBeLessThan(5000);
      expect(restoreMs).toBeLessThan(5000);
    }, 60_000);
  });

  // ─── SCHEMA MIGRATION ──────────────────────────────────────────────────

  describe('schema migration across orgs', () => {
    const migrationOrgs = ['mig_1', 'mig_2', 'mig_3', 'mig_4', 'mig_5'];

    beforeAll(async () => {
      for (const orgId of migrationOrgs) {
        const dbName = `${DB_PREFIX}org_${orgId}`;
        createdDbs.push(dbName);
        const conn = baseConn.useDb(dbName, { useCache: true });
        const models = registerModels(conn);
        await initializeOrgCollections(models);

        await models.User.create({
          name: `User ${orgId}`,
          email: `user@${orgId}.test`,
          username: orgId,
        });
      }
    }, 120_000);

    it('createIndexes is idempotent (no-op for existing indexes)', async () => {
      const conn = baseConn.useDb(`${DB_PREFIX}org_mig_1`, { useCache: true });
      const models = registerModels(conn);

      const beforeIndexes = await models.User.collection.indexes();

      const t0 = Date.now();
      await initializeOrgCollections(models);
      const ms = Date.now() - t0;

      const afterIndexes = await models.User.collection.indexes();

      expect(afterIndexes.length).toBe(beforeIndexes.length);
      console.log(
        `[Migration] Idempotent re-init: ${ms}ms (indexes unchanged: ${beforeIndexes.length})`,
      );
    }, 60_000);

    it('adds a new collection to all existing orgs', async () => {
      const newSchema = new Schema(
        {
          orgId: { type: String, index: true },
          eventType: { type: String, required: true, index: true },
          payload: Schema.Types.Mixed,
          userId: { type: Schema.Types.ObjectId, index: true },
        },
        { timestamps: true },
      );
      newSchema.index({ orgId: 1, eventType: 1, createdAt: -1 });

      for (const orgId of migrationOrgs) {
        const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
        const AuditLog = conn.models['AuditLog'] || conn.model('AuditLog', newSchema);
        await AuditLog.createCollection();
        await createIndexesWithRetry(AuditLog);
      }

      for (const orgId of migrationOrgs) {
        const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
        const collections = (await conn.db!.listCollections().toArray()).map((c) => c.name);
        expect(collections).toContain('auditlogs');

        const indexes = await conn.db!.collection('auditlogs').indexes();
        expect(indexes.length).toBeGreaterThanOrEqual(4);
      }

      console.log(
        `[Migration] New collection 'auditlogs' added to ${migrationOrgs.length} orgs with 4+ indexes`,
      );
    }, 60_000);

    it('adds a new index to an existing collection across all orgs', async () => {
      const indexSpec = { username: 1, createdAt: -1 };

      for (const orgId of migrationOrgs) {
        const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
        await retryWithBackoff(
          () => conn.db!.collection('users').createIndex(indexSpec, { background: true }),
          `createIndex(users, username+createdAt) for ${orgId}`,
        );
      }

      for (const orgId of migrationOrgs) {
        const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
        const indexes = await conn.db!.collection('users').indexes();
        const hasNewIdx = indexes.some(
          (idx: Record<string, unknown>) => JSON.stringify(idx.key) === JSON.stringify(indexSpec),
        );
        expect(hasNewIdx).toBe(true);
      }

      console.log(
        `[Migration] New compound index added to 'users' across ${migrationOrgs.length} orgs`,
      );
    }, 60_000);

    it('runs migrateAllOrgs and reports progress', async () => {
      const progress: string[] = [];

      const results = await migrateAllOrgs(
        baseConn,
        migrationOrgs,
        MODEL_SCHEMAS,
        (completed, total, result) => {
          progress.push(
            `${completed}/${total}: ${result.orgId} — ${result.totalMs}ms, ${result.newCollections.length} new collections`,
          );
        },
      );

      console.log(`[Migration] Full migration across ${migrationOrgs.length} orgs:`);
      for (const p of progress) {
        console.log(` ${p}`);
      }

      const totalMs = results.reduce((s, r) => s + r.totalMs, 0);
      const avgMs = Math.round(totalMs / results.length);
      console.log(` Total: ${totalMs}ms, avg: ${avgMs}ms/org`);

      expect(results).toHaveLength(migrationOrgs.length);
      expect(results.every((r) => r.indexResults.length >= MODEL_COUNT)).toBe(true);
    }, 120_000);

    it('verifies existing data is preserved after migration', async () => {
      for (const orgId of migrationOrgs) {
        const conn = baseConn.useDb(`${DB_PREFIX}org_${orgId}`, { useCache: true });
        const user = await conn.db!.collection('users').findOne({ email: `user@${orgId}.test` });
        expect(user).toBeDefined();
        expect(user!.name).toBe(`User ${orgId}`);
      }

      console.log(
        `[Migration] All existing user data preserved across ${migrationOrgs.length} orgs`,
      );
    });
  });
});