LibreChat/api/server/routes/memories.js
Danny Avila a6fb257bcf
📦 refactor: Consolidate DB models, encapsulating Mongoose usage in data-schemas (#11830)
* chore: move database model methods to /packages/data-schemas

* chore: add TypeScript ESLint rule to warn on unused variables
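For reference, a minimal sketch of such a rule in a typescript-eslint flat config; the file name, shared config, and ignore patterns here are assumptions for illustration, not necessarily the repository's actual setup:

```ts
// eslint.config.ts — sketch only; the real config may differ.
import tseslint from 'typescript-eslint';

export default tseslint.config(...tseslint.configs.recommended, {
  rules: {
    // Downgrade unused variables from error to warn; underscore-prefixed names ignored (assumed convention).
    '@typescript-eslint/no-unused-vars': [
      'warn',
      { argsIgnorePattern: '^_', varsIgnorePattern: '^_' },
    ],
  },
});
```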

* refactor: model imports to streamline access

- Consolidated model imports across various files to improve code organization and reduce redundancy.
- Updated imports for models such as Assistant, Message, Conversation, and others to a unified import path.
- Adjusted middleware and service files to reflect the new import structure, ensuring functionality remains intact.
- Enhanced test files to align with the new import paths, maintaining test coverage and integrity.
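As a rough illustration of the consolidation described above (the method names are taken from the route file below; the "before" paths are hypothetical):

```ts
// Before (hypothetical): callers reached into individual model modules.
// import { getAllUserMemories } from '~/models/Memory';
// import { getRoleByName } from '~/models/Role';

// After: one consolidated entry point re-exports the methods defined in packages/data-schemas.
// The API layer itself uses the CommonJS form, require('~/models'), as seen in memories.js below.
import { getAllUserMemories, createMemory, deleteMemory, setMemory, getRoleByName } from '~/models';
```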

* chore: migrate database models to packages/data-schemas and refactor all direct Mongoose Model usage outside of data-schemas

* test: update agent model mocks in unit tests

- Added `getAgent` mock to `client.test.js` to enhance test coverage for agent-related functionality.
- Removed redundant `getAgent` and `getAgents` mocks from `openai.spec.js` and `responses.unit.spec.js` to streamline test setup and reduce duplication.
- Ensured consistency in agent mock implementations across test files.
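A hedged sketch of the kind of shared agent mock this refers to; the mocked module path and the returned agent shape are assumptions:

```ts
// client.test.js (sketch) — declare the agent mock once against the consolidated models module
// rather than repeating it in openai.spec.js and responses.unit.spec.js.
jest.mock('~/models', () => ({
  ...jest.requireActual('~/models'),
  getAgent: jest.fn().mockResolvedValue({ id: 'agent_123', provider: 'openai', model: 'gpt-4o' }),
}));
```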

* fix: update types in data-schemas

* refactor: enhance type definitions in transaction and spending methods

- Updated type definitions in `checkBalance.ts` to use specific request and response types.
- Refined `spendTokens.ts` to utilize a new `SpendTxData` interface for better clarity and type safety.
- Improved transaction handling in `transaction.ts` by introducing `TransactionResult` and `TxData` interfaces, ensuring consistent data structures across methods.
- Adjusted unit tests in `transaction.spec.ts` to accommodate new type definitions and enhance robustness.
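The bullets above name the interfaces but not their shape; as a hedged sketch only, they might look roughly like this (every field name and optionality below is an assumption):

```ts
// Sketch — the real definitions live in packages/data-schemas.
interface SpendTxData {
  user: string;
  conversationId?: string;
  model?: string;
  context?: string;
  promptTokens?: number;
  completionTokens?: number;
}

interface TransactionResult {
  user: string;
  rate?: number;
  prompt?: number;
  completion?: number;
  balance?: number;
}
```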

* refactor: streamline model imports and enhance code organization

- Consolidated model imports across various controllers and services to a unified import path, improving code clarity and reducing redundancy.
- Updated multiple files to reflect the new import structure, ensuring all functionalities remain intact.
- Enhanced overall code organization by removing duplicate import statements and optimizing the usage of model methods.

* feat: implement loadAddedAgent and refactor agent loading logic

- Introduced `loadAddedAgent` function to handle loading agents from added conversations, supporting multi-convo parallel execution.
- Created a new `load.ts` file to encapsulate agent loading functionalities, including `loadEphemeralAgent` and `loadAgent`.
- Updated the `index.ts` file to export the new `load` module instead of the deprecated `loadAgent`.
- Enhanced type definitions and improved error handling in the agent loading process.
- Adjusted unit tests to reflect changes in the agent loading structure and ensure comprehensive coverage.
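A rough sketch of the exported surface this describes; the function names come from the commit message, while the parameter and return types are assumptions:

```ts
// load.ts (sketch) — signatures are illustrative, not the actual API.
import type { Request } from 'express';

export type LoadedAgent = { id: string; provider: string; model: string } | null;

/** Builds an agent definition from request parameters without a DB lookup. */
export declare function loadEphemeralAgent(req: Request, endpoint: string): Promise<LoadedAgent>;
/** Resolves a persisted agent by id, falling back to the ephemeral path and checking user permissions. */
export declare function loadAgent(req: Request, agent_id: string | null): Promise<LoadedAgent>;
/** Resolves the agent attached to an added conversation for multi-convo parallel execution. */
export declare function loadAddedAgent(req: Request, conversationId: string): Promise<LoadedAgent>;
```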

* refactor: enhance balance handling with new update interface

- Introduced `IBalanceUpdate` interface to streamline balance update operations across the codebase.
- Updated `upsertBalanceFields` method signatures in `balance.ts`, `transaction.ts`, and related tests to utilize the new interface for improved type safety.
- Adjusted type imports in `balance.spec.ts` to include `IBalanceUpdate`, ensuring consistency in balance management functionalities.
- Enhanced overall code clarity and maintainability by refining type definitions related to balance operations.
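As a hedged guess at what such an update object covers (the actual fields in `balance.ts` may differ):

```ts
// Sketch only — field names are assumptions about what a balance upsert needs.
interface IBalanceUpdate {
  tokenCredits?: number;
  autoRefillEnabled?: boolean;
  refillAmount?: number;
  lastRefill?: Date;
}
```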

* feat: add unit tests for loadAgent functionality and enhance agent loading logic

- Introduced comprehensive unit tests for the `loadAgent` function, covering various scenarios including null and empty agent IDs, loading of ephemeral agents, and permission checks.
- Enhanced the `initializeClient` function by moving `getConvoFiles` to the correct position in the database method exports, ensuring proper functionality.
- Improved test coverage for agent loading, including handling of non-existent agents and user permissions.
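Illustrative of the scenarios listed above, a single Jest case might look like this (the import path, request shape, and expected return value are assumptions):

```ts
// Sketch — assumes loadAgent resolves to null for unknown agent ids.
import { loadAgent } from '../load';

describe('loadAgent', () => {
  const req = { user: { id: 'user_1' }, body: { endpoint: 'openAI' } } as any;

  it('returns null for a non-existent agent id (assumed behavior)', async () => {
    await expect(loadAgent(req, 'agent_does_not_exist')).resolves.toBeNull();
  });
});
```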

* chore: reorder memory method exports for consistency

- Moved `deleteAllUserMemories` to the correct position in the exported memory methods, ensuring a consistent and logical order of method exports in `memory.ts`.
2026-02-18 00:31:36 -05:00


const express = require('express');
const { Tokenizer, generateCheckAccess } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const {
getAllUserMemories,
toggleUserMemories,
getRoleByName,
createMemory,
deleteMemory,
setMemory,
} = require('~/models');
const { requireJwtAuth, configMiddleware } = require('~/server/middleware');
const router = express.Router();
const memoryPayloadLimit = express.json({ limit: '100kb' });
const checkMemoryRead = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.READ],
getRoleByName,
});
const checkMemoryCreate = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.CREATE],
getRoleByName,
});
const checkMemoryUpdate = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.UPDATE],
getRoleByName,
});
const checkMemoryDelete = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.UPDATE],
getRoleByName,
});
const checkMemoryOptOut = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.OPT_OUT],
getRoleByName,
});
router.use(requireJwtAuth);
/**
* GET /memories
* Returns all memories for the authenticated user, sorted by updated_at (newest first).
* Also includes memory usage percentage based on token limit.
*/
router.get('/', checkMemoryRead, configMiddleware, async (req, res) => {
try {
const memories = await getAllUserMemories(req.user.id);
const sortedMemories = memories.sort(
(a, b) => new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime(),
);
const totalTokens = memories.reduce((sum, memory) => {
return sum + (memory.tokenCount || 0);
}, 0);
const appConfig = req.config;
const memoryConfig = appConfig?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
const charLimit = memoryConfig?.charLimit || 10000;
let usagePercentage = null;
if (tokenLimit && tokenLimit > 0) {
usagePercentage = Math.min(100, Math.round((totalTokens / tokenLimit) * 100));
}
res.json({
memories: sortedMemories,
totalTokens,
tokenLimit: tokenLimit || null,
charLimit,
usagePercentage,
});
} catch (error) {
res.status(500).json({ error: error.message });
}
});
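/**
 * Illustrative example; values are made up, the response shape mirrors the handler above,
 * and the /api/memories mount path is an assumption.
 *
 *   GET /api/memories
 *   200 {
 *     "memories": [{ "key": "preferred_name", "value": "Dan", "tokenCount": 150, "updated_at": "..." }],
 *     "totalTokens": 150,
 *     "tokenLimit": 2000,
 *     "charLimit": 10000,
 *     "usagePercentage": 8
 *   }
 */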
/**
* POST /memories
* Creates a new memory entry for the authenticated user.
* Body: { key: string, value: string }
* Returns 201 and { created: true, memory: <createdDoc> } when successful.
*/
router.post('/', memoryPayloadLimit, checkMemoryCreate, configMiddleware, async (req, res) => {
const { key, value } = req.body;
if (typeof key !== 'string' || key.trim() === '') {
return res.status(400).json({ error: 'Key is required and must be a non-empty string.' });
}
if (typeof value !== 'string' || value.trim() === '') {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
const appConfig = req.config;
const memoryConfig = appConfig?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (key.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${key.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
const tokenLimit = memoryConfig?.tokenLimit;
if (tokenLimit) {
const currentTotalTokens = memories.reduce(
(sum, memory) => sum + (memory.tokenCount || 0),
0,
);
if (currentTotalTokens + tokenCount > tokenLimit) {
return res.status(400).json({
error: `Adding this memory would exceed the token limit of ${tokenLimit}. Current usage: ${currentTotalTokens} tokens.`,
});
}
}
const result = await createMemory({
userId: req.user.id,
key: key.trim(),
value: value.trim(),
tokenCount,
});
if (!result.ok) {
return res.status(500).json({ error: 'Failed to create memory.' });
}
const updatedMemories = await getAllUserMemories(req.user.id);
const newMemory = updatedMemories.find((m) => m.key === key.trim());
res.status(201).json({ created: true, memory: newMemory });
} catch (error) {
if (error.message && error.message.includes('already exists')) {
return res.status(409).json({ error: 'Memory with this key already exists.' });
}
res.status(500).json({ error: error.message });
}
});
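/**
 * Illustrative example; token counts are made up and the /api/memories mount path is an assumption.
 *
 *   POST /api/memories
 *   { "key": "favorite_editor", "value": "vim" }
 *   -> 201 { "created": true, "memory": { "key": "favorite_editor", "value": "vim", "tokenCount": 1, ... } }
 *
 *   Posting the same key again   -> 409 "Memory with this key already exists."
 *   Value longer than charLimit  -> 400 with a descriptive length error
 *   Exceeding the tokenLimit     -> 400 with current usage in the error message
 */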
/**
* PATCH /memories/preferences
* Updates the user's memory preferences (e.g., enabling/disabling memories).
* Body: { memories: boolean }
* Returns 200 and { updated: true, preferences: { memories: boolean } } when successful.
*/
router.patch('/preferences', checkMemoryOptOut, async (req, res) => {
const { memories } = req.body;
if (typeof memories !== 'boolean') {
return res.status(400).json({ error: 'memories must be a boolean value.' });
}
try {
const updatedUser = await toggleUserMemories(req.user.id, memories);
if (!updatedUser) {
return res.status(404).json({ error: 'User not found.' });
}
res.json({
updated: true,
preferences: {
memories: updatedUser.personalization?.memories ?? true,
},
});
} catch (error) {
res.status(500).json({ error: error.message });
}
});
/**
* PATCH /memories/:key
* Updates the value of an existing memory entry for the authenticated user.
* Body: { key?: string, value: string }
* Returns 200 and { updated: true, memory: <updatedDoc> } when successful.
*/
router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, configMiddleware, async (req, res) => {
const { key: urlKey } = req.params;
const { key: bodyKey, value } = req.body || {};
if (typeof value !== 'string' || value.trim() === '') {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
const newKey = bodyKey || urlKey;
const appConfig = req.config;
const memoryConfig = appConfig?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (newKey.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${newKey.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
const existingMemory = memories.find((m) => m.key === urlKey);
if (!existingMemory) {
return res.status(404).json({ error: 'Memory not found.' });
}
if (newKey !== urlKey) {
const keyExists = memories.find((m) => m.key === newKey);
if (keyExists) {
return res.status(409).json({ error: 'Memory with this key already exists.' });
}
const createResult = await createMemory({
userId: req.user.id,
key: newKey,
value,
tokenCount,
});
if (!createResult.ok) {
return res.status(500).json({ error: 'Failed to create new memory.' });
}
const deleteResult = await deleteMemory({ userId: req.user.id, key: urlKey });
if (!deleteResult.ok) {
return res.status(500).json({ error: 'Failed to delete old memory.' });
}
} else {
const result = await setMemory({
userId: req.user.id,
key: newKey,
value,
tokenCount,
});
if (!result.ok) {
return res.status(500).json({ error: 'Failed to update memory.' });
}
}
const updatedMemories = await getAllUserMemories(req.user.id);
const updatedMemory = updatedMemories.find((m) => m.key === newKey);
res.json({ updated: true, memory: updatedMemory });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
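/**
 * Illustrative example; the /api/memories mount path is an assumption.
 *
 *   PATCH /api/memories/favorite_editor
 *   { "key": "preferred_editor", "value": "vim" }
 *   -> renames by creating "preferred_editor" and deleting "favorite_editor", then responds
 *      200 { "updated": true, "memory": { "key": "preferred_editor", ... } }
 *
 *   Omitting "key" from the body updates the value of "favorite_editor" in place via setMemory.
 */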
/**
* DELETE /memories/:key
* Deletes a memory entry for the authenticated user.
* Returns 200 and { deleted: true } when successful.
*/
router.delete('/:key', checkMemoryDelete, async (req, res) => {
const { key } = req.params;
try {
const result = await deleteMemory({ userId: req.user.id, key });
if (!result.ok) {
return res.status(404).json({ error: 'Memory not found.' });
}
res.json({ deleted: true });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
module.exports = router;