LibreChat/api/server/routes/search.js
Danny Avila 37964975c1
🤖 refactor: Improve Agents Memory Usage, Bump Keyv, Grok 3 (#6850)
* chore: remove unused redis file

* chore: bump keyv dependencies, and update related imports

* refactor: Implement IoRedis client for rate limiting across middleware, as node-redis via keyv is not compatible

* fix: Set max listeners to expected amount

* WIP: memory improvements

* refactor: Simplify getAbortData assignment in createAbortController

* refactor: Update getAbortData to use WeakRef for content management

* WIP: memory improvements in agent chat requests

* refactor: Enhance memory management with finalization registry and cleanup functions

* refactor: Simplify domainParser calls by removing unnecessary request parameter

* refactor: Update parameter types for action tools and agent loading functions to use minimal configs

* refactor: Simplify domainParser tests by removing unnecessary request parameter

* refactor: Simplify domainParser call by removing unnecessary request parameter

* refactor: Enhance client disposal by nullifying additional properties to improve memory management

* refactor: Improve title generation by adding abort controller and timeout handling, consolidate request cleanup

* refactor: Update checkIdleConnections to skip the current user (when passed) while checking for idle connections

* refactor: Update createMCPTool to derive userId from config and handle abort signals

* refactor: Introduce createTokenCounter function and update tokenCounter usage; enhance disposeClient to reset Graph values

* refactor: Update getMCPManager to accept userId parameter for improved idle connection handling

* refactor: Extract logToolError function for improved error handling in AgentClient

* refactor: Update disposeClient to clear handlerRegistry and graphRunnable references in client.run

* refactor: Extract createHandleNewToken function to streamline token handling in initializeClient

* chore: bump @librechat/agents

* refactor: Improve timeout handling in addTitle function for better error management

* refactor: Introduce createFetch instead of using class method

* refactor: Enhance client disposal and request data handling in AskController and EditController

* refactor: Update import statements for AnthropicClient and OpenAIClient to use specific paths

* refactor: Use WeakRef for response handling in SplitStreamHandler to prevent memory leaks

* refactor: Simplify client disposal and rename getReqData to processReqData in AskController and EditController

* refactor: Improve logging structure and parameter handling in OpenAIClient

* refactor: Remove unused GraphEvents and improve stream event handling in AnthropicClient and OpenAIClient

* refactor: Simplify client initialization in AskController and EditController

* refactor: Remove unused mock functions and implement in-memory store for KeyvMongo

* chore: Update dependencies in package-lock.json to latest versions

* refactor: Await token usage recording in OpenAIClient to ensure proper async handling

* refactor: Remove handleAbort route from multiple endpoints and enhance client disposal logic

* refactor: Enhance abort controller logic by managing abortKey more effectively

* refactor: Add newConversation handling in useEventHandlers for improved conversation management

* fix: dropparams

* refactor: Use optional chaining for safer access to request properties in BaseClient

* refactor: Move client disposal and request data processing logic to cleanup module for better organization

* refactor: Remove aborted request check from addTitle function for cleaner logic

* feat: Add Grok 3 model pricing and update tests for new models

* chore: Remove trace warnings and inspect flags from backend start script used for debugging

* refactor: Replace user identifier handling with userId for consistency across controllers, use UserId in clientRegistry

* refactor: Enhance client disposal logic to prevent memory leaks by clearing additional references

* chore: Update @librechat/agents to version 2.4.14 in package.json and package-lock.json
2025-04-12 18:46:36 -04:00

const { Keyv } = require('keyv');
const express = require('express');
const { MeiliSearch } = require('meilisearch');
const { Conversation, getConvosQueried } = require('~/models/Conversation');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc');
const { reduceHits } = require('~/lib/utils/reduceHits');
const { isEnabled } = require('~/server/utils');
const { Message } = require('~/models/Message');
const keyvRedis = require('~/cache/keyvRedis');
const { logger } = require('~/config');
const router = express.Router();
const expiration = 60 * 1000;
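// Search results are cached per user and query. With USE_REDIS enabled, entries go to the
// shared keyvRedis store; otherwise an in-memory Keyv namespace is used. In both cases,
// entries are written with the 60-second `expiration` TTL when a search completes.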
const cache = isEnabled(process.env.USE_REDIS)
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: 'search', ttl: expiration });
router.use(requireJwtAuth);
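
// Triggers a full re-index of messages and conversations into MeiliSearch.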
router.get('/sync', async function (req, res) {
  await Message.syncWithMeili();
  await Conversation.syncWithMeili();
  res.send('synced');
});
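
// Main search endpoint: serves a cached result when available; otherwise queries MeiliSearch
// for matching messages and conversation titles, merges the hits, resolves the user's
// conversations for the requested page, and caches the result.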
router.get('/', async function (req, res) {
  try {
    let user = req.user.id ?? '';
    const { q } = req.query;
    const pageNumber = req.query.pageNumber || 1;
    const key = `${user}:search:${q}`;
    const cached = await cache.get(key);
    if (cached) {
      logger.debug('[/search] cache hit: ' + key);
      const { pages, pageSize, messages } = cached;
      res
        .status(200)
        .send({ conversations: cached[pageNumber], pages, pageNumber, pageSize, messages });
      return;
    }

    const messages = (await Message.meiliSearch(q, undefined, true)).hits;
    const titles = (await Conversation.meiliSearch(q)).hits;
    const sortedHits = reduceHits(messages, titles);
    const result = await getConvosQueried(user, sortedHits, pageNumber);

    const activeMessages = [];
    for (let i = 0; i < messages.length; i++) {
      let message = messages[i];
      if (message.conversationId.includes('--')) {
        message.conversationId = cleanUpPrimaryKeyValue(message.conversationId);
      }
      if (result.convoMap[message.conversationId]) {
        const convo = result.convoMap[message.conversationId];
        const { title, chatGptLabel, model } = convo;
        message = { ...message, ...{ title, chatGptLabel, model } };
        activeMessages.push(message);
      }
    }

    result.messages = activeMessages;
    if (result.cache) {
      result.cache.messages = activeMessages;
      cache.set(key, result.cache, expiration);
      delete result.cache;
    }
    delete result.convoMap;
    res.status(200).send(result);
  } catch (error) {
    logger.error('[/search] Error while searching messages & conversations', error);
    res.status(500).send({ message: 'Error searching' });
  }
});
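
// Debug endpoint: returns raw message hits with MeiliSearch highlighting applied to `text`.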
router.get('/test', async function (req, res) {
  const { q } = req.query;
  const messages = (
    await Message.meiliSearch(q, { attributesToHighlight: ['text'] }, true)
  ).hits.map((message) => {
    const { _formatted, ...rest } = message;
    return { ...rest, searchResult: true, text: _formatted.text };
  });
  res.send(messages);
});
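
// Health check: search is reported as enabled only when MeiliSearch responds as 'available'
// and the SEARCH environment variable is set.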
router.get('/enable', async function (req, res) {
  let result = false;
  try {
    const client = new MeiliSearch({
      host: process.env.MEILI_HOST,
      apiKey: process.env.MEILI_MASTER_KEY,
    });
    const { status } = await client.health();
    result = status === 'available' && !!process.env.SEARCH;
    return res.send(result);
  } catch (error) {
    return res.send(false);
  }
});
module.exports = router;
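
For orientation, a minimal client-side sketch of calling the search route follows. It assumes the router is mounted at /api/search and that the JWT is sent as a Bearer token; both the mount path and the auth header are assumptions, since they are configured elsewhere in LibreChat rather than in this file.

// Hypothetical usage sketch — the /api/search mount path and Bearer-token auth are assumptions.
async function searchConversations(token, q, pageNumber = 1) {
  const params = new URLSearchParams({ q, pageNumber: String(pageNumber) });
  const response = await fetch(`/api/search?${params}`, {
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!response.ok) {
    throw new Error(`Search request failed: ${response.status}`);
  }
  // The response shape mirrors the handler above: conversations for the requested
  // page, plus pages, pageNumber, pageSize, and the matching messages.
  return response.json();
}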