mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-02-20 17:34:10 +01:00
* chore: move database model methods to /packages/data-schemas * chore: add TypeScript ESLint rule to warn on unused variables * refactor: model imports to streamline access - Consolidated model imports across various files to improve code organization and reduce redundancy. - Updated imports for models such as Assistant, Message, Conversation, and others to a unified import path. - Adjusted middleware and service files to reflect the new import structure, ensuring functionality remains intact. - Enhanced test files to align with the new import paths, maintaining test coverage and integrity. * chore: migrate database models to packages/data-schemas and refactor all direct Mongoose Model usage outside of data-schemas * test: update agent model mocks in unit tests - Added `getAgent` mock to `client.test.js` to enhance test coverage for agent-related functionality. - Removed redundant `getAgent` and `getAgents` mocks from `openai.spec.js` and `responses.unit.spec.js` to streamline test setup and reduce duplication. - Ensured consistency in agent mock implementations across test files. * fix: update types in data-schemas * refactor: enhance type definitions in transaction and spending methods - Updated type definitions in `checkBalance.ts` to use specific request and response types. - Refined `spendTokens.ts` to utilize a new `SpendTxData` interface for better clarity and type safety. - Improved transaction handling in `transaction.ts` by introducing `TransactionResult` and `TxData` interfaces, ensuring consistent data structures across methods. - Adjusted unit tests in `transaction.spec.ts` to accommodate new type definitions and enhance robustness. * refactor: streamline model imports and enhance code organization - Consolidated model imports across various controllers and services to a unified import path, improving code clarity and reducing redundancy. - Updated multiple files to reflect the new import structure, ensuring all functionalities remain intact. 
- Enhanced overall code organization by removing duplicate import statements and optimizing the usage of model methods. * feat: implement loadAddedAgent and refactor agent loading logic - Introduced `loadAddedAgent` function to handle loading agents from added conversations, supporting multi-convo parallel execution. - Created a new `load.ts` file to encapsulate agent loading functionalities, including `loadEphemeralAgent` and `loadAgent`. - Updated the `index.ts` file to export the new `load` module instead of the deprecated `loadAgent`. - Enhanced type definitions and improved error handling in the agent loading process. - Adjusted unit tests to reflect changes in the agent loading structure and ensure comprehensive coverage. * refactor: enhance balance handling with new update interface - Introduced `IBalanceUpdate` interface to streamline balance update operations across the codebase. - Updated `upsertBalanceFields` method signatures in `balance.ts`, `transaction.ts`, and related tests to utilize the new interface for improved type safety. - Adjusted type imports in `balance.spec.ts` to include `IBalanceUpdate`, ensuring consistency in balance management functionalities. - Enhanced overall code clarity and maintainability by refining type definitions related to balance operations. * feat: add unit tests for loadAgent functionality and enhance agent loading logic - Introduced comprehensive unit tests for the `loadAgent` function, covering various scenarios including null and empty agent IDs, loading of ephemeral agents, and permission checks. - Enhanced the `initializeClient` function by moving `getConvoFiles` to the correct position in the database method exports, ensuring proper functionality. - Improved test coverage for agent loading, including handling of non-existent agents and user permissions. 
* chore: reorder memory method exports for consistency - Moved `deleteAllUserMemories` to the correct position in the exported memory methods, ensuring a consistent and logical order of method exports in `memory.ts`.
157 lines
5.2 KiB
JavaScript
157 lines
5.2 KiB
JavaScript
const { nanoid } = require('nanoid');
|
|
const { checkAccess } = require('@librechat/api');
|
|
const { logger } = require('@librechat/data-schemas');
|
|
const {
|
|
Tools,
|
|
Permissions,
|
|
FileSources,
|
|
EModelEndpoint,
|
|
PermissionTypes,
|
|
} = require('librechat-data-provider');
|
|
const { getRoleByName, getFiles } = require('~/models');
|
|
|
|
/**
 * Process file search results from tool calls
 * @param {Object} options
 * @param {IUser} options.user - The user object
 * @param {AppConfig} options.appConfig - The app configuration object
 * @param {GraphRunnableConfig['configurable']} options.metadata - The metadata
 * @param {{ [Tools.file_search]: { sources: Object[]; fileCitations: boolean } }} options.toolArtifact - The tool artifact containing structured data
 * @param {string} options.toolCallId - The tool call ID
 * @returns {Promise<Object|null>} The file search attachment or null
 */
async function processFileCitations({ user, appConfig, toolArtifact, toolCallId, metadata }) {
  try {
    const artifactData = toolArtifact?.[Tools.file_search];
    if (!artifactData?.sources) {
      return null;
    }

    if (user) {
      try {
        // An explicit artifact-level flag wins; otherwise fall back to a role
        // permission check for FILE_CITATIONS / USE.
        const citationsAllowed =
          artifactData?.fileCitations ??
          (await checkAccess({
            user,
            permissionType: PermissionTypes.FILE_CITATIONS,
            permissions: [Permissions.USE],
            getRoleByName,
          }));

        if (!citationsAllowed) {
          logger.debug(
            `[processFileCitations] User ${user.id} does not have FILE_CITATIONS permission`,
          );
          return null;
        }
      } catch (error) {
        // Best-effort: a failed permission lookup does not block citations.
        logger.error(
          `[processFileCitations] Permission check failed for FILE_CITATIONS: ${error.message}`,
        );
        logger.debug(`[processFileCitations] Proceeding with citations due to permission error`);
      }
    }

    const agentsConfig = appConfig.endpoints?.[EModelEndpoint.agents];
    const maxCitations = agentsConfig?.maxCitations ?? 30;
    const maxCitationsPerFile = agentsConfig?.maxCitationsPerFile ?? 5;
    const minRelevanceScore = agentsConfig?.minRelevanceScore ?? 0.45;

    const allSources = artifactData.sources || [];
    const relevantSources = allSources.filter((source) => source.relevance >= minRelevanceScore);

    if (relevantSources.length === 0) {
      logger.debug(
        `[processFileCitations] No sources above relevance threshold of ${minRelevanceScore}`,
      );
      return null;
    }

    const limitedSources = applyCitationLimits(relevantSources, maxCitations, maxCitationsPerFile);
    const enhancedSources = await enhanceSourcesWithMetadata(limitedSources, appConfig);

    if (enhancedSources.length === 0) {
      return null;
    }

    return {
      type: Tools.file_search,
      [Tools.file_search]: { sources: enhancedSources },
      toolCallId: toolCallId,
      messageId: metadata.run_id,
      conversationId: metadata.thread_id,
      name: `${Tools.file_search}_file_search_results_${nanoid()}`,
    };
  } catch (error) {
    logger.error('[processFileCitations] Error processing file citations:', error);
    return null;
  }
}
|
|
|
|
/**
 * Apply citation limits to sources
 * @param {Array} sources - All sources
 * @param {number} maxCitations - Maximum total citations
 * @param {number} maxCitationsPerFile - Maximum citations per file
 * @returns {Array} Selected sources
 */
function applyCitationLimits(sources, maxCitations, maxCitationsPerFile) {
  // Bucket the sources by their owning file.
  const groupedByFile = new Map();
  for (const source of sources) {
    const bucket = groupedByFile.get(source.fileId);
    if (bucket) {
      bucket.push(source);
    } else {
      groupedByFile.set(source.fileId, [source]);
    }
  }

  // Keep only the top-N most relevant citations from each file.
  const perFileWinners = [];
  for (const bucket of groupedByFile.values()) {
    bucket.sort((a, b) => b.relevance - a.relevance);
    perFileWinners.push(...bucket.slice(0, maxCitationsPerFile));
  }

  // Rank the survivors globally and cap the overall citation count.
  perFileWinners.sort((a, b) => b.relevance - a.relevance);
  return perFileWinners.slice(0, maxCitations);
}
|
|
|
|
/**
 * Enhance sources with file metadata from database
 * @param {Array} sources - Selected sources
 * @param {AppConfig} appConfig - Custom configuration
 * @returns {Promise<Array>} Enhanced sources
 */
async function enhanceSourcesWithMetadata(sources, appConfig) {
  const uniqueFileIds = Array.from(new Set(sources.map(({ fileId }) => fileId)));

  // Map of file_id -> file record; stays empty if the DB lookup fails, in
  // which case every source falls back to its own fields and config defaults.
  const recordsById = {};
  try {
    const files = await getFiles({ file_id: { $in: uniqueFileIds } });
    for (const file of files) {
      recordsById[file.file_id] = file;
    }
  } catch (error) {
    logger.error('[enhanceSourcesWithMetadata] Error looking up file metadata:', error);
  }

  return sources.map((source) => {
    const record = recordsById[source.fileId] || {};
    // Prefer the stored source, then the configured strategy, then local.
    const storageType = record.source || appConfig?.fileStrategy || FileSources.local;

    return {
      ...source,
      fileName: record.filename || source.fileName || 'Unknown File',
      metadata: {
        ...source.metadata,
        storageType: storageType,
      },
    };
  });
}
|
|
|
|
// Public API: the three citation helpers defined above. The internal helpers
// (applyCitationLimits, enhanceSourcesWithMetadata) are exported alongside
// processFileCitations so they can be required individually.
module.exports = {
  applyCitationLimits,
  processFileCitations,
  enhanceSourcesWithMetadata,
};
|