mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-02-20 17:34:10 +01:00
* chore: move database model methods to /packages/data-schemas * chore: add TypeScript ESLint rule to warn on unused variables * refactor: model imports to streamline access - Consolidated model imports across various files to improve code organization and reduce redundancy. - Updated imports for models such as Assistant, Message, Conversation, and others to a unified import path. - Adjusted middleware and service files to reflect the new import structure, ensuring functionality remains intact. - Enhanced test files to align with the new import paths, maintaining test coverage and integrity. * chore: migrate database models to packages/data-schemas and refactor all direct Mongoose Model usage outside of data-schemas * test: update agent model mocks in unit tests - Added `getAgent` mock to `client.test.js` to enhance test coverage for agent-related functionality. - Removed redundant `getAgent` and `getAgents` mocks from `openai.spec.js` and `responses.unit.spec.js` to streamline test setup and reduce duplication. - Ensured consistency in agent mock implementations across test files. * fix: update types in data-schemas * refactor: enhance type definitions in transaction and spending methods - Updated type definitions in `checkBalance.ts` to use specific request and response types. - Refined `spendTokens.ts` to utilize a new `SpendTxData` interface for better clarity and type safety. - Improved transaction handling in `transaction.ts` by introducing `TransactionResult` and `TxData` interfaces, ensuring consistent data structures across methods. - Adjusted unit tests in `transaction.spec.ts` to accommodate new type definitions and enhance robustness. * refactor: streamline model imports and enhance code organization - Consolidated model imports across various controllers and services to a unified import path, improving code clarity and reducing redundancy. - Updated multiple files to reflect the new import structure, ensuring all functionalities remain intact. 
- Enhanced overall code organization by removing duplicate import statements and optimizing the usage of model methods. * feat: implement loadAddedAgent and refactor agent loading logic - Introduced `loadAddedAgent` function to handle loading agents from added conversations, supporting multi-convo parallel execution. - Created a new `load.ts` file to encapsulate agent loading functionalities, including `loadEphemeralAgent` and `loadAgent`. - Updated the `index.ts` file to export the new `load` module instead of the deprecated `loadAgent`. - Enhanced type definitions and improved error handling in the agent loading process. - Adjusted unit tests to reflect changes in the agent loading structure and ensure comprehensive coverage. * refactor: enhance balance handling with new update interface - Introduced `IBalanceUpdate` interface to streamline balance update operations across the codebase. - Updated `upsertBalanceFields` method signatures in `balance.ts`, `transaction.ts`, and related tests to utilize the new interface for improved type safety. - Adjusted type imports in `balance.spec.ts` to include `IBalanceUpdate`, ensuring consistency in balance management functionalities. - Enhanced overall code clarity and maintainability by refining type definitions related to balance operations. * feat: add unit tests for loadAgent functionality and enhance agent loading logic - Introduced comprehensive unit tests for the `loadAgent` function, covering various scenarios including null and empty agent IDs, loading of ephemeral agents, and permission checks. - Enhanced the `initializeClient` function by moving `getConvoFiles` to the correct position in the database method exports, ensuring proper functionality. - Improved test coverage for agent loading, including handling of non-existent agents and user permissions. 
* chore: reorder memory method exports for consistency - Moved `deleteAllUserMemories` to the correct position in the exported memory methods, ensuring a consistent and logical order of method exports in `memory.ts`.
212 lines
5.5 KiB
JavaScript
212 lines
5.5 KiB
JavaScript
const {
|
|
Time,
|
|
CacheKeys,
|
|
SEPARATORS,
|
|
parseTextParts,
|
|
findLastSeparatorIndex,
|
|
} = require('librechat-data-provider');
|
|
const { getLogStores } = require('~/cache');
|
|
const { getMessage } = require('~/models');
|
|
|
|
/**
 * Picks a voice ID uniformly at random from the provided list.
 *
 * @param {string[]} voiceIds - Array of voice IDs
 * @returns {string} A randomly selected voice ID
 */
function getRandomVoiceId(voiceIds) {
  const randomPick = Math.trunc(Math.random() * voiceIds.length);
  return voiceIds[randomPick];
}
|
|
|
|
/**
|
|
* @typedef {Object} VoiceSettings
|
|
* @property {number} similarity_boost
|
|
* @property {number} stability
|
|
* @property {boolean} use_speaker_boost
|
|
*/
|
|
|
|
/**
|
|
* @typedef {Object} GenerateAudioBulk
|
|
* @property {string} model_id
|
|
* @property {string} text
|
|
* @property {VoiceSettings} voice_settings
|
|
*/
|
|
|
|
/**
|
|
* @typedef {Object} TextToSpeechClient
|
|
* @property {function(Object): Promise<stream.Readable>} generate
|
|
*/
|
|
|
|
/**
|
|
* @typedef {Object} AudioChunk
|
|
* @property {string} audio
|
|
* @property {boolean} isFinal
|
|
* @property {Object} alignment
|
|
* @property {number[]} alignment.char_start_times_ms
|
|
* @property {number[]} alignment.chars_durations_ms
|
|
* @property {string[]} alignment.chars
|
|
* @property {Object} normalizedAlignment
|
|
* @property {number[]} normalizedAlignment.char_start_times_ms
|
|
* @property {number[]} normalizedAlignment.chars_durations_ms
|
|
* @property {string[]} normalizedAlignment.chars
|
|
*/
|
|
|
|
/** Max consecutive polls that find no message before processChunks gives up. */
const MAX_NOT_FOUND_COUNT = 6;
/** Max consecutive polls with unchanged message text before processChunks gives up. */
const MAX_NO_CHANGE_COUNT = 10;
|
|
|
|
/**
 * Creates a polling function that incrementally reads the text of a (possibly
 * still-streaming) message — from the message cache, falling back to the
 * database — and returns any newly available text as TTS-ready chunks.
 *
 * @param {string} user - User ID, used for the database fallback lookup
 * @param {string} messageId - ID of the message to poll
 * @returns {() => Promise<{ text: string, isFinished: boolean }[] | string>}
 *   Polling function; resolves to an array of new chunks, or an error string
 *   once a retry limit is exhausted
 * @throws {Error} If `messageId` is missing
 */
function createChunkProcessor(user, messageId) {
  let notFoundCount = 0;
  let noChangeCount = 0;
  let processedText = '';
  if (!messageId) {
    throw new Error('Message ID is required');
  }

  const messageCache = getLogStores(CacheKeys.MESSAGES);

  /**
   * @returns {Promise<{ text: string, isFinished: boolean }[] | string>}
   */
  async function processChunks() {
    if (notFoundCount >= MAX_NOT_FOUND_COUNT) {
      return `Message not found after ${MAX_NOT_FOUND_COUNT} attempts`;
    }

    if (noChangeCount >= MAX_NO_CHANGE_COUNT) {
      return `No change in message after ${MAX_NO_CHANGE_COUNT} attempts`;
    }

    /** @type {string | { text: string; complete: boolean }} */
    let message = await messageCache.get(messageId);
    if (!message) {
      const dbMessage = await getMessage({ user, messageId });
      if (dbMessage) {
        // A database message is final: normalize its text once (content parts
        // take precedence over plain text) and cache it as complete.
        // Fix: previously *any* found message was re-cached with
        // `complete: true`, which clobbered in-progress cache entries
        // (raw strings) with `{ text: undefined, complete: true }` and
        // prematurely finalized them; only database results are cached here,
        // and the normalized text is also what this poll consumes.
        const dbText =
          dbMessage.content?.length > 0 ? parseTextParts(dbMessage.content) : dbMessage.text;
        message = { text: dbText, complete: true };
        messageCache.set(messageId, message, Time.FIVE_MINUTES);
      }
    }

    if (!message) {
      notFoundCount++;
      return [];
    }

    // Cache entries are either raw in-progress strings (still streaming) or
    // normalized `{ text, complete }` objects. Guard against undefined text
    // so `.slice` below cannot throw.
    const text = (typeof message === 'string' ? message : message.text) ?? '';
    const complete = typeof message === 'string' ? false : (message.complete ?? true);

    if (text === processedText) {
      noChangeCount++;
    }

    const remainingText = text.slice(processedText.length);
    const chunks = [];

    if (!complete && remainingText.length >= 20) {
      // While streaming, emit only up to the last separator so chunks end on
      // natural boundaries; if none is found, flush everything accumulated.
      const separatorIndex = findLastSeparatorIndex(remainingText);
      if (separatorIndex !== -1) {
        const chunkText = remainingText.slice(0, separatorIndex + 1);
        chunks.push({ text: chunkText, isFinished: false });
        processedText += chunkText;
      } else {
        chunks.push({ text: remainingText, isFinished: false });
        processedText = text;
      }
    } else if (complete && remainingText.trim().length > 0) {
      chunks.push({ text: remainingText.trim(), isFinished: true });
      processedText = text;
    }

    return chunks;
  }

  return processChunks;
}
|
|
|
|
/**
 * Splits text into chunks of at most `chunkSize` characters, preferring to
 * break at the last separator (from SEPARATORS) found inside each window.
 * Each chunk is trimmed; the final chunk is marked `isFinished: true`.
 *
 * @param {string} text - Text to split
 * @param {number} [chunkSize=4000] - Maximum characters per chunk window
 * @returns {{ text: string, isFinished: boolean }[]} Ordered chunks
 * @throws {Error} If `text` is empty or missing
 */
function splitTextIntoChunks(text, chunkSize = 4000) {
  if (!text) {
    throw new Error('Text is required');
  }

  const chunks = [];
  const total = text.length;
  let cursor = 0;

  while (cursor < total) {
    let end = Math.min(cursor + chunkSize, total);
    let piece = text.slice(cursor, end);

    if (end < total) {
      // Find the latest separator occurrence inside the current window.
      let breakAt = -1;
      for (const separator of SEPARATORS) {
        breakAt = Math.max(breakAt, piece.lastIndexOf(separator));
      }

      if (breakAt !== -1) {
        // Break just after the separator.
        end = cursor + breakAt + 1;
        piece = text.slice(cursor, end);
      } else {
        // No separator in the window: extend to just before the next
        // non-whitespace character so trailing whitespace stays in this chunk.
        const offset = text.slice(end).search(/\S/);
        if (offset !== -1) {
          end += offset;
          piece = text.slice(cursor, end);
        }
      }
    }

    piece = piece.trim();
    if (piece) {
      chunks.push({
        text: piece,
        isFinished: end >= total,
      });
    } else if (chunks.length > 0) {
      // Whitespace-only remainder: mark the previous chunk as the last one.
      chunks[chunks.length - 1].isFinished = true;
    }

    cursor = end;
    // Skip over whitespace before starting the next window.
    while (cursor < total && text[cursor].trim() === '') {
      cursor += 1;
    }
  }

  return chunks;
}
|
|
|
|
/**
 * Adapts an OpenAI-style chat completion stream into a stream of plain text
 * deltas, yielding only non-empty content strings.
 *
 * Fix: chunks without a readable delta (e.g. the final usage chunk emitted
 * with an empty `choices` array when `stream_options.include_usage` is set)
 * previously threw a TypeError on `choices[0].delta`; optional chaining now
 * skips them safely.
 *
 * @param {AsyncIterable<Object>} llmStream - Stream of completion chunks
 * @yields {string} Non-empty content deltas
 */
async function* llmMessageSource(llmStream) {
  for await (const chunk of llmStream) {
    const content = chunk.choices?.[0]?.delta?.content;
    if (content) {
      yield content;
    }
  }
}
|
|
|
|
module.exports = {
  // Re-exported as-is from 'librechat-data-provider' for callers' convenience.
  findLastSeparatorIndex,
  createChunkProcessor,
  splitTextIntoChunks,
  llmMessageSource,
  getRandomVoiceId,
};
|