Mirror of https://github.com/danny-avila/LibreChat.git
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary; feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was pointing to prop of array instead of message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
  refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
  fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume if role is defined that it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic
  refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
  refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
  refactor(getMessagesForConversation): use object for single param, edit JSDoc, edit all files using the method
  refactor(ChatGPTClient): order messages before buildPrompt is called; TODO: add convoSumBuffMemory logic
* fix: undefined handling and summarizing only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing; refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary future intent in .env.example
* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers (see the sketch after this list)
* fix: forgot to spread formatMessages; also took the opportunity to pluralize the filename
* refactor: pass memory to tools, namely openapi specs; not used and may never be used by the new method, but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: failing test
* fix: only pass instructions to payload when defined or not an empty object
* refactor: fromPromptMessages is deprecated, use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory along with any future variables, rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context
  refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
  refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
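The summarization work above revolves around a createSummaryBufferMemory helper that feeds formatted chat history into langchain's ConversationSummaryBufferMemory. Below is a minimal sketch of how such a helper could be wired using this file's formatLangChainMessages; the helper signature, the require path, the maxTokenLimit value, and the default prefixes are assumptions for illustration, not LibreChat's actual implementation.

const { ConversationSummaryBufferMemory, ChatMessageHistory } = require('langchain/memory');
// Assumed path; the formatting helpers live in this file (formatMessages.js).
const { formatLangChainMessages } = require('./formatMessages');

// Sketch only: builds a summary-buffer memory seeded with prior messages ("context").
const createSummaryBufferMemory = ({ llm, prompt, context, formatOptions = {} }) => {
  // Convert stored messages into LangChain HumanMessage/AIMessage instances.
  const chatHistory = new ChatMessageHistory(formatLangChainMessages(context, formatOptions));
  return new ConversationSummaryBufferMemory({
    llm,
    prompt, // optional custom summary prompt; falls back to langchain's default when undefined
    chatHistory,
    returnMessages: true,
    maxTokenLimit: 1500, // assumed budget before older turns are folded into the running summary
    humanPrefix: formatOptions.userName ?? 'Human',
    aiPrefix: formatOptions.assistantName ?? 'AI',
  });
};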
64 lines · 2.5 KiB · JavaScript
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');

/**
 * Formats a message based on the provided options.
 *
 * @param {Object} params - The parameters for formatting.
 * @param {Object} params.message - The message object to format.
 * @param {string} [params.message.role] - The role of the message sender (e.g., 'user', 'assistant').
 * @param {string} [params.message._name] - The name associated with the message.
 * @param {string} [params.message.sender] - The sender of the message.
 * @param {string} [params.message.text] - The text content of the message.
 * @param {string} [params.message.content] - The content of the message.
 * @param {string} [params.userName] - The name of the user.
 * @param {string} [params.assistantName] - The name of the assistant.
 * @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
 * @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
 */
const formatMessage = ({ message, userName, assistantName, langChain = false }) => {
  const { role: _role, _name, sender, text, content: _content } = message;
  const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant');
  const content = text ?? _content ?? '';
  const formattedMessage = {
    role,
    content,
  };

  if (_name) {
    formattedMessage.name = _name;
  }

  if (userName && formattedMessage.role === 'user') {
    formattedMessage.name = userName;
  }

  if (assistantName && formattedMessage.role === 'assistant') {
    formattedMessage.name = assistantName;
  }

  if (!langChain) {
    return formattedMessage;
  }

  if (role === 'user') {
    return new HumanMessage(formattedMessage);
  } else if (role === 'assistant') {
    return new AIMessage(formattedMessage);
  } else {
    return new SystemMessage(formattedMessage);
  }
};

/**
 * Formats an array of messages for LangChain.
 *
 * @param {Array<Object>} messages - The array of messages to format.
 * @param {Object} formatOptions - The options for formatting each message.
 * @param {string} [formatOptions.userName] - The name of the user.
 * @param {string} [formatOptions.assistantName] - The name of the assistant.
 * @returns {Array<(HumanMessage|AIMessage|SystemMessage)>} - The array of formatted LangChain messages.
 */
const formatLangChainMessages = (messages, formatOptions) =>
  messages.map((msg) => formatMessage({ ...formatOptions, message: msg, langChain: true }));

module.exports = { formatMessage, formatLangChainMessages };
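For reference, a brief usage sketch of the two exports; the require path, names, and message objects below are illustrative, not actual LibreChat payloads.

const { formatMessage, formatLangChainMessages } = require('./formatMessages');

// Plain object output (langChain defaults to false); role is inferred from `sender`
// when `role` is absent, and `userName` overrides the name for user messages.
formatMessage({
  message: { sender: 'User', text: 'Hello!' },
  userName: 'Dan',
});
// => { role: 'user', content: 'Hello!', name: 'Dan' }

// LangChain message instances, as consumed by the summary buffer and agent paths.
formatLangChainMessages(
  [
    { role: 'user', text: 'What is the capital of France?' },
    { role: 'assistant', text: 'The capital of France is Paris.' },
  ],
  { userName: 'Dan', assistantName: 'GPT-4' },
);
// => [HumanMessage, AIMessage]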