Mirror of https://github.com/danny-avila/LibreChat.git
* refactor: pass model in message edit payload, use encoder in standalone util function
* feat: add summaryBuffer helper
* refactor(api/messages): use new countTokens helper and add auth middleware at top
* wip: ConversationSummaryBufferMemory
* refactor: move pre-generation helpers to prompts dir
* chore: remove console log
* chore: remove test, as payload will no longer carry tokenCount
* chore: update getMessagesWithinTokenLimit JSDoc
* refactor: optimize getMessagesForConversation and also break on summary; feat(ci): getMessagesForConversation tests
* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message
* chore: add newer model to token map
* fix: condition was pointing to a prop of the array instead of the message prop
* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary;
  refactor(getMessagesWithinTokenLimit): replace text and tokenCount when shouldSummarize, summary, and summaryTokenCount are present;
  fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace the payload's first message when a summary is present
* chore: log previous_summary if debugging
* refactor(formatMessage): assume that if role is defined, it's a valid value
* refactor(getMessagesWithinTokenLimit): remove summary logic;
  refactor(handleContextStrategy): add usePrevSummary logic in case only the summary was pruned;
  refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary;
  refactor(getMessagesForConversation): use object for single param, edit JSDoc, edit all files using the method;
  refactor(ChatGPTClient): order messages before buildPrompt is called; TODO: add convoSumBuffMemory logic
* fix: undefined handling, and summarize only when shouldRefineContext is true
* chore(BaseClient): fix test results omitting system role for summaries, and test edge case
* chore: export summaryBuffer from index file
* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer
* feat: add OPENAI_SUMMARIZE to enable summarizing; refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'
* refactor: rename refineMessages method to summarizeMessages for clarity
* chore: clarify summary feature's future intent in .env.example
* refactor(initializeLLM): handle case where either 'model' or 'modelName' is passed
* feat(gptPlugins): enable summarization for plugins
* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages; use payload array for currentMessages and assign pastMessages sooner
* refactor(agents): use ConversationSummaryBufferMemory for both agent types
* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests
* refactor(summaryBuffer): add createSummaryBufferMemory helper, and use new formatting helpers
* fix: forgot to spread formatMessages; also took the opportunity to pluralize the filename
* refactor: pass memory to tools, namely OpenAPI specs; not used (and may never be used) by the new method, but added for testing
* ci(formatMessages): add more exhaustive checks for langchain messages
* feat: add debug env var for OpenAI
* chore: delete unnecessary comments
* chore: add extra note about summary feature
* fix: remove tokenCount from payload instructions
* fix: failing test
* fix: only pass instructions to payload when defined and not an empty object
* refactor: fromPromptMessages is deprecated; use renamed method fromMessages
* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility
* fix(PluginsClient.buildPromptBody): handle undefined message strings
* chore: log langchain titling error
* feat: getModelMaxTokens helper
* feat: tokenSplit helper
* feat: summary prompts updated
* fix: optimize _CUT_OFF_SUMMARIZER prompt
* refactor(summaryBuffer): use custom summary prompt, allow a prompt to be passed, pass humanPrefix and aiPrefix to memory (along with any future variables), rename messagesToRefine to context
* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context;
  refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this;
  refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning
* fix(handleContextStrategy): handle case where the incoming prompt is bigger than the model context
* chore: rename refinedContent to splitText
* chore: remove unnecessary debug log
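The log above introduces an OPENAI_SUMMARIZE environment variable that switches the client's contextStrategy to 'summarize'. As a minimal sketch of that toggle, assuming a boolean-style env var and a plain options object (both shapes are assumptions, not LibreChat's exact code):

// Hypothetical sketch: OPENAI_SUMMARIZE comes from the commit log above;
// the 'true' string check and the clientOptions shape are assumptions.
const shouldSummarize = process.env.OPENAI_SUMMARIZE === 'true';
const clientOptions = {
  contextStrategy: shouldSummarize ? 'summarize' : null,
};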
61 lines
1.7 KiB
JavaScript
const { ConversationSummaryBufferMemory, ChatMessageHistory } = require('langchain/memory');
const { formatLangChainMessages, SUMMARY_PROMPT } = require('../prompts');
/** Wraps LangChain's ConversationSummaryBufferMemory around an existing message history. */
const createSummaryBufferMemory = ({ llm, prompt, messages, ...rest }) => {
  const chatHistory = new ChatMessageHistory(messages);
  return new ConversationSummaryBufferMemory({
    llm,
    prompt,
    chatHistory,
    returnMessages: true,
    ...rest,
  });
};
const summaryBuffer = async ({
  llm,
  debug,
  context, // array of messages
  formatOptions = {},
  previous_summary = '',
  prompt = SUMMARY_PROMPT,
}) => {
  if (debug && previous_summary) {
    console.log('<-----------PREVIOUS SUMMARY----------->\n\n');
    console.log(previous_summary);
  }

  const formattedMessages = formatLangChainMessages(context, formatOptions);
  const memoryOptions = {
    llm,
    prompt,
    messages: formattedMessages,
  };
  // Use the participants' display names, when provided, as the memory's
  // human/AI prefixes so the summary refers to them by name.
  if (formatOptions.userName) {
    memoryOptions.humanPrefix = formatOptions.userName;
  }
  if (formatOptions.assistantName) {
    memoryOptions.aiPrefix = formatOptions.assistantName;
  }
  const chatPromptMemory = createSummaryBufferMemory(memoryOptions);

  const messages = await chatPromptMemory.chatHistory.getMessages();

  if (debug) {
    console.log('<-----------SUMMARY BUFFER MESSAGES----------->\n\n');
    console.log(JSON.stringify(messages));
  }

  // Condense the buffered messages into a new running summary,
  // folding in the previous summary if one exists.
  const predictSummary = await chatPromptMemory.predictNewSummary(messages, previous_summary);

  if (debug) {
    console.log('<-----------SUMMARY----------->\n\n');
    console.log(JSON.stringify(predictSummary));
  }

  return { role: 'system', content: predictSummary };
};

module.exports = { createSummaryBufferMemory, summaryBuffer };
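A minimal usage sketch of summaryBuffer, assuming a langchain ChatOpenAI instance and role/content message objects (the import paths, model name, formatOptions values, and message contents here are illustrative assumptions, not taken from the repo):

// Hypothetical usage: import paths and message shape are assumptions.
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { summaryBuffer } = require('./summaryBuffer');

(async () => {
  const llm = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 });
  const summaryMessage = await summaryBuffer({
    llm,
    debug: true,
    context: [
      { role: 'user', content: 'My name is Dan and I am building a chat app.' },
      { role: 'assistant', content: 'Nice to meet you, Dan! How can I help?' },
    ],
    formatOptions: { userName: 'Dan', assistantName: 'Assistant' },
  });
  console.log(summaryMessage); // { role: 'system', content: '...' }
})();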