const { PromptTemplate } = require('@langchain/core/prompts');
/*
 * Without `{summary}` and `{new_lines}`, token count is 98
 * We are counting this towards the max context tokens for summaries, +3 for the assistant label (101)
 * If this prompt changes, use https://tiktokenizer.vercel.app/ to count the tokens
 */
const _DEFAULT_SUMMARIZER_TEMPLATE = `Summarize the conversation by integrating new lines into the current summary.

EXAMPLE:
Current summary:
The human inquires about the AI's view on artificial intelligence. The AI believes it's beneficial.

New lines:
Human: Why is it beneficial?
AI: It helps humans achieve their potential.

New summary:
The human inquires about the AI's view on artificial intelligence. The AI believes it's beneficial because it helps humans achieve their potential.

Current summary:
{summary}

New lines:
{new_lines}

New summary:`;

const SUMMARY_PROMPT = new PromptTemplate({
  inputVariables: ['summary', 'new_lines'],
  template: _DEFAULT_SUMMARIZER_TEMPLATE,
});
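/*
 * Illustrative check (a sketch, not part of the module's logic): one plausible
 * way to reproduce the token counts cited above is to tokenize the template
 * with the input variables left empty, using a library such as js-tiktoken
 * (an assumed dependency here, not one this module declares):
 *
 *   const { encodingForModel } = require('js-tiktoken');
 *   const enc = encodingForModel('gpt-3.5-turbo');
 *   const bare = _DEFAULT_SUMMARIZER_TEMPLATE
 *     .replace('{summary}', '')
 *     .replace('{new_lines}', '');
 *   console.log(enc.encode(bare).length); // should match the 98 noted above
 */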
/*
 * Without `{new_lines}`, token count is 27
 * We are counting this towards the max context tokens for summaries, rounded up to 30
 * If this prompt changes, use https://tiktokenizer.vercel.app/ to count the tokens
 */
const _CUT_OFF_SUMMARIZER = `The following text is cut-off:
{new_lines}

Summarize the content as best as you can, noting that it was cut-off.

Summary:`;

const CUT_OFF_PROMPT = new PromptTemplate({
  inputVariables: ['new_lines'],
  template: _CUT_OFF_SUMMARIZER,
});

module.exports = {
  SUMMARY_PROMPT,
  CUT_OFF_PROMPT,
};
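/*
 * Usage sketch (hypothetical caller; the require path and values below are
 * assumptions for illustration). Both exports are @langchain/core
 * `PromptTemplate` instances, so a caller renders them with the standard
 * async `format()` method:
 *
 *   const { SUMMARY_PROMPT, CUT_OFF_PROMPT } = require('./summaryPrompts');
 *
 *   const summaryInput = await SUMMARY_PROMPT.format({
 *     summary: 'The human asks about the weather. The AI says it is sunny.',
 *     new_lines: 'Human: Will it rain tomorrow?\nAI: Light rain is expected.',
 *   });
 *
 *   // `truncatedText` is a hypothetical variable holding the cut-off text.
 *   const cutOffInput = await CUT_OFF_PROMPT.format({ new_lines: truncatedText });
 */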