LibreChat/api/app/clients/tools/util/loadSpecs.js
Danny Avila 317a1bd8da
feat: ConversationSummaryBufferMemory (#973)
* refactor: pass model in message edit payload, use encoder in standalone util function

* feat: add summaryBuffer helper

* refactor(api/messages): use new countTokens helper and add auth middleware at top

* wip: ConversationSummaryBufferMemory

* refactor: move pre-generation helpers to prompts dir

* chore: remove console log

* chore: remove test as payload will no longer carry tokenCount

* chore: update getMessagesWithinTokenLimit JSDoc

* refactor: optimize getMessagesForConversation and also break on summary, feat(ci): getMessagesForConversation tests

* refactor(getMessagesForConvo): count '00000000-0000-0000-0000-000000000000' as root message

* chore: add newer model to token map

* fix: condition was pointing to prop of array instead of message prop

* refactor(BaseClient): use object for refineMessages param, rename 'summary' to 'summaryMessage', add previous_summary
refactor(getMessagesWithinTokenLimit): replace text and tokenCount if should summarize, summary, and summaryTokenCount are present
fix/refactor(handleContextStrategy): use the right comparison length for context diff, and replace payload first message when a summary is present

* chore: log previous_summary if debugging

* refactor(formatMessage): assume that if role is defined, it's a valid value

* refactor(getMessagesWithinTokenLimit): remove summary logic
refactor(handleContextStrategy): add usePrevSummary logic in case only summary was pruned
refactor(loadHistory): initial message query will return all ordered messages but keep track of the latest summary
refactor(getMessagesForConversation): use object for single param, edit jsdoc, edit all files using the method
refactor(ChatGPTClient): order messages before buildPrompt is called, TODO: add convoSumBuffMemory logic

* fix: undefined handling and summarizing only when shouldRefineContext is true

* chore(BaseClient): fix test results omitting system role for summaries and test edge case

* chore: export summaryBuffer from index file

* refactor(OpenAIClient/BaseClient): move refineMessages to subclass, implement LLM initialization for summaryBuffer

* feat: add OPENAI_SUMMARIZE to enable summarizing, refactor: rename client prop 'shouldRefineContext' to 'shouldSummarize', change contextStrategy value to 'summarize' from 'refine'

* refactor: rename refineMessages method to summarizeMessages for clarity

* chore: clarify summary future intent in .env.example

* refactor(initializeLLM): handle case for either 'model' or 'modelName' being passed

* feat(gptPlugins): enable summarization for plugins

* refactor(gptPlugins): utilize new initializeLLM method and formatting methods for messages, use payload array for currentMessages and assign pastMessages sooner

* refactor(agents): use ConversationSummaryBufferMemory for both agent types

* refactor(formatMessage): optimize original method for langchain, add helper function for langchain messages, add JSDocs and tests

* refactor(summaryBuffer): add helper to createSummaryBufferMemory, and use new formatting helpers

* fix: forgot to spread formatMessages; also took the opportunity to pluralize the filename

* refactor: pass memory to tools, namely OpenAPI specs; not used (and may never be used) by the new method, but added for testing

* ci(formatMessages): add more exhaustive checks for langchain messages

* feat: add debug env var for OpenAI

* chore: delete unnecessary comments

* chore: add extra note about summary feature

* fix: remove tokenCount from payload instructions

* fix: failing test

* fix: only pass instructions to payload when defined or not empty object

* refactor: fromPromptMessages is deprecated, use renamed method fromMessages

* refactor: use 'includes' instead of 'startsWith' for extended OpenRouter compatibility

* fix(PluginsClient.buildPromptBody): handle undefined message strings

* chore: log langchain titling error

* feat: getModelMaxTokens helper

* feat: tokenSplit helper

* feat: summary prompts updated

* fix: optimize _CUT_OFF_SUMMARIZER prompt

* refactor(summaryBuffer): use custom summary prompt, allow prompt to be passed, pass humanPrefix and aiPrefix to memory, along with any future variables, rename messagesToRefine to context

* fix(summaryBuffer): handle edge case where messagesToRefine exceeds summary context,
refactor(BaseClient): allow custom maxContextTokens to be passed to getMessagesWithinTokenLimit, add defined check before unshifting summaryMessage, update shouldSummarize based on this
refactor(OpenAIClient): use getModelMaxTokens, use cut-off message method for summary if no messages were left after pruning

* fix(handleContextStrategy): handle case where incoming prompt is bigger than model context

* chore: rename refinedContent to splitText

* chore: remove unnecessary debug log
2023-09-26 21:02:28 -04:00


const fs = require('fs');
const path = require('path');
const { z } = require('zod');
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');

// The minimum manifest definition
const ManifestDefinition = z.object({
  schema_version: z.string().optional(),
  name_for_human: z.string(),
  name_for_model: z.string(),
  description_for_human: z.string(),
  description_for_model: z.string(),
  auth: z.object({}).optional(),
  api: z.object({
    // Spec URL, or the filename of the OpenAPI spec yaml file
    // located in api/app/clients/tools/.well-known/openapi
    url: z.string(),
    type: z.string().optional(),
    is_user_authenticated: z.boolean().nullable().optional(),
    has_user_authentication: z.boolean().nullable().optional(),
  }),
  // Used to override any params that the LLM consistently gets wrong
  params: z.object({}).optional(),
  logo_url: z.string().optional(),
  contact_email: z.string().optional(),
  legal_info_url: z.string().optional(),
});
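
/**
 * Validates a plugin manifest against ManifestDefinition.
 *
 * A hypothetical minimal manifest that would pass validation:
 *
 *   validateJson({
 *     name_for_human: 'Weather',
 *     name_for_model: 'weather',
 *     description_for_human: 'Get the current weather.',
 *     description_for_model: 'Fetches the current weather for a given location.',
 *     api: { url: 'weather.yaml' },
 *   });
 *
 * @param {object} json - The parsed manifest to validate.
 * @param {boolean} [verbose=true] - Log the validation error on failure.
 * @returns {object|false} The parsed manifest, or false if it does not conform.
 */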
function validateJson(json, verbose = true) {
  try {
    return ManifestDefinition.parse(json);
  } catch (error) {
    if (verbose) {
      console.debug('validateJson error', error);
    }
    return false;
  }
}

// Omit the `llm` argument to return the well-known JSONs as plain objects
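/**
 * Loads plugin manifests from the `.well-known` directory and, when an LLM is
 * provided, initializes each one as an OpenAPI plugin.
 *
 * @param {object} options
 * @param {object} [options.llm] - LLM instance passed to createOpenAPIPlugin; omit to get raw manifests.
 * @param {object} [options.user] - The requesting user, forwarded to the plugin.
 * @param {string} [options.message] - The current message, forwarded to the plugin.
 * @param {string[]} [options.tools=[]] - Manifest names (without `.json`) to load; falls back to all manifests when none are found.
 * @param {boolean} [options.map=false] - When true (with `llm`), return a map of async plugin constructors keyed by `name_for_model`.
 * @param {object} [options.memory] - Conversation memory, forwarded to the plugin.
 * @param {boolean} [options.verbose=false] - Enable debug logging.
 * @returns {Promise<object|Array>} A constructor map, an array of plugins, or an array of validated manifests.
 */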
async function loadSpecs({ llm, user, message, tools = [], map = false, memory, verbose = false }) {
  const directoryPath = path.join(__dirname, '..', '.well-known');
  let files = [];

  for (const tool of tools) {
    const filePath = path.join(directoryPath, `${tool}.json`);
    try {
      // If the access promise resolves, the file exists and can be added to the list
      await fs.promises.access(filePath, fs.constants.F_OK);
      files.push(`${tool}.json`);
    } catch (err) {
      console.error(`File ${tool}.json does not exist`);
    }
  }

  // Fall back to every JSON manifest in the directory when none of the requested tools were found
  if (files.length === 0) {
    files = (await fs.promises.readdir(directoryPath)).filter(
      (file) => path.extname(file) === '.json',
    );
  }

  const validJsons = [];
  const constructorMap = {};

  if (verbose) {
    console.debug('files', files);
  }

  for (const file of files) {
    if (path.extname(file) === '.json') {
      const filePath = path.join(directoryPath, file);
      const fileContent = await fs.promises.readFile(filePath, 'utf8');

      let json;
      try {
        json = JSON.parse(fileContent);
      } catch (err) {
        // Skip unparseable files instead of letting a single bad manifest crash the load
        verbose && console.debug(`Invalid JSON in ${file}`, err);
        continue;
      }

      if (!validateJson(json)) {
        verbose && console.debug('Invalid json', json);
        continue;
      }

      // With `map`, defer construction: store an async constructor keyed by name_for_model
      if (llm && map) {
        constructorMap[json.name_for_model] = async () =>
          await createOpenAPIPlugin({
            data: json,
            llm,
            message,
            memory,
            user,
            verbose,
          });
        continue;
      }

      // With an LLM but no map, construct the plugin immediately
      if (llm) {
        validJsons.push(createOpenAPIPlugin({ data: json, llm, verbose }));
        continue;
      }

      // No LLM: keep the raw, validated manifest
      validJsons.push(json);
    }
  }

  if (map) {
    return constructorMap;
  }

  const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);
  return plugins;
}

module.exports = {
  loadSpecs,
  validateJson,
  ManifestDefinition,
};
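
A minimal usage sketch, assuming a LangChain chat model and a hypothetical `weather.json` manifest in the `.well-known` directory (the model name, manifest name, and surrounding setup are illustrative, not part of this module):

const { ChatOpenAI } = require('langchain/chat_models/openai');
const { loadSpecs } = require('./loadSpecs');

(async () => {
  const llm = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 });

  // Initialized plugins for the requested manifests
  const plugins = await loadSpecs({ llm, tools: ['weather'], verbose: true });

  // Or defer construction: a map of async constructors keyed by name_for_model
  const constructors = await loadSpecs({ llm, tools: ['weather'], map: true });

  // Or skip the LLM entirely to get the raw, validated manifests
  const manifests = await loadSpecs({ tools: ['weather'] });
})();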