const { Agent } = require('langchain/agents');
const { LLMChain } = require('langchain/chains');
const { FunctionChatMessage, AIChatMessage } = require('langchain/schema');
const {
  ChatPromptTemplate,
  MessagesPlaceholder,
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate,
} = require('langchain/prompts');
const { logger } = require('~/config');

const PREFIX = 'You are a helpful AI assistant.';

function parseOutput(message) {
  if (message.additional_kwargs.function_call) {
    const function_call = message.additional_kwargs.function_call;
    return {
      tool: function_call.name,
      toolInput: function_call.arguments ? JSON.parse(function_call.arguments) : {},
      log: message.text,
    };
  } else {
    return { returnValues: { output: message.text }, log: message.text };
  }
}
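
// For reference, a sketch of the message shape parseOutput consumes (an assumed,
// illustrative example, not captured from a real API response):
//   { text: '', additional_kwargs: { function_call: { name: 'calculator', arguments: '{"input":"2+2"}' } } }
// which would parse to { tool: 'calculator', toolInput: { input: '2+2' }, log: '' }.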
class FunctionsAgent extends Agent {
  constructor(input) {
    super({ ...input, outputParser: undefined });
    this.tools = input.tools;
  }

  lc_namespace = ['langchain', 'agents', 'openai'];

  _agentType() {
    return 'openai-functions';
  }

  observationPrefix() {
    return 'Observation: ';
  }

  llmPrefix() {
    return 'Thought:';
  }

  _stop() {
    return ['Observation:'];
  }

  static createPrompt(_tools, fields) {
    const { prefix = PREFIX, currentDateString } = fields || {};

    return ChatPromptTemplate.fromMessages([
      SystemMessagePromptTemplate.fromTemplate(`Date: ${currentDateString}\n${prefix}`),
      new MessagesPlaceholder('chat_history'),
      HumanMessagePromptTemplate.fromTemplate('Query: {input}'),
      new MessagesPlaceholder('agent_scratchpad'),
    ]);
  }

  static fromLLMAndTools(llm, tools, args) {
    FunctionsAgent.validateTools(tools);
    const prompt = FunctionsAgent.createPrompt(tools, args);
    const chain = new LLMChain({
      prompt,
      llm,
      callbacks: args?.callbacks,
    });
    return new FunctionsAgent({
      llmChain: chain,
      allowedTools: tools.map((t) => t.name),
      tools,
    });
  }
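
  // constructScratchPad (below) turns intermediate agent steps into chat messages,
  // pairing each assistant function call with its tool result. An assumed,
  // illustrative sequence for one calculator step:
  //   new AIChatMessage('', { function_call: { name: 'calculator', arguments: '{"input":"2+2"}' } }),
  //   new FunctionChatMessage('4', 'calculator')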
  async constructScratchPad(steps) {
    return steps.flatMap(({ action, observation }) => [
      new AIChatMessage('', {
        function_call: {
          name: action.tool,
          arguments: JSON.stringify(action.toolInput),
        },
      }),
      new FunctionChatMessage(observation, action.tool),
    ]);
  }

  async plan(steps, inputs, callbackManager) {
    // Add scratchpad and stop to inputs
    const thoughts = await this.constructScratchPad(steps);
    const newInputs = Object.assign({}, inputs, { agent_scratchpad: thoughts });
    if (this._stop().length !== 0) {
      newInputs.stop = this._stop();
    }

    // Split inputs between prompt and llm
    const llm = this.llmChain.llm;
    const valuesForPrompt = Object.assign({}, newInputs);
    const valuesForLLM = {
      tools: this.tools,
    };
    for (let i = 0; i < this.llmChain.llm.callKeys.length; i++) {
      const key = this.llmChain.llm.callKeys[i];
      if (key in inputs) {
        valuesForLLM[key] = inputs[key];
        delete valuesForPrompt[key];
      }
    }

    const promptValue = await this.llmChain.prompt.formatPromptValue(valuesForPrompt);
    const message = await llm.predictMessages(
      promptValue.toChatMessages(),
      valuesForLLM,
      callbackManager,
    );
    logger.debug('[FunctionsAgent] plan message', message);
    return parseOutput(message);
  }
}

module.exports = FunctionsAgent;
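
/*
Example usage (a minimal sketch, not part of this module; assumes a `tools` array
of langchain tools and a function-calling capable chat model):

  const { ChatOpenAI } = require('langchain/chat_models/openai');
  const { AgentExecutor } = require('langchain/agents');

  const agent = FunctionsAgent.fromLLMAndTools(
    new ChatOpenAI({ modelName: 'gpt-4-0613', temperature: 0 }),
    tools,
    { currentDateString: new Date().toDateString() },
  );
  const executor = AgentExecutor.fromAgentAndTools({ agent, tools });
  const result = await executor.call({ input: 'What is 2 + 2?', chat_history: [] });
*/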