mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 08:50:15 +01:00
refactor(plugins): Improve OpenAPI handling, Show Multiple Plugins, & Other Improvements (#845)
* feat(PluginsClient.js): add conversationId to options object in the constructor feat(PluginsClient.js): add support for Code Interpreter plugin feat(PluginsClient.js): add support for Code Interpreter plugin in the availableTools manifest feat(CodeInterpreter.js): add CodeInterpreterTools module feat(CodeInterpreter.js): add RunCommand class feat(CodeInterpreter.js): add ReadFile class feat(CodeInterpreter.js): add WriteFile class feat(handleTools.js): add support for loading Code Interpreter plugin * chore(api): update langchain dependency to version 0.0.123 * fix(CodeInterpreter.js): add support for extracting environment from code fix(WriteFile.js): add support for extracting environment from data fix(extractionChain.js): add utility functions for creating extraction chain from Zod schema fix(handleTools.js): refactor getOpenAIKey function to handle user-provided API key fix(handleTools.js): pass model and openAIApiKey to CodeInterpreter constructor * fix(tools): rename CodeInterpreterTools to E2BTools fix(tools): rename code_interpreter pluginKey to e2b_code_interpreter * chore(PluginsClient.js): comment out unused import and function findMessageContent feat(PluginsClient.js): add support for CodeSherpa plugin feat(PluginsClient.js): add CodeSherpaTools to available tools feat(PluginsClient.js): update manifest.json to include CodeSherpa plugin feat(CodeSherpaTools.js): create RunCode and RunCommand classes for CodeSherpa plugin feat(E2BTools.js): Add E2BTools module for extracting environment from code and running commands, reading and writing files fix(codesherpa.js): Remove codesherpa module as it is no longer needed feat(handleTools.js): add support for CodeSherpaTools in loadTools function feat(loadToolSuite.js): create loadToolSuite utility function to load a suite of tools * feat(PluginsClient.js): add support for CodeSherpa v2 plugin feat(PluginsClient.js): add CodeSherpa v1 plugin to available tools feat(PluginsClient.js): add CodeSherpa v2 plugin to available tools feat(PluginsClient.js): update manifest.json for CodeSherpa v1 plugin feat(PluginsClient.js): update manifest.json for CodeSherpa v2 plugin feat(CodeSherpa.js): implement CodeSherpa plugin for interactive code and shell command execution feat(CodeSherpaTools.js): implement RunCode and RunCommand plugins for CodeSherpa v1 feat(CodeSherpaTools.js): update RunCode and RunCommand plugins for CodeSherpa v2 fix(handleTools.js): add CodeSherpa import statement fix(handleTools.js): change pluginKey from 'codesherpa' to 'codesherpa_tools' fix(handleTools.js): remove model and openAIApiKey from options object in e2b_code_interpreter tool fix(handleTools.js): remove openAIApiKey from options object in codesherpa_tools tool fix(loadToolSuite.js): remove model and openAIApiKey parameters from loadToolSuite function * feat(initializeFunctionsAgent.js): add prefix to agentArgs in initializeFunctionsAgent function The prefix is added to the agentArgs in the initializeFunctionsAgent function. This prefix is used to provide instructions to the agent when it receives any instructions from a webpage, plugin, or other tool. The agent will notify the user immediately and ask them if they wish to carry out or ignore the instructions. * feat(PluginsClient.js): add ChatTool to the list of tools if it meets the conditions feat(tools/index.js): import and export ChatTool feat(ChatTool.js): create ChatTool class with necessary properties and methods * fix(initializeFunctionsAgent.js): update PREFIX message to include sharing all output from the tool fix(E2BTools.js): update descriptions for RunCommand, ReadFile, and WriteFile plugins to provide more clarity and context * chore: rebuild package-lock after rebase * chore: remove deleted file from rebase * wip: refactor plugin message handling to mirror chat.openai.com, handle incoming stream for plugin use * wip: new plugin handling * wip: show multiple plugins handling * feat(plugins): save new plugins array * chore: bump langchain * feat(experimental): support streaming in between plugins * refactor(PluginsClient): factor out helper methods to avoid bloating the class, refactor(gptPlugins): use agent action for mapping the name of action * fix(handleTools): fix tests by adding condition to return original toolFunctions map * refactor(MessageContent): Allow the last index to be last in case it has text (may change with streaming) * feat(Plugins): add handleParsingErrors, useful when LLM does not invoke function params * chore: edit out experimental codesherpa integration * refactor(OpenAPIPlugin): rework tool to be 'function-first', as the spec functions are explicitly passed to agent model * refactor(initializeFunctionsAgent): improve error handling and system message * refactor(CodeSherpa, Wolfram): optimize token usage by delegating bulk of instructions to system message * style(Plugins): match official style with input/outputs * chore: remove unnecessary console logs used for testing * fix(abortMiddleware): render markdown when message is aborted * feat(plugins): add BrowserOp * refactor(OpenAPIPlugin): improve prompt handling * fix(useGenerations): hide edit button when message is submitting/streaming * refactor(loadSpecs): optimize OpenAPI spec loading by only loading requested specs instead of all of them * fix(loadSpecs): will retain original behavior when no tools are passed to the function * fix(MessageContent): ensure cursor only shows up for last message and last display index fix(Message): show legacy plugin and pass isLast to Content * chore: remove console.logs * docs: update docs based on breaking changes and new features refactor(structured/SD): use description_for_model for detailed prompting * docs(azure): make plugins section more clear * refactor(structured/SD): change default payload to SD-WebUI to prefer realism and config for SDXL * refactor(structured/SD): further improve system message prompt * docs: update breaking changes after rebase * refactor(MessageContent): factor out EditMessage, types, Container to separate files, rename Content -> Markdown * fix(CodeInterpreter): linting errors * chore: reduce browser console logs from message streams * chore: re-enable debug logs for plugins/langchain to help with user troubleshooting * chore(manifest.json): add [Experimental] tag to CodeInterpreter plugins, which are not intended as the end-all be-all implementation of this feature for Librechat
This commit is contained in:
parent
66b8580487
commit
d3e7627046
51 changed files with 2829 additions and 1577 deletions
|
|
@ -1,12 +1,10 @@
|
|||
const OpenAIClient = require('./OpenAIClient');
|
||||
const { ChatOpenAI } = require('langchain/chat_models/openai');
|
||||
const { CallbackManager } = require('langchain/callbacks');
|
||||
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents/');
|
||||
const { findMessageContent } = require('../../utils');
|
||||
const { loadTools } = require('./tools/util');
|
||||
const { SelfReflectionTool } = require('./tools/');
|
||||
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
|
||||
const { instructions, imageInstructions, errorInstructions } = require('./prompts/instructions');
|
||||
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents/');
|
||||
const { addImages, createLLM, buildErrorInput, buildPromptPrefix } = require('./agents/methods/');
|
||||
const { SelfReflectionTool } = require('./tools/');
|
||||
const { loadTools } = require('./tools/util');
|
||||
|
||||
class PluginsClient extends OpenAIClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
|
|
@ -19,89 +17,6 @@ class PluginsClient extends OpenAIClient {
|
|||
this.executor = null;
|
||||
}
|
||||
|
||||
getActions(input = null) {
|
||||
let output = 'Internal thoughts & actions taken:\n"';
|
||||
let actions = input || this.actions;
|
||||
|
||||
if (actions[0]?.action && this.functionsAgent) {
|
||||
actions = actions.map((step) => ({
|
||||
log: `Action: ${step.action?.tool || ''}\nInput: ${
|
||||
JSON.stringify(step.action?.toolInput) || ''
|
||||
}\nObservation: ${step.observation}`,
|
||||
}));
|
||||
} else if (actions[0]?.action) {
|
||||
actions = actions.map((step) => ({
|
||||
log: `${step.action.log}\nObservation: ${step.observation}`,
|
||||
}));
|
||||
}
|
||||
|
||||
actions.forEach((actionObj, index) => {
|
||||
output += `${actionObj.log}`;
|
||||
if (index < actions.length - 1) {
|
||||
output += '\n';
|
||||
}
|
||||
});
|
||||
|
||||
return output + '"';
|
||||
}
|
||||
|
||||
buildErrorInput(message, errorMessage) {
|
||||
const log = errorMessage.includes('Could not parse LLM output:')
|
||||
? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
|
||||
: `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;
|
||||
|
||||
return `
|
||||
${log}
|
||||
|
||||
${this.getActions()}
|
||||
|
||||
Human's last message: ${message}
|
||||
`;
|
||||
}
|
||||
|
||||
buildPromptPrefix(result, message) {
|
||||
if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (
|
||||
result?.intermediateSteps?.length === 1 &&
|
||||
result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const internalActions =
|
||||
result?.intermediateSteps?.length > 0
|
||||
? this.getActions(result.intermediateSteps)
|
||||
: 'Internal Actions Taken: None';
|
||||
|
||||
const toolBasedInstructions = internalActions.toLowerCase().includes('image')
|
||||
? imageInstructions
|
||||
: '';
|
||||
|
||||
const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';
|
||||
|
||||
const preliminaryAnswer =
|
||||
result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
|
||||
const prefix = preliminaryAnswer
|
||||
? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
|
||||
: 'respond to the User Message below based on your preliminary thoughts & actions.';
|
||||
|
||||
return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
|
||||
${preliminaryAnswer}
|
||||
Reply conversationally to the User based on your ${
|
||||
preliminaryAnswer ? 'preliminary answer, ' : ''
|
||||
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
|
||||
${
|
||||
preliminaryAnswer
|
||||
? ''
|
||||
: '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
|
||||
}You must cite sources if you are using any web links. ${toolBasedInstructions}
|
||||
Only respond with your conversational reply to the following User Message:
|
||||
"${message}"`;
|
||||
}
|
||||
|
||||
setOptions(options) {
|
||||
this.agentOptions = options.agentOptions;
|
||||
this.functionsAgent = this.agentOptions?.agent === 'functions';
|
||||
|
|
@ -149,27 +64,6 @@ Only respond with your conversational reply to the following User Message:
|
|||
};
|
||||
}
|
||||
|
||||
createLLM(modelOptions, configOptions) {
|
||||
let azure = {};
|
||||
let credentials = { openAIApiKey: this.openAIApiKey };
|
||||
let configuration = {
|
||||
apiKey: this.openAIApiKey,
|
||||
};
|
||||
|
||||
if (this.azure) {
|
||||
credentials = {};
|
||||
configuration = {};
|
||||
({ azure } = this);
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('createLLM: configOptions');
|
||||
console.debug(configOptions);
|
||||
}
|
||||
|
||||
return new ChatOpenAI({ credentials, configuration, ...azure, ...modelOptions }, configOptions);
|
||||
}
|
||||
|
||||
async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
|
||||
const modelOptions = {
|
||||
modelName: this.agentOptions.model,
|
||||
|
|
@ -182,7 +76,12 @@ Only respond with your conversational reply to the following User Message:
|
|||
configOptions.basePath = this.langchainProxy;
|
||||
}
|
||||
|
||||
const model = this.createLLM(modelOptions, configOptions);
|
||||
const model = createLLM({
|
||||
modelOptions,
|
||||
configOptions,
|
||||
openAIApiKey: this.openAIApiKey,
|
||||
azure: this.azure,
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug(
|
||||
|
|
@ -190,27 +89,23 @@ Only respond with your conversational reply to the following User Message:
|
|||
);
|
||||
}
|
||||
|
||||
this.availableTools = await loadTools({
|
||||
this.tools = await loadTools({
|
||||
user,
|
||||
model,
|
||||
tools: this.options.tools,
|
||||
functions: this.functionsAgent,
|
||||
options: {
|
||||
openAIApiKey: this.openAIApiKey,
|
||||
conversationId: this.conversationId,
|
||||
debug: this.options?.debug,
|
||||
message,
|
||||
},
|
||||
});
|
||||
// load tools
|
||||
for (const tool of this.options.tools) {
|
||||
const validTool = this.availableTools[tool];
|
||||
|
||||
if (tool === 'plugins') {
|
||||
const plugins = await validTool();
|
||||
this.tools = [...this.tools, ...plugins];
|
||||
} else if (validTool) {
|
||||
this.tools.push(await validTool());
|
||||
}
|
||||
if (this.tools.length > 0 && !this.functionsAgent) {
|
||||
this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));
|
||||
} else if (this.tools.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
|
|
@ -220,13 +115,7 @@ Only respond with your conversational reply to the following User Message:
|
|||
console.debug(this.tools.map((tool) => tool.name));
|
||||
}
|
||||
|
||||
if (this.tools.length > 0 && !this.functionsAgent) {
|
||||
this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));
|
||||
} else if (this.tools.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const handleAction = (action, callback = null) => {
|
||||
const handleAction = (action, runId, callback = null) => {
|
||||
this.saveLatestAction(action);
|
||||
|
||||
if (this.options.debug) {
|
||||
|
|
@ -234,7 +123,7 @@ Only respond with your conversational reply to the following User Message:
|
|||
}
|
||||
|
||||
if (typeof callback === 'function') {
|
||||
callback(action);
|
||||
callback(action, runId);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -258,8 +147,8 @@ Only respond with your conversational reply to the following User Message:
|
|||
verbose: this.options.debug,
|
||||
returnIntermediateSteps: true,
|
||||
callbackManager: CallbackManager.fromHandlers({
|
||||
async handleAgentAction(action) {
|
||||
handleAction(action, onAgentAction);
|
||||
async handleAgentAction(action, runId) {
|
||||
handleAction(action, runId, onAgentAction);
|
||||
},
|
||||
async handleChainEnd(action) {
|
||||
if (typeof onChainEnd === 'function') {
|
||||
|
|
@ -274,12 +163,17 @@ Only respond with your conversational reply to the following User Message:
|
|||
}
|
||||
}
|
||||
|
||||
async executorCall(message, signal) {
|
||||
async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
|
||||
let errorMessage = '';
|
||||
const maxAttempts = 1;
|
||||
|
||||
for (let attempts = 1; attempts <= maxAttempts; attempts++) {
|
||||
const errorInput = this.buildErrorInput(message, errorMessage);
|
||||
const errorInput = buildErrorInput({
|
||||
message,
|
||||
errorMessage,
|
||||
actions: this.actions,
|
||||
functionsAgent: this.functionsAgent,
|
||||
});
|
||||
const input = attempts > 1 ? errorInput : message;
|
||||
|
||||
if (this.options.debug) {
|
||||
|
|
@ -291,12 +185,28 @@ Only respond with your conversational reply to the following User Message:
|
|||
}
|
||||
|
||||
try {
|
||||
this.result = await this.executor.call({ input, signal });
|
||||
this.result = await this.executor.call({ input, signal }, [
|
||||
{
|
||||
async handleToolStart(...args) {
|
||||
await onToolStart(...args);
|
||||
},
|
||||
async handleToolEnd(...args) {
|
||||
await onToolEnd(...args);
|
||||
},
|
||||
async handleLLMEnd(output) {
|
||||
const { generations } = output;
|
||||
const { text } = generations[0][0];
|
||||
if (text && typeof stream === 'function') {
|
||||
await stream(text);
|
||||
}
|
||||
},
|
||||
},
|
||||
]);
|
||||
break; // Exit the loop if the function call is successful
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
errorMessage = err.message;
|
||||
const content = findMessageContent(message);
|
||||
let content = '';
|
||||
if (content) {
|
||||
errorMessage = content;
|
||||
break;
|
||||
|
|
@ -311,31 +221,6 @@ Only respond with your conversational reply to the following User Message:
|
|||
}
|
||||
}
|
||||
|
||||
addImages(intermediateSteps, responseMessage) {
|
||||
if (!intermediateSteps || !responseMessage) {
|
||||
return;
|
||||
}
|
||||
|
||||
intermediateSteps.forEach((step) => {
|
||||
const { observation } = step;
|
||||
if (!observation || !observation.includes('![')) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Extract the image file path from the observation
|
||||
const observedImagePath = observation.match(/\(\/images\/.*\.\w*\)/g)[0];
|
||||
|
||||
// Check if the responseMessage already includes the image file path
|
||||
if (!responseMessage.text.includes(observedImagePath)) {
|
||||
// If the image file path is not found, append the whole observation
|
||||
responseMessage.text += '\n' + observation;
|
||||
if (this.options.debug) {
|
||||
console.debug('added image from intermediateSteps');
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async handleResponseMessage(responseMessage, saveOptions, user) {
|
||||
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
|
||||
responseMessage.completionTokens = responseMessage.tokenCount;
|
||||
|
|
@ -351,7 +236,9 @@ Only respond with your conversational reply to the following User Message:
|
|||
this.setOptions(opts);
|
||||
return super.sendMessage(message, opts);
|
||||
}
|
||||
console.log('Plugins sendMessage', message, opts);
|
||||
if (this.options.debug) {
|
||||
console.log('Plugins sendMessage', message, opts);
|
||||
}
|
||||
const {
|
||||
user,
|
||||
conversationId,
|
||||
|
|
@ -360,8 +247,11 @@ Only respond with your conversational reply to the following User Message:
|
|||
userMessage,
|
||||
onAgentAction,
|
||||
onChainEnd,
|
||||
onToolStart,
|
||||
onToolEnd,
|
||||
} = await this.handleStartMethods(message, opts);
|
||||
|
||||
this.conversationId = conversationId;
|
||||
this.currentMessages.push(userMessage);
|
||||
|
||||
let {
|
||||
|
|
@ -413,8 +303,18 @@ Only respond with your conversational reply to the following User Message:
|
|||
onAgentAction,
|
||||
onChainEnd,
|
||||
signal: this.abortController.signal,
|
||||
onProgress: opts.onProgress,
|
||||
});
|
||||
|
||||
// const stream = async (text) => {
|
||||
// await this.generateTextStream.call(this, text, opts.onProgress, { delay: 1 });
|
||||
// };
|
||||
await this.executorCall(message, {
|
||||
signal: this.abortController.signal,
|
||||
// stream,
|
||||
onToolStart,
|
||||
onToolEnd,
|
||||
});
|
||||
await this.executorCall(message, this.abortController.signal);
|
||||
|
||||
// If message was aborted mid-generation
|
||||
if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
|
||||
|
|
@ -422,10 +322,19 @@ Only respond with your conversational reply to the following User Message:
|
|||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
if (this.agentOptions.skipCompletion && this.result.output && this.functionsAgent) {
|
||||
const partialText = opts.getPartialText();
|
||||
const trimmedPartial = opts.getPartialText().replaceAll(':::plugin:::\n', '');
|
||||
responseMessage.text =
|
||||
trimmedPartial.length === 0 ? `${partialText}${this.result.output}` : partialText;
|
||||
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
|
||||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
if (this.agentOptions.skipCompletion && this.result.output) {
|
||||
responseMessage.text = this.result.output;
|
||||
this.addImages(this.result.intermediateSteps, responseMessage);
|
||||
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 8 });
|
||||
addImages(this.result.intermediateSteps, responseMessage);
|
||||
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
|
||||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
|
|
@ -434,7 +343,11 @@ Only respond with your conversational reply to the following User Message:
|
|||
console.debug(this.result);
|
||||
}
|
||||
|
||||
const promptPrefix = this.buildPromptPrefix(this.result, message);
|
||||
const promptPrefix = buildPromptPrefix({
|
||||
result: this.result,
|
||||
message,
|
||||
functionsAgent: this.functionsAgent,
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Plugins: promptPrefix');
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue