const { Agent } = require('langchain/agents');
const { LLMChain } = require('langchain/chains');
const { FunctionChatMessage, AIChatMessage } = require('langchain/schema');
const {
  ChatPromptTemplate,
  MessagesPlaceholder,
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate,
} = require('langchain/prompts');

const PREFIX = 'You are a helpful AI assistant.';
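
// Turn the model's reply into an agent step: a `function_call` in
// `additional_kwargs` becomes an action ({ tool, toolInput, log });
// otherwise the plain text is the final answer ({ returnValues, log }).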
function parseOutput(message) {
  if (message.additional_kwargs.function_call) {
    const function_call = message.additional_kwargs.function_call;
    return {
      tool: function_call.name,
      toolInput: function_call.arguments ? JSON.parse(function_call.arguments) : {},
      log: message.text,
    };
  } else {
    return { returnValues: { output: message.text }, log: message.text };
  }
}
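
// Agent that selects tools via the OpenAI function-calling API rather than
// parsing ReAct-style text output.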
class FunctionsAgent extends Agent {
  constructor(input) {
    super({ ...input, outputParser: undefined });
    this.tools = input.tools;
  }
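
  // Identifiers and ReAct-style affordances expected by the Agent base class.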
  lc_namespace = ['langchain', 'agents', 'openai'];

  _agentType() {
    return 'openai-functions';
  }

  observationPrefix() {
    return 'Observation: ';
  }

  llmPrefix() {
    return 'Thought:';
  }

  _stop() {
    return ['Observation:'];
  }
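
  // Assemble the chat prompt: a system preamble carrying the current date,
  // the prior conversation, the user's query, and a slot for intermediate steps.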
  static createPrompt(_tools, fields) {
    const { prefix = PREFIX, currentDateString } = fields || {};

    return ChatPromptTemplate.fromPromptMessages([
      SystemMessagePromptTemplate.fromTemplate(`Date: ${currentDateString}\n${prefix}`),
      new MessagesPlaceholder('chat_history'),
      HumanMessagePromptTemplate.fromTemplate('Query: {input}'),
      new MessagesPlaceholder('agent_scratchpad'),
    ]);
  }
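
  // Wire the prompt and model into an LLMChain and return a configured agent.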
  static fromLLMAndTools(llm, tools, args) {
    FunctionsAgent.validateTools(tools);
    const prompt = FunctionsAgent.createPrompt(tools, args);
    const chain = new LLMChain({
      prompt,
      llm,
      callbacks: args?.callbacks,
    });
    return new FunctionsAgent({
      llmChain: chain,
      allowedTools: tools.map((t) => t.name),
      tools,
    });
  }
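
  // Replay each prior (action, observation) pair as an assistant message
  // carrying the function_call, followed by the tool's result as a function message.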
  async constructScratchPad(steps) {
    return steps.flatMap(({ action, observation }) => [
      new AIChatMessage('', {
        function_call: {
          name: action.tool,
          arguments: JSON.stringify(action.toolInput),
        },
      }),
      new FunctionChatMessage(observation, action.tool),
    ]);
  }
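
  // Decide the next step: format the prompt with the scratchpad, route any
  // LLM call options past the prompt, invoke the model, and parse its reply.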
  async plan(steps, inputs, callbackManager) {
    // Add scratchpad and stop to inputs
    const thoughts = await this.constructScratchPad(steps);
    const newInputs = Object.assign({}, inputs, { agent_scratchpad: thoughts });
    if (this._stop().length !== 0) {
      newInputs.stop = this._stop();
    }

    // Split inputs between prompt and llm
    const llm = this.llmChain.llm;
    const valuesForPrompt = Object.assign({}, newInputs);
    const valuesForLLM = {
      tools: this.tools,
    };
    for (const key of llm.callKeys) {
      if (key in inputs) {
        valuesForLLM[key] = inputs[key];
        delete valuesForPrompt[key];
      }
    }

    const promptValue = await this.llmChain.prompt.formatPromptValue(valuesForPrompt);
    const message = await llm.predictMessages(
      promptValue.toChatMessages(),
      valuesForLLM,
      callbackManager,
    );
    return parseOutput(message);
  }
}

module.exports = FunctionsAgent;
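
/*
 * Usage sketch (illustrative only, not part of this module). It assumes the
 * langchain version this file targets, with `ChatOpenAI` exported from
 * 'langchain/chat_models/openai' and `AgentExecutor` from 'langchain/agents';
 * the model name, `tools` array, and inputs are placeholders:
 *
 *   const { AgentExecutor } = require('langchain/agents');
 *   const { ChatOpenAI } = require('langchain/chat_models/openai');
 *
 *   const llm = new ChatOpenAI({ modelName: 'gpt-4-0613', temperature: 0 });
 *   const agent = FunctionsAgent.fromLLMAndTools(llm, tools, {
 *     currentDateString: new Date().toDateString(),
 *   });
 *   const executor = AgentExecutor.fromAgentAndTools({ agent, tools });
 *   const result = await executor.call({ input: 'What is 2 + 2?', chat_history: [] });
 */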