LibreChat/api/app/clients/PluginsClient.js
Danny Avila 8819e83d2c
refactor: Client Classes & Azure OpenAI as a separate Endpoint (#532)
* refactor: start new client classes, test localAi support

* feat: create base class, extend chatgpt from base

refactor(BaseClient.js): change userId parameter to user
feat(OpenAIClient.js): add sendMessage method
refactor(OpenAIClient.js): change getConversation method to use user parameter instead of userId
refactor(OpenAIClient.js): change saveMessageToDatabase method to use user parameter instead of userId
refactor(OpenAIClient.js): change buildPrompt method to use messages parameter instead of orderedMessages
feat(index.js): export client classes
refactor(askGPTPlugins.js): use req.body.token or process.env.OPENAI_API_KEY as OpenAI API key
refactor(index.js): comment out askOpenAI route
feat(index.js): add openAI route

feat(openAI.js): add new route for OpenAI API requests with support for progress updates and aborting requests.

* refactor(BaseClient.js): use optional chaining operator to access messageId property
refactor(OpenAIClient.js): use orderedMessages instead of messages to build prompt
refactor(OpenAIClient.js): use optional chaining operator to access messageId property
refactor(fetch-polyfill.js): remove fetch polyfill
refactor(openAI.js): comment out debug option in clientOptions

* refactor: update import statements and remove unused imports in several files
feat: add getAzureCredentials function to azureUtils module
docs: update comments in azureUtils module
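
A sketch of that credentials helper, assuming the AZURE_* variable names referenced in the Azure docs added later in this PR:

// Collect the Azure OpenAI environment variables in one place so clients
// can consume them as a single credentials object.
const getAzureCredentials = () => ({
  azureOpenAIApiKey: process.env.AZURE_API_KEY,
  azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
  azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
  azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
});

module.exports = { getAzureCredentials };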

* refactor(utils): rename migrateConversations to migrateDataToFirstUser for clarity and consistency

* feat(chatgpt-client.js): add getAzureCredentials function to retrieve Azure credentials
feat(chatgpt-client.js): use getAzureCredentials function to generate reverseProxyUrl
feat(OpenAIClient.js): add isChatCompletion property to determine if chat completion model is used
feat(OpenAIClient.js): add saveOptions parameter to sendMessage and buildPrompt methods
feat(OpenAIClient.js): modify buildPrompt method to handle chat completion model
feat(openAI.js): modify endpointOption to include modelOptions instead of individual options
refactor(OpenAIClient.js): modify getDelta property to use isChatCompletion property instead of isChatGptModel property
refactor(OpenAIClient.js): modify sendMessage method to use saveOptions parameter instead of modelOptions parameter
refactor(OpenAIClient.js): modify buildPrompt method to use saveOptions parameter instead of modelOptions parameter
refactor(OpenAIClient.js): modify ask method to include endpointOption parameter

* chore: delete draft file

* refactor(OpenAIClient.js): extract sendCompletion method from sendMessage method for reusability

* refactor(BaseClient.js): move sendMessage method to BaseClient class
feat(OpenAIClient.js): inherit from BaseClient class and implement necessary methods and properties for OpenAIClient class.

* refactor(BaseClient.js): rename getBuildPromptOptions to getBuildMessagesOptions
feat(BaseClient.js): add buildMessages method to BaseClient class
fix(ChatGPTClient.js): use message.text instead of message.message
refactor(ChatGPTClient.js): rename buildPromptBody to buildMessagesBody
refactor(ChatGPTClient.js): remove console.debug statement and add debug log for prompt variable

refactor(OpenAIClient.js): move setOptions method to the bottom of the class
feat(OpenAIClient.js): add support for cl100k_base encoding
feat(OpenAIClient.js): add support for unofficial chat GPT models
feat(OpenAIClient.js): add support for custom modelOptions
feat(OpenAIClient.js): add caching for tokenizers
feat(OpenAIClient.js): add freeAndInitializeEncoder method to free and reinitialize tokenizers
refactor(OpenAIClient.js): rename getBuildPromptOptions to getBuildMessagesOptions
refactor(OpenAIClient.js): rename buildPrompt to buildMessages
refactor(openAI.js): remove endpointOption from ask function arguments

* refactor(ChatGPTClient.js, OpenAIClient.js): improve code readability and consistency

- In ChatGPTClient.js, update the roleLabel and messageString variables to handle cases where the message object does not have an isCreatedByUser property or a role property with a value of 'user'.
- In OpenAIClient.js, rename the freeAndInitializeEncoder method to freeAndResetEncoder to better reflect its functionality, and update all call sites. Also update the getTokenCount method to handle errors by calling freeAndResetEncoder; a sketch of the resulting pattern follows.
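
A minimal sketch of that caching-and-reset pattern, assuming the `@dqbd/tiktoken` tokenizer (helper names are illustrative; the real implementation is in OpenAIClient.js):

const { get_encoding } = require('@dqbd/tiktoken');

// Cache one encoder per encoding so repeated token counts don't
// re-initialize the (expensive) tokenizer.
const tokenizersCache = {};

function getTokenizer(encoding = 'cl100k_base') {
  if (!tokenizersCache[encoding]) {
    tokenizersCache[encoding] = get_encoding(encoding);
  }
  return tokenizersCache[encoding];
}

// freeAndResetEncoder: free the WASM-backed encoder and re-create it,
// e.g. after an encoding error leaves it in a bad state.
function freeAndResetEncoder(encoding = 'cl100k_base') {
  tokenizersCache[encoding]?.free();
  delete tokenizersCache[encoding];
  return getTokenizer(encoding);
}

function getTokenCount(text) {
  try {
    return getTokenizer().encode(text, 'all').length;
  } catch {
    // Reset the encoder and retry once on failure.
    return freeAndResetEncoder().encode(text, 'all').length;
  }
}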

* refactor(OpenAIClient.js): extract instructions object to a separate variable and add it to payload after formatted messages
fix(OpenAIClient.js): handle cases where progressMessage.choices is undefined or empty

* refactor(BaseClient.js): extract addInstructions method from sendMessage method
feat(OpenAIClient.js): add maxTokensMap object to map maximum tokens for each model
refactor(OpenAIClient.js): use addInstructions method in buildMessages method instead of manually building the payload list
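
In outline (exact values and shapes may differ; see OpenAIClient.js and BaseClient.js): maxTokensMap is a plain lookup from model name to context window, and addInstructions splices the instructions payload in just before the latest message:

// Context-window sizes per model (illustrative subset).
const maxTokensMap = {
  'gpt-4': 8191,
  'gpt-4-32k': 32767,
  'gpt-3.5-turbo': 4095,
  'gpt-3.5-turbo-16k': 15999,
};

// Insert the instructions message right before the last (newest) message,
// so the model reads the instructions closest to the user's latest input.
function addInstructions(messages, instructions) {
  if (!instructions) {
    return messages;
  }
  if (messages.length === 0) {
    return [instructions];
  }
  const payload = [...messages];
  const latestMessage = payload.pop();
  return [...payload, instructions, latestMessage];
}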

* refactor(OpenAIClient.js): remove unnecessary condition for modelOptions.model property in buildMessages method

* feat(BaseClient.js): add support for token count tracking and context strategy
feat(OpenAIClient.js): add support for token count tracking and context strategy
feat(Message.js): add tokenCount field to Message schema and updateMessage function
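
The schema side of this change is small; a sketch (surrounding fields are illustrative, tokenCount is the addition, and updateMessage persists it):

const mongoose = require('mongoose');

const messageSchema = new mongoose.Schema({
  messageId: { type: String, unique: true, required: true },
  conversationId: { type: String, required: true },
  text: { type: String },
  // New: persist the message's token count so the context window can be
  // rebuilt later without re-encoding every stored message.
  tokenCount: { type: Number },
});

module.exports = mongoose.model('Message', messageSchema);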

* refactor(BaseClient.js): add support for refining messages based on token limit
feat(OpenAIClient.js): add support for context refinement strategy
refactor(OpenAIClient.js): use context refinement strategy in message sending
refactor(server/index.js): improve code readability by breaking long lines

* refactor(BaseClient.js): change `remainingContext` to `remainingContextTokens` for clarity
feat(BaseClient.js): add `refinePrompt` and `refinePromptTemplate` to handle message refinement
feat(BaseClient.js): add `refineMessages` method to refine messages
feat(BaseClient.js): add `handleContextStrategy` method to handle context strategy
feat(OpenAIClient.js): add `abortController` to `buildPrompt` method options
refactor(OpenAIClient.js): change `payload` and `tokenCountMap` to let variables in `handleContextStrategy` method
refactor(BaseClient.js): change `remainingContext` to `remainingContextTokens` in `handleContextStrategy` method for consistency
refactor(BaseClient.js): change `remainingContext` to `remainingContextTokens` in `getMessagesWithinTokenLimit` method for consistency
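
The core of getMessagesWithinTokenLimit is a newest-first walk that keeps messages while they fit the remaining budget (a sketch, assuming each message carries the tokenCount field added earlier):

function getMessagesWithinTokenLimit(messages, maxContextTokens) {
  let remainingContextTokens = maxContextTokens;
  const context = [];

  // Walk from newest to oldest; stop at the first message that doesn't fit.
  for (let i = messages.length - 1; i >= 0; i--) {
    const message = messages[i];
    if (message.tokenCount > remainingContextTokens) {
      break;
    }
    remainingContextTokens -= message.tokenCount;
    context.unshift(message);
  }

  // Anything left over is what the refinement step would summarize.
  const messagesToRefine = messages.slice(0, messages.length - context.length);
  return { context, remainingContextTokens, messagesToRefine };
}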

* chore(openAI.js): comment out contextStrategy option in clientOptions

* chore(openAI.js): comment out debug option in clientOptions object

* test: BaseClient tests in progress

* test: Complete OpenAIClient & BaseClient tests

* fix(OpenAIClient.js): remove unnecessary whitespace
fix(OpenAIClient.js): remove unused variables and comments
fix(OpenAIClient.test.js): combine getTokenCount and freeAndResetEncoder tests

* chore(.eslintrc.js): add rule for maximum of 1 empty line
feat(ask/openAI.js): add abortMessage utility function
fix(ask/openAI.js): handle error and abort message if partial text is less than 2 characters
feat(utils/index.js): export abortMessage utility function
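
A hedged sketch of what such a helper can look like (the registry, names, and Express response shapes here are assumptions, not the actual signature):

// Registry of in-flight generations, keyed by message/conversation.
const abortControllers = new Map();

// Stop an in-flight generation and finalize the response with whatever
// partial text was produced so far.
function abortMessage(abortKey, res) {
  const entry = abortControllers.get(abortKey);
  if (!entry) {
    return res.status(404).json({ message: 'Request not found' });
  }

  entry.controller.abort();
  abortControllers.delete(abortKey);

  // If almost nothing was generated, report an error instead of a message.
  if ((entry.partialText ?? '').length < 2) {
    return res.status(500).json({ message: 'Request aborted before a reply was generated' });
  }

  return res.json({ final: true, text: entry.partialText });
}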

* test: complete additional tests

* feat: Azure OpenAI as a separate endpoint

* chore: remove extraneous console logs

* fix(azureOpenAI): use chatCompletion endpoint

* chore(initializeClient.js): delete initializeClient.js file

chore(askOpenAI.js): delete old OpenAI route handler

chore(handlers.js): remove trailing whitespace in thought variable assignment

* chore(chatgpt-client.js): remove unused chatgpt-client.js file
refactor(index.js): remove askClient import and export from index.js

* chore(chatgpt-client.tokens.js): update test script for memory usage and encoding performance

The test script in `chatgpt-client.tokens.js` has been updated to measure the memory usage and encoding performance of the client. The script now includes information about the initial memory usage, peak memory usage, final memory usage, and memory usage after a timeout. It also provides insights into the number of encoding requests that can be processed per second.

The script has been modified to use the `OpenAIClient` class instead of the `ChatGPTClient` class. Additionally, the number of iterations for the encoding loop has been reduced to 10,000.

A timeout function has been added to simulate a delay of 15 seconds. After the timeout, the memory usage is measured again.

The script now handles uncaught exceptions and logs any errors that occur, except for errors related to failed fetch requests.

Note: This is a test script and should not be used in production.
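
Roughly, the updated script follows this shape (the require path and log labels are illustrative):

const { OpenAIClient } = require('../../app/clients');

const printMemory = (label) => {
  const { heapUsed } = process.memoryUsage();
  console.log(`${label}: ${(heapUsed / 1024 / 1024).toFixed(2)} MiB`);
};

const client = new OpenAIClient(process.env.OPENAI_API_KEY, {});

printMemory('Initial memory usage');

const iterations = 10000;
const start = Date.now();
for (let i = 0; i < iterations; i++) {
  client.getTokenCount(`sample text ${i}`);
}
const seconds = (Date.now() - start) / 1000;
console.log(`~${Math.round(iterations / seconds)} encoding requests per second`);
printMemory('Peak memory usage');

// Measure once more after an idle period to see whether memory is reclaimed.
setTimeout(() => printMemory('Memory usage after timeout'), 15000);

// Log unexpected errors, but ignore failed fetch requests.
process.on('uncaughtException', (err) => {
  if (!err.message.includes('fetch failed')) {
    console.error(err);
  }
});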

* feat(FakeClient.js): add a new class `FakeClient` that extends `BaseClient` and implements methods for a fake client
feat(FakeClient.js): implement the `setOptions` method to handle options for the fake client
feat(FakeClient.js): implement the `initializeFakeClient` function to initialize a fake client with options and fake messages
fix(OpenAIClient.js): remove duplicate `maxTokensMap` import and use the one from utils
feat(BaseClient): return promptTokens and completionTokens
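
The test double looks roughly like this (stubbed values and method bodies are illustrative):

const BaseClient = require('../../app/clients/BaseClient');

class FakeClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = 'Fake Assistant';
    this.setOptions(options);
  }

  setOptions(options) {
    // Merge options like a real client, but never touch the network.
    this.options = { ...this.options, ...options };
    this.modelOptions = this.options.modelOptions ?? { model: 'fake-model' };
  }

  getCompletion() {
    return 'Fake response';
  }
}

module.exports = FakeClient;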

* refactor(gptPlugins): refactor ChatAgent to PluginsClient, which extends OpenAIClient

* refactor: client paths

* chore(jest.config.js): remove jest.config.js file

* fix(PluginController.js): update file path to manifest.json
feat(gptPlugins.js): add support for aborting messages

refactor(ask/index.js): rename askGPTPlugins to gptPlugins for consistency

* fix(BaseClient.js): fix spacing in generateTextStream function signature
refactor(BaseClient.js): remove unnecessary push to currentMessages in generateUserMessage function
refactor(BaseClient.js): remove unnecessary push to currentMessages in handleStartMethods function
refactor(PluginsClient.js): remove unused variables and date formatting in constructor
refactor(PluginsClient.js): simplify mapping of pastMessages in getCompletionPayload function

* refactor(GoogleClient): GoogleClient now extends BaseClient

* chore(.env.example): add AZURE_OPENAI_MODELS variable
fix(api/routes/ask/gptPlugins.js): enable Azure integration if PLUGINS_USE_AZURE is true
fix(api/routes/endpoints.js): getOpenAIModels function now accepts options, use AZURE_OPENAI_MODELS if PLUGINS_USE_AZURE is true
fix(client/components/Endpoints/OpenAI/Settings.jsx): remove console.log statement
docs(features/azure.md): add documentation for Azure OpenAI integration and environment variables
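
The endpoints change reduces to picking the model list from the right environment variable (a sketch; defaults are illustrative, see api/routes/endpoints.js for the real logic):

const getOpenAIModels = (opts = {}) => {
  let models = ['gpt-3.5-turbo', 'gpt-4']; // illustrative defaults
  if (opts.azure && process.env.AZURE_OPENAI_MODELS) {
    // e.g. AZURE_OPENAI_MODELS=gpt-35-turbo,gpt-4
    return process.env.AZURE_OPENAI_MODELS.split(',');
  }
  if (process.env.OPENAI_MODELS) {
    models = process.env.OPENAI_MODELS.split(',');
  }
  return models;
};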

* fix(e2e:popup): include the icon + endpoint names in the role `name` property
2023-07-03 16:51:12 -04:00

const OpenAIClient = require('./OpenAIClient');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { CallbackManager } = require('langchain/callbacks');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents/');
const { loadTools } = require('./tools/util');
const { SelfReflectionTool } = require('./tools/');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const {
  instructions,
  imageInstructions,
  errorInstructions,
} = require('./prompts/instructions');

class PluginsClient extends OpenAIClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = options.sender ?? 'Assistant';
    this.tools = [];
    this.actions = [];
    this.openAIApiKey = apiKey;
    this.setOptions(options);
    this.executor = null;
  }
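
  // Serialize the agent's intermediate thoughts and actions into a quoted log
  // string, reused when building error inputs and the completion prompt prefix.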
  getActions(input = null) {
    let output = 'Internal thoughts & actions taken:\n"';
    let actions = input || this.actions;

    if (actions[0]?.action && this.functionsAgent) {
      actions = actions.map((step) => ({
        log: `Action: ${step.action?.tool || ''}\nInput: ${JSON.stringify(step.action?.toolInput) || ''}\nObservation: ${step.observation}`
      }));
    } else if (actions[0]?.action) {
      actions = actions.map((step) => ({
        log: `${step.action.log}\nObservation: ${step.observation}`
      }));
    }

    actions.forEach((actionObj, index) => {
      output += `${actionObj.log}`;
      if (index < actions.length - 1) {
        output += '\n';
      }
    });

    return output + '"';
  }
  buildErrorInput(message, errorMessage) {
    const log = errorMessage.includes('Could not parse LLM output:')
      ? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
      : `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

    return `
${log}
${this.getActions()}
Human's last message: ${message}
`;
  }
  buildPromptPrefix(result, message) {
    if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
      return null;
    }

    if (
      result?.intermediateSteps?.length === 1 &&
      result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
    ) {
      return null;
    }

    const internalActions =
      result?.intermediateSteps?.length > 0
        ? this.getActions(result.intermediateSteps)
        : 'Internal Actions Taken: None';

    const toolBasedInstructions = internalActions.toLowerCase().includes('image')
      ? imageInstructions
      : '';

    const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';

    const preliminaryAnswer =
      result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
    const prefix = preliminaryAnswer
      ? `review and improve the answer you generated using plugins in response to the User Message below. The user hasn't seen your answer or thoughts yet.`
      : 'respond to the User Message below based on your preliminary thoughts & actions.';

    return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
  preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
  preliminaryAnswer
    ? ''
    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
}You must cite sources if you are using any web links. ${toolBasedInstructions}
Only respond with your conversational reply to the following User Message:
"${message}"`;
  }
  setOptions(options) {
    this.agentOptions = options.agentOptions;
    this.functionsAgent = this.agentOptions?.agent === 'functions';
    this.agentIsGpt3 = this.agentOptions?.model?.startsWith('gpt-3');
    if (this.functionsAgent && this.agentOptions.model) {
      this.agentOptions.model = this.getFunctionModelName(this.agentOptions.model);
    }

    super.setOptions(options);
    this.isGpt3 = this.modelOptions.model.startsWith('gpt-3');

    if (this.reverseProxyUrl) {
      // Guard against proxy URLs without a "v1" segment, which would
      // otherwise make .match(...) return null and throw.
      const match = this.reverseProxyUrl.match(/.*v1/);
      this.langchainProxy = match ? match[0] : this.reverseProxyUrl;
    }
  }
  getSaveOptions() {
    return {
      chatGptLabel: this.options.chatGptLabel,
      promptPrefix: this.options.promptPrefix,
      ...this.modelOptions,
      agentOptions: this.agentOptions,
    };
  }

  saveLatestAction(action) {
    this.actions.push(action);
  }
  getFunctionModelName(input) {
    const prefixMap = {
      'gpt-4': 'gpt-4-0613',
      'gpt-4-32k': 'gpt-4-32k-0613',
      'gpt-3.5-turbo': 'gpt-3.5-turbo-0613'
    };

    // Check longer prefixes first so 'gpt-4-32k' isn't shadowed by 'gpt-4'.
    const prefix = Object.keys(prefixMap)
      .sort((a, b) => b.length - a.length)
      .find((key) => input.startsWith(key));
    return prefix ? prefixMap[prefix] : 'gpt-3.5-turbo-0613';
  }
  getBuildMessagesOptions(opts) {
    return {
      isChatCompletion: true,
      promptPrefix: opts.promptPrefix,
      abortController: opts.abortController,
    };
  }
  createLLM(modelOptions, configOptions) {
    let credentials = { openAIApiKey: this.openAIApiKey };
    if (this.azure) {
      credentials = { ...this.azure };
    }

    if (this.options.debug) {
      console.debug('createLLM: configOptions');
      console.debug(configOptions);
    }

    return new ChatOpenAI({ credentials, ...modelOptions }, configOptions);
  }
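
  // Load the requested tools, convert stored messages to Langchain chat
  // history, and construct the agent executor for this conversation.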
  async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
    const modelOptions = {
      modelName: this.agentOptions.model,
      temperature: this.agentOptions.temperature
    };

    const configOptions = {};
    if (this.langchainProxy) {
      configOptions.basePath = this.langchainProxy;
    }

    const model = this.createLLM(modelOptions, configOptions);

    if (this.options.debug) {
      console.debug(`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature}----->`);
    }

    this.availableTools = await loadTools({
      user,
      model,
      tools: this.options.tools,
      functions: this.functionsAgent,
      options: {
        openAIApiKey: this.openAIApiKey
      }
    });

    // load tools
    for (const tool of this.options.tools) {
      const validTool = this.availableTools[tool];

      if (tool === 'plugins') {
        const plugins = await validTool();
        this.tools = [...this.tools, ...plugins];
      } else if (validTool) {
        this.tools.push(await validTool());
      }
    }

    if (this.options.debug) {
      console.debug('Requested Tools');
      console.debug(this.options.tools);
      console.debug('Loaded Tools');
      console.debug(this.tools.map((tool) => tool.name));
    }

    if (this.tools.length > 0 && !this.functionsAgent) {
      this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));
    } else if (this.tools.length === 0) {
      return;
    }

    const handleAction = (action, callback = null) => {
      this.saveLatestAction(action);

      if (this.options.debug) {
        console.debug('Latest Agent Action ', this.actions[this.actions.length - 1]);
      }

      if (typeof callback === 'function') {
        callback(action);
      }
    };

    // Map Messages to Langchain format
    const pastMessages = this.currentMessages.map((msg) =>
      msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
        ? new HumanChatMessage(msg.text)
        : new AIChatMessage(msg.text)
    );

    if (this.options.debug) {
      console.debug('Current Messages');
      console.debug(this.currentMessages);
      console.debug('Past Messages');
      console.debug(pastMessages);
    }

    // initialize agent
    const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;
    this.executor = await initializer({
      model,
      signal,
      pastMessages,
      tools: this.tools,
      currentDateString: this.currentDateString,
      verbose: this.options.debug,
      returnIntermediateSteps: true,
      callbackManager: CallbackManager.fromHandlers({
        async handleAgentAction(action) {
          handleAction(action, onAgentAction);
        },
        async handleChainEnd(action) {
          if (typeof onChainEnd === 'function') {
            onChainEnd(action);
          }
        }
      })
    });

    if (this.options.debug) {
      console.debug('Loaded agent.');
    }
  }
  async executorCall(message, signal) {
    let errorMessage = '';
    const maxAttempts = 1;

    for (let attempts = 1; attempts <= maxAttempts; attempts++) {
      const errorInput = this.buildErrorInput(message, errorMessage);
      const input = attempts > 1 ? errorInput : message;

      if (this.options.debug) {
        console.debug(`Attempt ${attempts} of ${maxAttempts}`);
      }

      if (this.options.debug && errorMessage.length > 0) {
        console.debug('Caught error, input:', input);
      }

      try {
        this.result = await this.executor.call({ input, signal });
        break; // Exit the loop if the function call is successful
      } catch (err) {
        console.error(err);
        errorMessage = err.message;
        if (attempts === maxAttempts) {
          this.result.output = `Encountered an error while attempting to respond. Error: ${err.message}`;
          this.result.intermediateSteps = this.actions;
          this.result.errorMessage = errorMessage;
          break;
        }
      }
    }
  }
  addImages(intermediateSteps, responseMessage) {
    if (!intermediateSteps || !responseMessage) {
      return;
    }

    intermediateSteps.forEach((step) => {
      const { observation } = step;
      if (!observation || !observation.includes('![')) {
        return;
      }

      if (!responseMessage.text.includes(observation)) {
        responseMessage.text += '\n' + observation;
        if (this.options.debug) {
          console.debug('added image from intermediateSteps');
        }
      }
    });
  }

  async handleResponseMessage(responseMessage, saveOptions, user) {
    responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
    responseMessage.completionTokens = responseMessage.tokenCount;
    await this.saveMessageToDatabase(responseMessage, saveOptions, user);
    delete responseMessage.tokenCount;
    return { ...responseMessage, ...this.result };
  }
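
  // Orchestrates a full plugins exchange: build the messages, run the agent
  // executor, then either stream the agent's output directly or run a final
  // completion pass over its intermediate steps.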
  async sendMessage(message, opts = {}) {
    // With no tools selected, fall back to the regular OpenAI completion flow.
    const completionMode = (this.options.tools?.length ?? 0) === 0;
    if (completionMode) {
      this.setOptions(opts);
      return super.sendMessage(message, opts);
    }
    console.log('Plugins sendMessage', message, opts);
    const {
      user,
      conversationId,
      responseMessageId,
      saveOptions,
      userMessage,
      onAgentAction,
      onChainEnd,
    } = await this.handleStartMethods(message, opts);

    let { prompt: payload, tokenCountMap, promptTokens, messages } = await this.buildMessages(
      this.currentMessages,
      userMessage.messageId,
      this.getBuildMessagesOptions({
        promptPrefix: null,
        abortController: this.abortController,
      })
    );

    if (this.options.debug) {
      console.debug('buildMessages: Messages');
      console.debug(messages);
    }

    if (tokenCountMap) {
      payload = payload.map((message, i) => {
        const { tokenCount, ...messageWithoutTokenCount } = message;
        // userMessage is always the last one in the payload
        if (i === payload.length - 1) {
          userMessage.tokenCount = message.tokenCount;
          console.debug(
            `Token count for user message: ${tokenCount}`,
            `Instruction Tokens: ${tokenCountMap.instructions || 'N/A'}`
          );
        }
        return messageWithoutTokenCount;
      });
      this.handleTokenCountMap(tokenCountMap);
    }
    this.result = {};
    if (messages) {
      this.currentMessages = messages;
    }
    await this.saveMessageToDatabase(userMessage, saveOptions, user);
    const responseMessage = {
      messageId: responseMessageId,
      conversationId,
      parentMessageId: userMessage.messageId,
      isCreatedByUser: false,
      model: this.modelOptions.model,
      sender: this.sender,
      promptTokens,
    };

    await this.initialize({
      user,
      message,
      onAgentAction,
      onChainEnd,
      signal: this.abortController.signal
    });
    await this.executorCall(message, this.abortController.signal);

    // If message was aborted mid-generation
    if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
      responseMessage.text = 'Cancelled.';
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.agentOptions.skipCompletion && this.result.output) {
      responseMessage.text = this.result.output;
      this.addImages(this.result.intermediateSteps, responseMessage);
      await this.generateTextStream(this.result.output, opts.onProgress);
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.options.debug) {
      console.debug('Plugins completion phase: this.result');
      console.debug(this.result);
    }

    const promptPrefix = this.buildPromptPrefix(this.result, message);

    if (this.options.debug) {
      console.debug('Plugins: promptPrefix');
      console.debug(promptPrefix);
    }

    payload = await this.buildCompletionPrompt({
      messages: this.currentMessages,
      promptPrefix,
    });

    if (this.options.debug) {
      console.debug('buildCompletionPrompt Payload');
      console.debug(payload);
    }
    responseMessage.text = await this.sendCompletion(payload, opts);
    return await this.handleResponseMessage(responseMessage, saveOptions, user);
  }
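
  // Collapse the conversation plus the agent's generated prefix into the
  // two-message payload expected by the completion endpoint.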
  async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
    if (this.options.debug) {
      console.debug('buildCompletionPrompt messages', messages);
    }

    const orderedMessages = messages;
    // buildPromptPrefix can return null; default to an empty string so .trim() doesn't throw.
    let promptPrefix = (_promptPrefix ?? '').trim();
    // If the prompt prefix doesn't end with the end token, add it.
    if (!promptPrefix.endsWith(`${this.endToken}`)) {
      promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
    }
    promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    const promptSuffix = `${this.startToken}${this.chatGptLabel ?? 'Assistant'}:\n`;

    const instructionsPayload = {
      role: 'system',
      name: 'instructions',
      content: promptPrefix
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix
    };

    if (this.isGpt3) {
      instructionsPayload.role = 'user';
      messagePayload.role = 'user';
      instructionsPayload.content += `\n${promptSuffix}`;
    }

    // testing if this works with browser endpoint
    if (!this.isGpt3 && this.reverseProxyUrl) {
      instructionsPayload.role = 'user';
    }

    let currentTokenCount =
      this.getTokenCountForMessage(instructionsPayload) +
      this.getTokenCountForMessage(messagePayload);

    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
        const message = orderedMessages.pop();
        // const roleLabel = message.role === 'User' ? this.userLabel : this.chatGptLabel;
        const roleLabel = message.role;
        let messageString = `${this.startToken}${roleLabel}:\n${message.text}${this.endToken}\n`;
        let newPromptBody;
        if (promptBody) {
          newPromptBody = `${messageString}${promptBody}`;
        } else {
          // Always insert prompt prefix before the last user message, if not gpt-3.5-turbo.
          // This makes the AI obey the prompt instructions better, which is important for custom instructions.
          // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
          // like "what's the last thing I wrote?".
          newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
        }

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`
          );
        }

        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setTimeout(resolve, 0));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();
    const prompt = promptBody;
    messagePayload.content = prompt;
    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    if (this.isGpt3 && messagePayload.content.length > 0) {
      const context = 'Chat History:\n';
      messagePayload.content = `${context}${prompt}`;
      currentTokenCount += this.getTokenCount(context);
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens
    );

    if (this.isGpt3) {
      messagePayload.content += promptSuffix;
      return [instructionsPayload, messagePayload];
    }

    const result = [messagePayload, instructionsPayload];

    if (this.functionsAgent && !this.isGpt3) {
      result[1].content = `${result[1].content}\nSure thing! Here is the output you requested:\n`;
    }

    return result.filter((message) => message.content.length > 0);
  }
}

module.exports = PluginsClient;