LibreChat/api/app/clients/prompts/formatMessages.js
Danny Avila d59b62174f
🪨 feat: AWS Bedrock support (#3935)
* feat: Add BedrockIcon component to SVG library

* feat: EModelEndpoint.bedrock

* feat: first pass, bedrock chat. note: AgentClient is returning `agents` as conversation.endpoint

* fix: declare endpoint in initialization step

* chore: Update @librechat/agents dependency to version 1.4.5

* feat: backend content aggregation for agents/bedrock

* feat: abort agent requests

* feat: AWS Bedrock icons

* WIP: agent provider schema parsing

* chore: Update EditIcon props type

* refactor(useGenerationsByLatest): make agents and bedrock editable

* refactor: non-assistant message content, parts

* fix: Bedrock response `sender`

* fix: use endpointOption.model_parameters not endpointOption.modelOptions

* fix: types for step handler

* refactor: Update Agents.ToolCallDelta type

* refactor: Remove unnecessary assignment of parentMessageId in AskController

* refactor: remove unnecessary assignment of parentMessageId (agent request handler)

* fix(bedrock/agents): message regeneration

* refactor: dynamic form elements using react-hook-form Controllers

* fix: agent icons/labels for messages

* fix: agent actions

* fix: use of new dynamic tags causing application crash

* refactor: dynamic settings touch-ups

* refactor: update Slider component to allow custom track class name

* refactor: update DynamicSlider component styles

* refactor: use Constants value for GLOBAL_PROJECT_NAME (enum)

* feat: agent share global methods/controllers

* fix: agents query

* fix: `getResponseModel`

* fix: share prompt a11y issue

* refactor: update SharePrompt dialog theme styles

* refactor: explicit typing for SharePrompt

* feat: add agent roles/permissions

* chore: update @librechat/agents dependency to version 1.4.7 for tool_call_ids edge case

* fix(Anthropic): messages.X.content.Y.tool_use.input: Input should be a valid dictionary

* fix: handle text parts with tool_call_ids and empty text

* fix: role initialization

* refactor: don't make instructions required

* refactor: improve typing of Text part

* fix: setShowStopButton for agents route

* chore: remove params for now

* fix: add streamBuffer and streamRate to help prevent 'Overloaded' errors from Anthropic API

* refactor: remove console.log statement in ContentRender component

* chore: typing, rename Context to Delete Button

* chore(DeleteButton): logging

* refactor(Action): make accessible

* style(Action): improve a11y again

* refactor: remove use/mention of mongoose sessions

* feat: first pass, sharing agents

* feat: visual indicator for global agent, remove author when serving to non-author

* wip: params

* chore: fix typing issues

* fix(schemas): typing

* refactor: improve accessibility of ListCard component and fix console React warning

* wip: reset templates for non-legacy new convos

* Revert "wip: params"

This reverts commit f8067e91d4.

* Revert "refactor: dynamic form elements using react-hook-form Controllers"

This reverts commit 2150c4815d.

* fix(Parameters): types and parameter effect update to only update local state to parameters

* refactor: optimize useDebouncedInput hook for better performance

* feat: first pass, anthropic bedrock params

* chore: paramEndpoints check for endpointType too

* fix: maxTokens to use coerceNumber.optional(),

* feat: extra chat model params

* chore: reduce code repetition

* refactor: improve preset title handling in SaveAsPresetDialog component

* refactor: improve preset handling in HeaderOptions component

* chore: improve typing, replace legacy dialog for SaveAsPresetDialog

* feat: save as preset from parameters panel

* fix: multi-search in select dropdown when using Option type

* refactor: update default showDefault value to false in Dynamic components

* feat: Bedrock presets settings

* chore: config, fix agents schema, update config version

* refactor: update AWS region variable name in bedrock options endpoint to BEDROCK_AWS_DEFAULT_REGION

* refactor: update baseEndpointSchema in config.ts to include baseURL property

* refactor: update createRun function to include req parameter and set streamRate based on provider

* feat: availableRegions via config

* refactor: remove unused demo agent controller file

* WIP: title

* Update @librechat/agents to version 1.5.0

* chore: addTitle.js to handle empty responseText

* feat: support images and titles

* feat: context token updates

* Refactor BaseClient test to use expect.objectContaining

* refactor: add model select, remove header options params, move side panel params below prompts

* chore: update models list, catch title error

* feat: model service for bedrock models (env)

* chore: Remove verbose debug log in AgentClient class following stream

* feat(bedrock): track token spend; fix: token rates, value key mapping for AWS models

* refactor: handle streamRate in `handleLLMNewToken` callback

* chore: AWS Bedrock example config in `.env.example`

* refactor: Rename bedrockMeta to bedrockGeneral in settings.ts and use for AI21 and Amazon Bedrock providers

* refactor: Update `.env.example` with AWS Bedrock model IDs URL and additional notes

* feat: titleModel support for bedrock

* refactor: Update `.env.example` with additional notes for AWS Bedrock model IDs
2024-09-09 12:06:59 -04:00

const { HumanMessage, AIMessage, SystemMessage, ToolMessage } = require('@langchain/core/messages');
const { EModelEndpoint, ContentTypes } = require('librechat-data-provider');
/**
* Formats a message to OpenAI Vision API payload format.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
 * @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling.
 * @param {Array<Object>} [params.image_urls] - The image_urls to attach to the message.
 * @returns {Object} - The formatted message.
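 *
 * @example
 * // Minimal sketch with hypothetical values; for Anthropic, image parts are placed before the text part:
 * formatVisionMessage({
 *   message: { role: 'user', content: 'What is in this image?' },
 *   image_urls: [{ type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }],
 *   endpoint: EModelEndpoint.anthropic,
 * });
 * // => { role: 'user', content: [{ type: 'image_url', ... }, { type: 'text', text: 'What is in this image?' }] }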
*/
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
if (endpoint === EModelEndpoint.anthropic) {
message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }];
return message;
}
message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls];
return message;
};
/**
* Formats a message to OpenAI payload format based on the provided options.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (e.g., 'user', 'assistant').
* @param {string} [params.message._name] - The name associated with the message.
* @param {string} [params.message.sender] - The sender of the message.
* @param {string} [params.message.text] - The text content of the message.
* @param {string} [params.message.content] - The content of the message.
 * @param {Array<Object>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
* @param {string} [params.userName] - The name of the user.
* @param {string} [params.assistantName] - The name of the assistant.
 * @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling.
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
* @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
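 *
 * @example
 * // Minimal sketch with hypothetical values; the sender is mapped to a role and the name is
 * // sanitized to match the OpenAI `name` pattern:
 * formatMessage({ message: { sender: 'User', text: 'Hello!' }, userName: 'Jane Doe' });
 * // => { role: 'user', content: 'Hello!', name: 'Jane_Doe' }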
*/
const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
let { role: _role, _name, sender, text, content: _content, lc_id } = message;
if (lc_id && lc_id[2] && !langChain) {
const roleMapping = {
SystemMessage: 'system',
HumanMessage: 'user',
AIMessage: 'assistant',
};
_role = roleMapping[lc_id[2]];
}
  const role = _role ?? (sender?.toLowerCase() === 'user' ? 'user' : 'assistant');
const content = _content ?? text ?? '';
const formattedMessage = {
role,
content,
};
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
return formatVisionMessage({
message: formattedMessage,
image_urls: message.image_urls,
endpoint,
});
}
if (_name) {
formattedMessage.name = _name;
}
if (userName && formattedMessage.role === 'user') {
formattedMessage.name = userName;
}
if (assistantName && formattedMessage.role === 'assistant') {
formattedMessage.name = assistantName;
}
if (formattedMessage.name) {
// Conform to API regex: ^[a-zA-Z0-9_-]{1,64}$
// https://community.openai.com/t/the-format-of-the-name-field-in-the-documentation-is-incorrect/175684/2
formattedMessage.name = formattedMessage.name.replace(/[^a-zA-Z0-9_-]/g, '_');
if (formattedMessage.name.length > 64) {
formattedMessage.name = formattedMessage.name.substring(0, 64);
}
}
if (!langChain) {
return formattedMessage;
}
if (role === 'user') {
return new HumanMessage(formattedMessage);
} else if (role === 'assistant') {
return new AIMessage(formattedMessage);
} else {
return new SystemMessage(formattedMessage);
}
};
/**
* Formats an array of messages for LangChain.
*
* @param {Array<Object>} messages - The array of messages to format.
* @param {Object} formatOptions - The options for formatting each message.
* @param {string} [formatOptions.userName] - The name of the user.
* @param {string} [formatOptions.assistantName] - The name of the assistant.
* @returns {Array<(HumanMessage|AIMessage|SystemMessage)>} - The array of formatted LangChain messages.
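 *
 * @example
 * // Minimal sketch with hypothetical values:
 * formatLangChainMessages(
 *   [
 *     { sender: 'User', text: 'Hi' },
 *     { sender: 'Assistant', text: 'Hello! How can I help?' },
 *   ],
 *   { userName: 'Jane' },
 * );
 * // => [HumanMessage { content: 'Hi', name: 'Jane' }, AIMessage { content: 'Hello! How can I help?' }]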
*/
const formatLangChainMessages = (messages, formatOptions) =>
messages.map((msg) => formatMessage({ ...formatOptions, message: msg, langChain: true }));
/**
* Formats a LangChain message object by merging properties from `lc_kwargs` or `kwargs` and `additional_kwargs`.
*
* @param {Object} message - The message object to format.
* @param {Object} [message.lc_kwargs] - Contains properties to be merged. Either this or `message.kwargs` should be provided.
* @param {Object} [message.kwargs] - Contains properties to be merged. Either this or `message.lc_kwargs` should be provided.
* @param {Object} [message.kwargs.additional_kwargs] - Additional properties to be merged.
*
* @returns {Object} The formatted LangChain message.
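 *
 * @example
 * // Minimal sketch with a hypothetical serialized LangChain message:
 * formatFromLangChain({
 *   lc_kwargs: {
 *     content: 'Hello',
 *     additional_kwargs: { function_call: { name: 'get_weather' } },
 *   },
 * });
 * // => { content: 'Hello', function_call: { name: 'get_weather' } }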
*/
const formatFromLangChain = (message) => {
const { additional_kwargs, ...message_kwargs } = message.lc_kwargs ?? message.kwargs;
return {
...message_kwargs,
...additional_kwargs,
};
};
/**
* Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances.
*
* @param {Array<Partial<TMessage>>} payload - The array of messages to format.
* @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
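 *
 * @example
 * // Minimal sketch with a hypothetical payload: a text part carrying `tool_call_ids`,
 * // followed by its tool call, becomes an AIMessage/ToolMessage pair:
 * formatAgentMessages([
 *   {
 *     role: 'assistant',
 *     content: [
 *       { type: 'text', text: 'Searching...', tool_call_ids: ['call_1'] },
 *       {
 *         type: 'tool_call',
 *         tool_call: { id: 'call_1', name: 'search', args: '{"query":"weather"}', output: 'Sunny' },
 *       },
 *     ],
 *   },
 * ]);
 * // => [AIMessage { content: 'Searching...', tool_calls: [...] }, ToolMessage { content: 'Sunny' }]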
*/
const formatAgentMessages = (payload) => {
const messages = [];
for (const message of payload) {
if (message.role !== 'assistant') {
messages.push(formatMessage({ message, langChain: true }));
continue;
}
let currentContent = [];
let lastAIMessage = null;
for (const part of message.content) {
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
// If there's pending content, add it as an AIMessage
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
currentContent = [];
}
// Create a new AIMessage with this text and prepare for tool calls
lastAIMessage = new AIMessage({
content: part.text || '',
});
messages.push(lastAIMessage);
} else if (part.type === ContentTypes.TOOL_CALL) {
if (!lastAIMessage) {
throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids');
}
        // Note: the `AIMessage` constructor initializes the `tool_calls` list; tool outputs must be excluded from it
const { output, args: _args, ...tool_call } = part.tool_call;
// TODO: investigate; args as dictionary may need to be provider-or-tool-specific
let args = _args;
try {
args = JSON.parse(args);
} catch (e) {
// failed to parse, leave as is
}
tool_call.args = args;
lastAIMessage.tool_calls.push(tool_call);
// Add the corresponding ToolMessage
messages.push(
new ToolMessage({
tool_call_id: tool_call.id,
name: tool_call.name,
content: output,
}),
);
} else {
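        // Accumulate regular content parts until the next tool-call boundary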
currentContent.push(part);
}
}
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
}
}
return messages;
};
module.exports = {
formatMessage,
formatFromLangChain,
formatAgentMessages,
formatLangChainMessages,
};