mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 08:50:15 +01:00
* wip: first pass, dropdown for selecting sequential agents * refactor: Improve agent selection logic and enhance performance in SequentialAgents component * wip: seq. agents working ideas * wip: sequential agents style change * refactor: move agent form options/submission outside of AgentConfig * refactor: prevent repeating code * refactor: simplify current agent display in SequentialAgents component * feat: persist form value handling in AgentSelect component for agent_ids * feat: first pass, sequential agents agent update * feat: enhance message display with agent updates and empty text handling * chore: update Icon component to use EModelEndpoint for agent endpoints * feat: update content type checks in BaseClient to use constants for better readability * feat: adjust max context tokens calculation to use 90% of the model's max tokens * feat: first pass, agent run message pruning * chore: increase max listeners for abort controller to prevent memory leaks * feat: enhance runAgent function to include current index count map for improved token tracking * chore: update @librechat/agents dependency to version 2.2.5 * feat: update icons and style of SequentialAgents component for improved UI consistency * feat: add AdvancedButton and AdvancedPanel components for enhanced agent settings navigation, update styling for agent form * chore: adjust minimum height of AdvancedPanel component for better layout consistency * chore: update @librechat/agents dependency to version 2.2.6 * feat: enhance message formatting by incorporating tool set into agent message processing, in order to allow better mix/matching of agents (as tool calls for tools not found in set will be stringified) * refactor: reorder components in AgentConfig for improved readability and maintainability * refactor: enhance layout of AgentUpdate component for improved visual structure * feat: add DeepSeek provider to Bedrock settings and schemas * feat: enhance link styling in mobile.css for better 
visibility and accessibility * fix: update banner model import in update banner script; export Banner model * refactor: `duplicateAgentHandler` to include tool_resources only for OCR context files * feat: add 'qwen-vl' to visionModels for enhanced model support * fix: change image format from JPEG to PNG in DALLE3 response * feat: reorganize Advanced components and add localizations * refactor: simplify JSX structure in AgentChain component to defer container styling to parent * feat: add FormInput component for reusable input handling * feat: make agent recursion limit configurable from builder * feat: add support for agent capabilities chain in AdvancedPanel and update data-provider version * feat: add maxRecursionLimit configuration for agents and update related documentation * fix: update CONFIG_VERSION to 1.2.3 in data provider configuration * feat: replace recursion limit input with MaxAgentSteps component and enhance input handling * feat: enhance AgentChain component with hover card for additional information and update related labels * fix: pass request and response objects to `createActionTool` when using assistant actions to prevent auth error * feat: update AgentChain component layout to include agent count display * feat: increase default max listeners and implement capability check function for agent chain * fix: update link styles in mobile.css for better visibility in dark mode * chore: temp. remove agents package while bumping shared packages * chore: update @langchain/google-genai package to version 0.1.11 * chore: update @langchain/google-vertexai package to version 0.2.2 * chore: add @librechat/agents package at version 2.2.8 * feat: add deepseek.r1 model with token rate and context values for bedrock
277 lines
9.8 KiB
JavaScript
277 lines
9.8 KiB
JavaScript
const { ToolMessage } = require('@langchain/core/messages');
|
|
const { EModelEndpoint, ContentTypes } = require('librechat-data-provider');
|
|
const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages');
|
|
|
|
/**
|
|
* Formats a message to OpenAI Vision API payload format.
|
|
*
|
|
* @param {Object} params - The parameters for formatting.
|
|
* @param {Object} params.message - The message object to format.
|
|
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
|
|
* @param {string} [params.message.content] - The text content of the message.
|
|
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
|
|
* @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
|
|
* @returns {(Object)} - The formatted message.
|
|
*/
|
|
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
|
|
if (endpoint === EModelEndpoint.anthropic) {
|
|
message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }];
|
|
return message;
|
|
}
|
|
|
|
message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls];
|
|
|
|
return message;
|
|
};
|
|
|
|
/**
|
|
* Formats a message to OpenAI payload format based on the provided options.
|
|
*
|
|
* @param {Object} params - The parameters for formatting.
|
|
* @param {Object} params.message - The message object to format.
|
|
* @param {string} [params.message.role] - The role of the message sender (e.g., 'user', 'assistant').
|
|
* @param {string} [params.message._name] - The name associated with the message.
|
|
* @param {string} [params.message.sender] - The sender of the message.
|
|
* @param {string} [params.message.text] - The text content of the message.
|
|
* @param {string} [params.message.content] - The content of the message.
|
|
* @param {Array<string>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
|
|
* @param {string} [params.userName] - The name of the user.
|
|
* @param {string} [params.assistantName] - The name of the assistant.
|
|
* @param {string} [params.endpoint] - Identifier for specific endpoint handling
|
|
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
|
|
* @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
|
|
*/
|
|
const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
|
|
let { role: _role, _name, sender, text, content: _content, lc_id } = message;
|
|
if (lc_id && lc_id[2] && !langChain) {
|
|
const roleMapping = {
|
|
SystemMessage: 'system',
|
|
HumanMessage: 'user',
|
|
AIMessage: 'assistant',
|
|
};
|
|
_role = roleMapping[lc_id[2]];
|
|
}
|
|
const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant');
|
|
const content = _content ?? text ?? '';
|
|
const formattedMessage = {
|
|
role,
|
|
content,
|
|
};
|
|
|
|
const { image_urls } = message;
|
|
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
|
|
return formatVisionMessage({
|
|
message: formattedMessage,
|
|
image_urls: message.image_urls,
|
|
endpoint,
|
|
});
|
|
}
|
|
|
|
if (_name) {
|
|
formattedMessage.name = _name;
|
|
}
|
|
|
|
if (userName && formattedMessage.role === 'user') {
|
|
formattedMessage.name = userName;
|
|
}
|
|
|
|
if (assistantName && formattedMessage.role === 'assistant') {
|
|
formattedMessage.name = assistantName;
|
|
}
|
|
|
|
if (formattedMessage.name) {
|
|
// Conform to API regex: ^[a-zA-Z0-9_-]{1,64}$
|
|
// https://community.openai.com/t/the-format-of-the-name-field-in-the-documentation-is-incorrect/175684/2
|
|
formattedMessage.name = formattedMessage.name.replace(/[^a-zA-Z0-9_-]/g, '_');
|
|
|
|
if (formattedMessage.name.length > 64) {
|
|
formattedMessage.name = formattedMessage.name.substring(0, 64);
|
|
}
|
|
}
|
|
|
|
if (!langChain) {
|
|
return formattedMessage;
|
|
}
|
|
|
|
if (role === 'user') {
|
|
return new HumanMessage(formattedMessage);
|
|
} else if (role === 'assistant') {
|
|
return new AIMessage(formattedMessage);
|
|
} else {
|
|
return new SystemMessage(formattedMessage);
|
|
}
|
|
};
|
|
|
|
/**
|
|
* Formats an array of messages for LangChain.
|
|
*
|
|
* @param {Array<Object>} messages - The array of messages to format.
|
|
* @param {Object} formatOptions - The options for formatting each message.
|
|
* @param {string} [formatOptions.userName] - The name of the user.
|
|
* @param {string} [formatOptions.assistantName] - The name of the assistant.
|
|
* @returns {Array<(HumanMessage|AIMessage|SystemMessage)>} - The array of formatted LangChain messages.
|
|
*/
|
|
const formatLangChainMessages = (messages, formatOptions) =>
|
|
messages.map((msg) => formatMessage({ ...formatOptions, message: msg, langChain: true }));
|
|
|
|
/**
|
|
* Formats a LangChain message object by merging properties from `lc_kwargs` or `kwargs` and `additional_kwargs`.
|
|
*
|
|
* @param {Object} message - The message object to format.
|
|
* @param {Object} [message.lc_kwargs] - Contains properties to be merged. Either this or `message.kwargs` should be provided.
|
|
* @param {Object} [message.kwargs] - Contains properties to be merged. Either this or `message.lc_kwargs` should be provided.
|
|
* @param {Object} [message.kwargs.additional_kwargs] - Additional properties to be merged.
|
|
*
|
|
* @returns {Object} The formatted LangChain message.
|
|
*/
|
|
const formatFromLangChain = (message) => {
|
|
const { additional_kwargs, ...message_kwargs } = message.lc_kwargs ?? message.kwargs;
|
|
return {
|
|
...message_kwargs,
|
|
...additional_kwargs,
|
|
};
|
|
};
|
|
|
|
/**
|
|
* Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances.
|
|
*
|
|
* @param {Array<Partial<TMessage>>} payload - The array of messages to format.
|
|
* @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
|
|
*/
|
|
const formatAgentMessages = (payload) => {
|
|
const messages = [];
|
|
|
|
for (const message of payload) {
|
|
if (typeof message.content === 'string') {
|
|
message.content = [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content }];
|
|
}
|
|
if (message.role !== 'assistant') {
|
|
messages.push(formatMessage({ message, langChain: true }));
|
|
continue;
|
|
}
|
|
|
|
let currentContent = [];
|
|
let lastAIMessage = null;
|
|
|
|
let hasReasoning = false;
|
|
for (const part of message.content) {
|
|
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
|
|
/*
|
|
If there's pending content, it needs to be aggregated as a single string to prepare for tool calls.
|
|
For Anthropic models, the "tool_calls" field on a message is only respected if content is a string.
|
|
*/
|
|
if (currentContent.length > 0) {
|
|
let content = currentContent.reduce((acc, curr) => {
|
|
if (curr.type === ContentTypes.TEXT) {
|
|
return `${acc}${curr[ContentTypes.TEXT]}\n`;
|
|
}
|
|
return acc;
|
|
}, '');
|
|
content = `${content}\n${part[ContentTypes.TEXT] ?? ''}`.trim();
|
|
lastAIMessage = new AIMessage({ content });
|
|
messages.push(lastAIMessage);
|
|
currentContent = [];
|
|
continue;
|
|
}
|
|
|
|
// Create a new AIMessage with this text and prepare for tool calls
|
|
lastAIMessage = new AIMessage({
|
|
content: part.text || '',
|
|
});
|
|
|
|
messages.push(lastAIMessage);
|
|
} else if (part.type === ContentTypes.TOOL_CALL) {
|
|
if (!lastAIMessage) {
|
|
throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids');
|
|
}
|
|
|
|
// Note: `tool_calls` list is defined when constructed by `AIMessage` class, and outputs should be excluded from it
|
|
const { output, args: _args, ...tool_call } = part.tool_call;
|
|
// TODO: investigate; args as dictionary may need to be provider-or-tool-specific
|
|
let args = _args;
|
|
try {
|
|
args = JSON.parse(_args);
|
|
} catch (e) {
|
|
if (typeof _args === 'string') {
|
|
args = { input: _args };
|
|
}
|
|
}
|
|
|
|
tool_call.args = args;
|
|
lastAIMessage.tool_calls.push(tool_call);
|
|
|
|
// Add the corresponding ToolMessage
|
|
messages.push(
|
|
new ToolMessage({
|
|
tool_call_id: tool_call.id,
|
|
name: tool_call.name,
|
|
content: output || '',
|
|
}),
|
|
);
|
|
} else if (part.type === ContentTypes.THINK) {
|
|
hasReasoning = true;
|
|
continue;
|
|
} else if (part.type === ContentTypes.ERROR || part.type === ContentTypes.AGENT_UPDATE) {
|
|
continue;
|
|
} else {
|
|
currentContent.push(part);
|
|
}
|
|
}
|
|
|
|
if (hasReasoning) {
|
|
currentContent = currentContent
|
|
.reduce((acc, curr) => {
|
|
if (curr.type === ContentTypes.TEXT) {
|
|
return `${acc}${curr[ContentTypes.TEXT]}\n`;
|
|
}
|
|
return acc;
|
|
}, '')
|
|
.trim();
|
|
}
|
|
|
|
if (currentContent.length > 0) {
|
|
messages.push(new AIMessage({ content: currentContent }));
|
|
}
|
|
}
|
|
|
|
return messages;
|
|
};
|
|
|
|
/**
|
|
* Formats an array of messages for LangChain, making sure all content fields are strings
|
|
* @param {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} payload - The array of messages to format.
|
|
* @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
|
|
*/
|
|
const formatContentStrings = (payload) => {
|
|
const messages = [];
|
|
|
|
for (const message of payload) {
|
|
if (typeof message.content === 'string') {
|
|
continue;
|
|
}
|
|
|
|
if (!Array.isArray(message.content)) {
|
|
continue;
|
|
}
|
|
|
|
// Reduce text types to a single string, ignore all other types
|
|
const content = message.content.reduce((acc, curr) => {
|
|
if (curr.type === ContentTypes.TEXT) {
|
|
return `${acc}${curr[ContentTypes.TEXT]}\n`;
|
|
}
|
|
return acc;
|
|
}, '');
|
|
|
|
message.content = content.trim();
|
|
}
|
|
|
|
return messages;
|
|
};
|
|
|
|
module.exports = {
|
|
formatMessage,
|
|
formatFromLangChain,
|
|
formatAgentMessages,
|
|
formatContentStrings,
|
|
formatLangChainMessages,
|
|
};
|