🤖 feat: Agent Handoffs (Routing) (#10176)

* feat: Add support for agent handoffs with edges in agent forms and schemas

chore: Mark `agent_ids` field as deprecated in favor of edges across various schemas and types
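
For orientation, a sketch of the two edge shapes this series introduces, inferred from the `GraphEdge` usage visible later in this commit (`edgeType: 'handoff'` in `AgentHandoffs.tsx`, `edgeType: 'direct'` plus `prompt`/`excludeResults` in `chain.ts`); the authoritative type lives in `@librechat/agents` and may carry more fields, and the agent IDs below are hypothetical:

import type { GraphEdge } from '@librechat/agents';

// Handoff routing (new): the source agent gets a generated transfer tool
// it can call to route the conversation to the target agent.
const handoff: GraphEdge = {
  from: 'agent_supervisor', // hypothetical agent IDs
  to: 'agent_data_analyst',
  edgeType: 'handoff',
  description: 'Transfer to data analyst for statistical analysis',
};

// Sequential chaining (deprecated, formerly `agent_ids`): a direct edge
// that always runs the target agent after the source agent finishes.
const chain: GraphEdge = {
  from: 'agent_writer',
  to: 'agent_reviewer',
  edgeType: 'direct',
};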

chore: Update dependencies for @langchain/core and @librechat/agents to latest versions

chore: Update peer dependency for @librechat/agents to version 3.0.0-rc2 in package.json

chore: Update @librechat/agents dependency to version 3.0.0-rc3 in package.json and package-lock.json

feat: first pass, multi-agent handoffs

fix: update output type to ToolMessage in memory handling functions

fix: improve type checking for graphConfig in createRun function

refactor: remove unused content filtering logic in AgentClient

chore: update @librechat/agents dependency to version 3.0.0-rc4 in package.json and package-lock.json

fix: update @langchain/core peer dependency version to ^0.3.72 in package.json and package-lock.json

fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints

feat: Agent handoff UI

chore: update @librechat/agents dependency to version 3.0.0-rc8 in package.json and package-lock.json

fix: improve hasInfo condition and adjust UI element classes in AgentHandoff component

refactor: remove current fixed agent display from AgentHandoffs component due to redundancy

feat: enhance AgentHandoffs UI with localized beta label and improved layout

chore: update @librechat/agents dependency to version 3.0.0-rc10 in package.json and package-lock.json

feat: add `createSequentialChainEdges` function to restore legacy agent chaining on top of multi-agent edges

feat: update `createSequentialChainEdges` call to only provide conversation context between agents

feat: deprecate Agent Chain functionality and update related methods for improved clarity
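
To make the deprecation path concrete: per the initializer changes later in this commit, a legacy `agent_ids` chain is translated into sequential 'direct' edges at startup, so chains ride on the same edge machinery as handoffs. Roughly (names follow the initializer diff below):

// Legacy chain ['B', 'C'] on primary agent 'A' becomes direct edges A -> B -> C,
// passing only a '{convo}' buffer string of the run between agents.
const chain = await createSequentialChainEdges([primaryConfig.id].concat(agent_ids), '{convo}');
primaryConfig.edges = (primaryConfig.edges ?? []).concat(chain);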

* chore: update @librechat/agents dependency to version 3.0.0-rc11 in package.json and package-lock.json

* refactor: remove the local addCacheControl function and its related imports; import it from @librechat/agents instead

* chore: remove unused i18n keys

* refactor: remove unused format export from index.ts

* chore: update @librechat/agents to v3.0.0-rc13

* chore: remove BEDROCK_LEGACY provider from Providers enum

* chore: update @librechat/agents to version 3.0.2 in package.json
Danny Avila, 2025-11-05 17:15:17 -05:00, committed by GitHub
parent 958a6c7872
commit 8a4a5a4790
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 1108 additions and 3810 deletions

View file

@@ -10,7 +10,7 @@ const {
   getResponseSender,
   validateVisionModel,
 } = require('librechat-data-provider');
-const { sleep, SplitStreamHandler: _Handler } = require('@librechat/agents');
+const { sleep, SplitStreamHandler: _Handler, addCacheControl } = require('@librechat/agents');
 const {
   Tokenizer,
   createFetch,
@@ -25,7 +25,6 @@ const {
 const {
   truncateText,
   formatMessage,
-  addCacheControl,
   titleFunctionPrompt,
   parseParamFromPrompt,
   createContextHandlers,

View file

@@ -1,45 +0,0 @@
-/**
- * Anthropic API: Adds cache control to the appropriate user messages in the payload.
- * @param {Array<AnthropicMessage | BaseMessage>} messages - The array of message objects.
- * @returns {Array<AnthropicMessage | BaseMessage>} - The updated array of message objects with cache control added.
- */
-function addCacheControl(messages) {
-  if (!Array.isArray(messages) || messages.length < 2) {
-    return messages;
-  }
-
-  const updatedMessages = [...messages];
-  let userMessagesModified = 0;
-
-  for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
-    const message = updatedMessages[i];
-    if (message.getType != null && message.getType() !== 'human') {
-      continue;
-    } else if (message.getType == null && message.role !== 'user') {
-      continue;
-    }
-
-    if (typeof message.content === 'string') {
-      message.content = [
-        {
-          type: 'text',
-          text: message.content,
-          cache_control: { type: 'ephemeral' },
-        },
-      ];
-      userMessagesModified++;
-    } else if (Array.isArray(message.content)) {
-      for (let j = message.content.length - 1; j >= 0; j--) {
-        if (message.content[j].type === 'text') {
-          message.content[j].cache_control = { type: 'ephemeral' };
-          userMessagesModified++;
-          break;
-        }
-      }
-    }
-  }
-
-  return updatedMessages;
-}
-
-module.exports = addCacheControl;

View file

@@ -1,227 +0,0 @@
-const addCacheControl = require('./addCacheControl');
-
-describe('addCacheControl', () => {
-  test('should add cache control to the last two user messages with array content', () => {
-    const messages = [
-      { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
-      { role: 'assistant', content: [{ type: 'text', text: 'Hi there' }] },
-      { role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
-      { role: 'assistant', content: [{ type: 'text', text: 'I\'m doing well, thanks!' }] },
-      { role: 'user', content: [{ type: 'text', text: 'Great!' }] },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).not.toHaveProperty('cache_control');
-    expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' });
-    expect(result[4].content[0].cache_control).toEqual({ type: 'ephemeral' });
-  });
-
-  test('should add cache control to the last two user messages with string content', () => {
-    const messages = [
-      { role: 'user', content: 'Hello' },
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: 'How are you?' },
-      { role: 'assistant', content: 'I\'m doing well, thanks!' },
-      { role: 'user', content: 'Great!' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content).toBe('Hello');
-    expect(result[2].content[0]).toEqual({
-      type: 'text',
-      text: 'How are you?',
-      cache_control: { type: 'ephemeral' },
-    });
-    expect(result[4].content[0]).toEqual({
-      type: 'text',
-      text: 'Great!',
-      cache_control: { type: 'ephemeral' },
-    });
-  });
-
-  test('should handle mixed string and array content', () => {
-    const messages = [
-      { role: 'user', content: 'Hello' },
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).toEqual({
-      type: 'text',
-      text: 'Hello',
-      cache_control: { type: 'ephemeral' },
-    });
-    expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' });
-  });
-
-  test('should handle less than two user messages', () => {
-    const messages = [
-      { role: 'user', content: 'Hello' },
-      { role: 'assistant', content: 'Hi there' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).toEqual({
-      type: 'text',
-      text: 'Hello',
-      cache_control: { type: 'ephemeral' },
-    });
-    expect(result[1].content).toBe('Hi there');
-  });
-
-  test('should return original array if no user messages', () => {
-    const messages = [
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'assistant', content: 'How can I help?' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result).toEqual(messages);
-  });
-
-  test('should handle empty array', () => {
-    const messages = [];
-    const result = addCacheControl(messages);
-    expect(result).toEqual([]);
-  });
-
-  test('should handle non-array input', () => {
-    const messages = 'not an array';
-    const result = addCacheControl(messages);
-    expect(result).toBe('not an array');
-  });
-
-  test('should not modify assistant messages', () => {
-    const messages = [
-      { role: 'user', content: 'Hello' },
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: 'How are you?' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[1].content).toBe('Hi there');
-  });
-
-  test('should handle multiple content items in user messages', () => {
-    const messages = [
-      {
-        role: 'user',
-        content: [
-          { type: 'text', text: 'Hello' },
-          { type: 'image', url: 'http://example.com/image.jpg' },
-          { type: 'text', text: 'This is an image' },
-        ],
-      },
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: 'How are you?' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).not.toHaveProperty('cache_control');
-    expect(result[0].content[1]).not.toHaveProperty('cache_control');
-    expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' });
-    expect(result[2].content[0]).toEqual({
-      type: 'text',
-      text: 'How are you?',
-      cache_control: { type: 'ephemeral' },
-    });
-  });
-
-  test('should handle an array with mixed content types', () => {
-    const messages = [
-      { role: 'user', content: 'Hello' },
-      { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
-      { role: 'assistant', content: 'I\'m doing well, thanks!' },
-      { role: 'user', content: 'Great!' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content).toEqual('Hello');
-    expect(result[2].content[0]).toEqual({
-      type: 'text',
-      text: 'How are you?',
-      cache_control: { type: 'ephemeral' },
-    });
-    expect(result[4].content).toEqual([
-      {
-        type: 'text',
-        text: 'Great!',
-        cache_control: { type: 'ephemeral' },
-      },
-    ]);
-    expect(result[1].content).toBe('Hi there');
-    expect(result[3].content).toBe('I\'m doing well, thanks!');
-  });
-
-  test('should handle edge case with multiple content types', () => {
-    const messages = [
-      {
-        role: 'user',
-        content: [
-          {
-            type: 'image',
-            source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' },
-          },
-          {
-            type: 'image',
-            source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' },
-          },
-          { type: 'text', text: 'what do all these images have in common' },
-        ],
-      },
-      { role: 'assistant', content: 'I see multiple images.' },
-      { role: 'user', content: 'Correct!' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).not.toHaveProperty('cache_control');
-    expect(result[0].content[1]).not.toHaveProperty('cache_control');
-    expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' });
-    expect(result[2].content[0]).toEqual({
-      type: 'text',
-      text: 'Correct!',
-      cache_control: { type: 'ephemeral' },
-    });
-  });
-
-  test('should handle user message with no text block', () => {
-    const messages = [
-      {
-        role: 'user',
-        content: [
-          {
-            type: 'image',
-            source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' },
-          },
-          {
-            type: 'image',
-            source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' },
-          },
-        ],
-      },
-      { role: 'assistant', content: 'I see two images.' },
-      { role: 'user', content: 'Correct!' },
-    ];
-
-    const result = addCacheControl(messages);
-
-    expect(result[0].content[0]).not.toHaveProperty('cache_control');
-    expect(result[0].content[1]).not.toHaveProperty('cache_control');
-    expect(result[2].content[0]).toEqual({
-      type: 'text',
-      text: 'Correct!',
-      cache_control: { type: 'ephemeral' },
-    });
-  });
-});

View file

@@ -1,4 +1,3 @@
-const addCacheControl = require('./addCacheControl');
 const formatMessages = require('./formatMessages');
 const summaryPrompts = require('./summaryPrompts');
 const handleInputs = require('./handleInputs');
@@ -9,7 +8,6 @@ const createVisionPrompt = require('./createVisionPrompt');
 const createContextHandlers = require('./createContextHandlers');
 
 module.exports = {
-  addCacheControl,
   ...formatMessages,
   ...summaryPrompts,
   ...handleInputs,

View file

@@ -44,11 +44,11 @@
     "@googleapis/youtube": "^20.0.0",
     "@keyv/redis": "^4.3.3",
     "@langchain/community": "^0.3.47",
-    "@langchain/core": "^0.3.62",
+    "@langchain/core": "^0.3.72",
     "@langchain/google-genai": "^0.2.13",
     "@langchain/google-vertexai": "^0.2.13",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.4.90",
+    "@librechat/agents": "^3.0.2",
     "@librechat/api": "*",
     "@librechat/data-schemas": "*",
     "@microsoft/microsoft-graph-client": "^3.0.7",

View file

@@ -95,6 +95,19 @@ class ModelEndHandler {
   }
 }
 
+/**
+ * @deprecated Agent Chain helper
+ * @param {string | undefined} [last_agent_id]
+ * @param {string | undefined} [langgraph_node]
+ * @returns {boolean}
+ */
+function checkIfLastAgent(last_agent_id, langgraph_node) {
+  if (!last_agent_id || !langgraph_node) {
+    return false;
+  }
+  return langgraph_node?.endsWith(last_agent_id);
+}
+
 /**
  * Get default handlers for stream events.
  * @param {Object} options - The options object.
@@ -125,7 +138,7 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
       handle: (event, data, metadata) => {
         if (data?.stepDetails.type === StepTypes.TOOL_CALLS) {
           sendEvent(res, { event, data });
-        } else if (metadata?.last_agent_index === metadata?.agent_index) {
+        } else if (checkIfLastAgent(metadata?.last_agent_id, metadata?.langgraph_node)) {
           sendEvent(res, { event, data });
         } else if (!metadata?.hide_sequential_outputs) {
           sendEvent(res, { event, data });
@@ -154,7 +167,7 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
       handle: (event, data, metadata) => {
         if (data?.delta.type === StepTypes.TOOL_CALLS) {
           sendEvent(res, { event, data });
-        } else if (metadata?.last_agent_index === metadata?.agent_index) {
+        } else if (checkIfLastAgent(metadata?.last_agent_id, metadata?.langgraph_node)) {
           sendEvent(res, { event, data });
         } else if (!metadata?.hide_sequential_outputs) {
           sendEvent(res, { event, data });
@@ -172,7 +185,7 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
       handle: (event, data, metadata) => {
         if (data?.result != null) {
           sendEvent(res, { event, data });
-        } else if (metadata?.last_agent_index === metadata?.agent_index) {
+        } else if (checkIfLastAgent(metadata?.last_agent_id, metadata?.langgraph_node)) {
          sendEvent(res, { event, data });
         } else if (!metadata?.hide_sequential_outputs) {
           sendEvent(res, { event, data });
@@ -188,7 +201,7 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
        * @param {GraphRunnableConfig['configurable']} [metadata] The runnable metadata.
        */
       handle: (event, data, metadata) => {
-        if (metadata?.last_agent_index === metadata?.agent_index) {
+        if (checkIfLastAgent(metadata?.last_agent_id, metadata?.langgraph_node)) {
           sendEvent(res, { event, data });
         } else if (!metadata?.hide_sequential_outputs) {
           sendEvent(res, { event, data });
@@ -204,7 +217,7 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
        * @param {GraphRunnableConfig['configurable']} [metadata] The runnable metadata.
        */
       handle: (event, data, metadata) => {
-        if (metadata?.last_agent_index === metadata?.agent_index) {
+        if (checkIfLastAgent(metadata?.last_agent_id, metadata?.langgraph_node)) {
           sendEvent(res, { event, data });
         } else if (!metadata?.hide_sequential_outputs) {
           sendEvent(res, { event, data });

View file

@@ -3,7 +3,6 @@ const { logger } = require('@librechat/data-schemas');
 const { DynamicStructuredTool } = require('@langchain/core/tools');
 const { getBufferString, HumanMessage } = require('@langchain/core/messages');
 const {
-  sendEvent,
   createRun,
   Tokenizer,
   checkAccess,
@@ -12,14 +11,12 @@ const {
   resolveHeaders,
   getBalanceConfig,
   memoryInstructions,
-  formatContentStrings,
   getTransactionsConfig,
   createMemoryProcessor,
 } = require('@librechat/api');
 const {
   Callback,
   Providers,
-  GraphEvents,
   TitleMethod,
   formatMessage,
   formatAgentMessages,
@@ -38,12 +35,12 @@ const {
   bedrockInputSchema,
   removeNullishValues,
 } = require('librechat-data-provider');
-const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
 const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
 const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
 const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { getProviderConfig } = require('~/server/services/Endpoints');
+const { createContextHandlers } = require('~/app/clients/prompts');
 const { checkCapability } = require('~/server/services/Config');
 const BaseClient = require('~/app/clients/BaseClient');
 const { getRoleByName } = require('~/models/Role');
@@ -80,8 +77,6 @@ const payloadParser = ({ req, agent, endpoint }) => {
   return req.body.endpointOption.model_parameters;
 };
 
-const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
-
 function createTokenCounter(encoding) {
   return function (message) {
     const countTokens = (text) => Tokenizer.getTokenCount(text, encoding);
@@ -803,137 +798,81 @@ class AgentClient extends BaseClient {
       );
 
       /**
-       *
-       * @param {Agent} agent
        * @param {BaseMessage[]} messages
-       * @param {number} [i]
-       * @param {TMessageContentParts[]} [contentData]
-       * @param {Record<string, number>} [currentIndexCountMap]
        */
-      const runAgent = async (agent, _messages, i = 0, contentData = [], _currentIndexCountMap) => {
-        config.configurable.model = agent.model_parameters.model;
-        const currentIndexCountMap = _currentIndexCountMap ?? indexTokenCountMap;
-        if (i > 0) {
-          this.model = agent.model_parameters.model;
-        }
-        if (i > 0 && config.signal == null) {
-          config.signal = abortController.signal;
-        }
-        if (agent.recursion_limit && typeof agent.recursion_limit === 'number') {
-          config.recursionLimit = agent.recursion_limit;
-        }
+      const runAgents = async (messages) => {
+        const agents = [this.options.agent];
+        if (
+          this.agentConfigs &&
+          this.agentConfigs.size > 0 &&
+          ((this.options.agent.edges?.length ?? 0) > 0 ||
+            (await checkCapability(this.options.req, AgentCapabilities.chain)))
+        ) {
+          agents.push(...this.agentConfigs.values());
+        }
+
+        if (agents[0].recursion_limit && typeof agents[0].recursion_limit === 'number') {
+          config.recursionLimit = agents[0].recursion_limit;
+        }
         if (
           agentsEConfig?.maxRecursionLimit &&
           config.recursionLimit > agentsEConfig?.maxRecursionLimit
         ) {
           config.recursionLimit = agentsEConfig?.maxRecursionLimit;
         }
-        config.configurable.agent_id = agent.id;
-        config.configurable.name = agent.name;
-        config.configurable.agent_index = i;
-        const noSystemMessages = noSystemModelRegex.some((regex) =>
-          agent.model_parameters.model.match(regex),
-        );
-        const systemMessage = Object.values(agent.toolContextMap ?? {})
-          .join('\n')
-          .trim();
-        let systemContent = [
-          systemMessage,
-          agent.instructions ?? '',
-          i !== 0 ? (agent.additional_instructions ?? '') : '',
-        ]
-          .join('\n')
-          .trim();
-        if (noSystemMessages === true) {
-          agent.instructions = undefined;
-          agent.additional_instructions = undefined;
-        } else {
-          agent.instructions = systemContent;
-          agent.additional_instructions = undefined;
-        }
-        if (noSystemMessages === true && systemContent?.length) {
-          const latestMessageContent = _messages.pop().content;
-          if (typeof latestMessageContent !== 'string') {
-            latestMessageContent[0].text = [systemContent, latestMessageContent[0].text].join('\n');
-            _messages.push(new HumanMessage({ content: latestMessageContent }));
-          } else {
-            const text = [systemContent, latestMessageContent].join('\n');
-            _messages.push(new HumanMessage(text));
-          }
-        }
-        let messages = _messages;
-        if (agent.useLegacyContent === true) {
-          messages = formatContentStrings(messages);
-        }
-        const defaultHeaders =
-          agent.model_parameters?.clientOptions?.defaultHeaders ??
-          agent.model_parameters?.configuration?.defaultHeaders;
-        if (defaultHeaders?.['anthropic-beta']?.includes('prompt-caching')) {
-          messages = addCacheControl(messages);
-        }
-        if (i === 0) {
-          memoryPromise = this.runMemory(messages);
-        }
-        /** Resolve request-based headers for Custom Endpoints. Note: if this is added to
-         * non-custom endpoints, needs consideration of varying provider header configs.
-         */
-        if (agent.model_parameters?.configuration?.defaultHeaders != null) {
-          agent.model_parameters.configuration.defaultHeaders = resolveHeaders({
-            headers: agent.model_parameters.configuration.defaultHeaders,
-            body: config.configurable.requestBody,
-          });
-        }
+
+        // TODO: needs to be added as part of AgentContext initialization
+        // const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
+        // const noSystemMessages = noSystemModelRegex.some((regex) =>
+        //   agent.model_parameters.model.match(regex),
+        // );
+        // if (noSystemMessages === true && systemContent?.length) {
+        //   const latestMessageContent = _messages.pop().content;
+        //   if (typeof latestMessageContent !== 'string') {
+        //     latestMessageContent[0].text = [systemContent, latestMessageContent[0].text].join('\n');
+        //     _messages.push(new HumanMessage({ content: latestMessageContent }));
+        //   } else {
+        //     const text = [systemContent, latestMessageContent].join('\n');
+        //     _messages.push(new HumanMessage(text));
+        //   }
+        // }
+        // let messages = _messages;
+        // if (agent.useLegacyContent === true) {
+        //   messages = formatContentStrings(messages);
+        // }
+        // if (
+        //   agent.model_parameters?.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes(
+        //     'prompt-caching',
+        //   )
+        // ) {
+        //   messages = addCacheControl(messages);
+        // }
+
+        memoryPromise = this.runMemory(messages);
         run = await createRun({
-          agent,
-          req: this.options.req,
+          agents,
+          indexTokenCountMap,
           runId: this.responseMessageId,
           signal: abortController.signal,
           customHandlers: this.options.eventHandlers,
+          requestBody: config.configurable.requestBody,
+          tokenCounter: createTokenCounter(this.getEncoding()),
         });
 
         if (!run) {
           throw new Error('Failed to create run');
         }
-        if (i === 0) {
-          this.run = run;
-        }
-
-        if (contentData.length) {
-          const agentUpdate = {
-            type: ContentTypes.AGENT_UPDATE,
-            [ContentTypes.AGENT_UPDATE]: {
-              index: contentData.length,
-              runId: this.responseMessageId,
-              agentId: agent.id,
-            },
-          };
-          const streamData = {
-            event: GraphEvents.ON_AGENT_UPDATE,
-            data: agentUpdate,
-          };
-          this.options.aggregateContent(streamData);
-          sendEvent(this.options.res, streamData);
-          contentData.push(agentUpdate);
-          run.Graph.contentData = contentData;
-        }
+        this.run = run;
 
         if (userMCPAuthMap != null) {
           config.configurable.userMCPAuthMap = userMCPAuthMap;
         }
 
+        /** @deprecated Agent Chain */
+        config.configurable.last_agent_id = agents[agents.length - 1].id;
         await run.processStream({ messages }, config, {
-          keepContent: i !== 0,
-          tokenCounter: createTokenCounter(this.getEncoding()),
-          indexTokenCountMap: currentIndexCountMap,
-          maxContextTokens: agent.maxContextTokens,
           callbacks: {
             [Callback.TOOL_ERROR]: logToolError,
           },
@@ -942,109 +881,22 @@ class AgentClient extends BaseClient {
         config.signal = null;
       };
 
-      await runAgent(this.options.agent, initialMessages);
-
-      let finalContentStart = 0;
-      if (
-        this.agentConfigs &&
-        this.agentConfigs.size > 0 &&
-        (await checkCapability(this.options.req, AgentCapabilities.chain))
-      ) {
-        const windowSize = 5;
-        let latestMessage = initialMessages.pop().content;
-        if (typeof latestMessage !== 'string') {
-          latestMessage = latestMessage[0].text;
-        }
-        let i = 1;
-        let runMessages = [];
-        const windowIndexCountMap = {};
-        const windowMessages = initialMessages.slice(-windowSize);
-        let currentIndex = 4;
-        for (let i = initialMessages.length - 1; i >= 0; i--) {
-          windowIndexCountMap[currentIndex] = indexTokenCountMap[i];
-          currentIndex--;
-          if (currentIndex < 0) {
-            break;
-          }
-        }
-        const encoding = this.getEncoding();
-        const tokenCounter = createTokenCounter(encoding);
-        for (const [agentId, agent] of this.agentConfigs) {
-          if (abortController.signal.aborted === true) {
-            break;
-          }
-          const currentRun = await run;
-          if (
-            i === this.agentConfigs.size &&
-            config.configurable.hide_sequential_outputs === true
-          ) {
-            const content = this.contentParts.filter(
-              (part) => part.type === ContentTypes.TOOL_CALL,
-            );
-            this.options.res.write(
-              `event: message\ndata: ${JSON.stringify({
-                event: 'on_content_update',
-                data: {
-                  runId: this.responseMessageId,
-                  content,
-                },
-              })}\n\n`,
-            );
-          }
-          const _runMessages = currentRun.Graph.getRunMessages();
-          finalContentStart = this.contentParts.length;
-          runMessages = runMessages.concat(_runMessages);
-          const contentData = currentRun.Graph.contentData.slice();
-          const bufferString = getBufferString([new HumanMessage(latestMessage), ...runMessages]);
-          if (i === this.agentConfigs.size) {
-            logger.debug(`SEQUENTIAL AGENTS: Last buffer string:\n${bufferString}`);
-          }
-          try {
-            const contextMessages = [];
-            const runIndexCountMap = {};
-            for (let i = 0; i < windowMessages.length; i++) {
-              const message = windowMessages[i];
-              const messageType = message._getType();
-              if (
-                (!agent.tools || agent.tools.length === 0) &&
-                (messageType === 'tool' || (message.tool_calls?.length ?? 0) > 0)
-              ) {
-                continue;
-              }
-              runIndexCountMap[contextMessages.length] = windowIndexCountMap[i];
-              contextMessages.push(message);
-            }
-            const bufferMessage = new HumanMessage(bufferString);
-            runIndexCountMap[contextMessages.length] = tokenCounter(bufferMessage);
-            const currentMessages = [...contextMessages, bufferMessage];
-            await runAgent(agent, currentMessages, i, contentData, runIndexCountMap);
-          } catch (err) {
-            logger.error(
-              `[api/server/controllers/agents/client.js #chatCompletion] Error running agent ${agentId} (${i})`,
-              err,
-            );
-          }
-          i++;
-        }
-      }
-
-      if (config.configurable.hide_sequential_outputs !== true) {
-        finalContentStart = 0;
-      }
-
-      this.contentParts = this.contentParts.filter((part, index) => {
-        // Include parts that are either:
-        // 1. At or after the finalContentStart index
-        // 2. Of type tool_call
-        // 3. Have tool_call_ids property
-        return (
-          index >= finalContentStart || part.type === ContentTypes.TOOL_CALL || part.tool_call_ids
-        );
-      });
+      await runAgents(initialMessages);
+
+      /** @deprecated Agent Chain */
+      if (config.configurable.hide_sequential_outputs) {
+        this.contentParts = this.contentParts.filter((part, index) => {
+          // Include parts that are either:
+          // 1. At or after the finalContentStart index
+          // 2. Of type tool_call
+          // 3. Have tool_call_ids property
+          return (
+            index >= this.contentParts.length - 1 ||
+            part.type === ContentTypes.TOOL_CALL ||
+            part.tool_call_ids
+          );
+        });
+      }
+      /** Note: not implemented */
 
       try {
         const attachments = await this.awaitMemoryWithTimeout(memoryPromise);
         if (attachments && attachments.length > 0) {

View file

@@ -1,6 +1,10 @@
 const { logger } = require('@librechat/data-schemas');
 const { createContentAggregator } = require('@librechat/agents');
-const { validateAgentModel, getCustomEndpointConfig } = require('@librechat/api');
+const {
+  validateAgentModel,
+  getCustomEndpointConfig,
+  createSequentialChainEdges,
+} = require('@librechat/api');
 const {
   Constants,
   EModelEndpoint,
@@ -119,44 +123,90 @@ const initializeClient = async ({ req, res, signal, endpointOption }) => {
   const agent_ids = primaryConfig.agent_ids;
   let userMCPAuthMap = primaryConfig.userMCPAuthMap;
 
-  if (agent_ids?.length) {
-    for (const agentId of agent_ids) {
-      const agent = await getAgent({ id: agentId });
-      if (!agent) {
-        throw new Error(`Agent ${agentId} not found`);
-      }
-      const validationResult = await validateAgentModel({
-        req,
-        res,
-        agent,
-        modelsConfig,
-        logViolation,
-      });
-      if (!validationResult.isValid) {
-        throw new Error(validationResult.error?.message);
-      }
-      const config = await initializeAgent({
-        req,
-        res,
-        agent,
-        loadTools,
-        requestFiles,
-        conversationId,
-        endpointOption,
-        allowedProviders,
-      });
-      if (userMCPAuthMap != null) {
-        Object.assign(userMCPAuthMap, config.userMCPAuthMap ?? {});
-      } else {
-        userMCPAuthMap = config.userMCPAuthMap;
-      }
-      agentConfigs.set(agentId, config);
-    }
-  }
+  async function processAgent(agentId) {
+    const agent = await getAgent({ id: agentId });
+    if (!agent) {
+      throw new Error(`Agent ${agentId} not found`);
+    }
+    const validationResult = await validateAgentModel({
+      req,
+      res,
+      agent,
+      modelsConfig,
+      logViolation,
+    });
+    if (!validationResult.isValid) {
+      throw new Error(validationResult.error?.message);
+    }
+    const config = await initializeAgent({
+      req,
+      res,
+      agent,
+      loadTools,
+      requestFiles,
+      conversationId,
+      endpointOption,
+      allowedProviders,
+    });
+    if (userMCPAuthMap != null) {
+      Object.assign(userMCPAuthMap, config.userMCPAuthMap ?? {});
+    } else {
+      userMCPAuthMap = config.userMCPAuthMap;
+    }
+    agentConfigs.set(agentId, config);
+  }
+
+  let edges = primaryConfig.edges;
+  const checkAgentInit = (agentId) => agentId === primaryConfig.id || agentConfigs.has(agentId);
+  if ((edges?.length ?? 0) > 0) {
+    for (const edge of edges) {
+      if (Array.isArray(edge.to)) {
+        for (const to of edge.to) {
+          if (checkAgentInit(to)) {
+            continue;
+          }
+          await processAgent(to);
+        }
+      } else if (typeof edge.to === 'string' && checkAgentInit(edge.to)) {
+        continue;
+      } else if (typeof edge.to === 'string') {
+        await processAgent(edge.to);
+      }
+
+      if (Array.isArray(edge.from)) {
+        for (const from of edge.from) {
+          if (checkAgentInit(from)) {
+            continue;
+          }
+          await processAgent(from);
+        }
+      } else if (typeof edge.from === 'string' && checkAgentInit(edge.from)) {
+        continue;
+      } else if (typeof edge.from === 'string') {
+        await processAgent(edge.from);
+      }
+    }
+  }
+
+  /** @deprecated Agent Chain */
+  if (agent_ids?.length) {
+    for (const agentId of agent_ids) {
+      if (checkAgentInit(agentId)) {
+        continue;
+      }
+      await processAgent(agentId);
+    }
+    const chain = await createSequentialChainEdges([primaryConfig.id].concat(agent_ids), '{convo}');
+    edges = edges ? edges.concat(chain) : chain;
+  }
+  primaryConfig.edges = edges;
 
   let endpointConfig = appConfig.endpoints?.[primaryConfig.endpoint];
   if (!isAgentsEndpoint(primaryConfig.endpoint) && !endpointConfig) {
     try {

View file

@@ -27,13 +27,13 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
   const anthropicConfig = appConfig.endpoints?.[EModelEndpoint.anthropic];
 
   if (anthropicConfig) {
-    clientOptions.streamRate = anthropicConfig.streamRate;
+    clientOptions._lc_stream_delay = anthropicConfig.streamRate;
     clientOptions.titleModel = anthropicConfig.titleModel;
   }
 
   const allConfig = appConfig.endpoints?.all;
   if (allConfig) {
-    clientOptions.streamRate = allConfig.streamRate;
+    clientOptions._lc_stream_delay = allConfig.streamRate;
   }
 
   if (optionsOnly) {

View file

@@ -1,8 +1,6 @@
 const { HttpsProxyAgent } = require('https-proxy-agent');
-const { createHandleLLMNewToken } = require('@librechat/api');
 const {
   AuthType,
-  Constants,
   EModelEndpoint,
   bedrockInputParser,
   bedrockOutputParser,
@@ -11,7 +9,6 @@ const {
 const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
 
 const getOptions = async ({ req, overrideModel, endpointOption }) => {
-  const appConfig = req.config;
   const {
     BEDROCK_AWS_SECRET_ACCESS_KEY,
     BEDROCK_AWS_ACCESS_KEY_ID,
@@ -47,10 +44,12 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
     checkUserKeyExpiry(expiresAt, EModelEndpoint.bedrock);
   }
 
-  /** @type {number} */
+  /*
+  Callback for stream rate no longer awaits and may end the stream prematurely
+  /** @type {number}
   let streamRate = Constants.DEFAULT_STREAM_RATE;
-  /** @type {undefined | TBaseEndpoint} */
+  /** @type {undefined | TBaseEndpoint}
   const bedrockConfig = appConfig.endpoints?.[EModelEndpoint.bedrock];
   if (bedrockConfig && bedrockConfig.streamRate) {
@@ -61,6 +60,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
   if (allConfig && allConfig.streamRate) {
     streamRate = allConfig.streamRate;
   }
+  */
 
   /** @type {BedrockClientOptions} */
   const requestOptions = {
@@ -88,12 +88,6 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
     llmConfig.endpointHost = BEDROCK_REVERSE_PROXY;
   }
 
-  llmConfig.callbacks = [
-    {
-      handleLLMNewToken: createHandleLLMNewToken(streamRate),
-    },
-  ];
-
   return {
     /** @type {BedrockClientOptions} */
     llmConfig,

View file

@@ -3,7 +3,6 @@ const {
   isUserProvided,
   getOpenAIConfig,
   getCustomEndpointConfig,
-  createHandleLLMNewToken,
 } = require('@librechat/api');
 const {
   CacheKeys,
@@ -157,11 +156,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
     if (!clientOptions.streamRate) {
       return options;
     }
-    options.llmConfig.callbacks = [
-      {
-        handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
-      },
-    ];
+    options.llmConfig._lc_stream_delay = clientOptions.streamRate;
     return options;
   }
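
The stream-rate hunks in this and the neighboring endpoint files all follow one pattern: the per-token `handleLLMNewToken` callback throttle is removed in favor of a plain option on the LLM config. A minimal sketch of the pattern, assuming `_lc_stream_delay` is read by `@librechat/agents` 3.x as a delay between streamed tokens (the Bedrock diff notes the old callback no longer awaits and could end the stream prematurely):

// Before (removed): throttle by sleeping inside a LangChain callback
// options.llmConfig.callbacks = [
//   { handleLLMNewToken: createHandleLLMNewToken(streamRate) },
// ];

// After: pass the configured rate through as a config field for the agents runtime
if (clientOptions.streamRate) {
  options.llmConfig._lc_stream_delay = clientOptions.streamRate;
}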

View file

@@ -4,7 +4,6 @@ jest.mock('@librechat/api', () => ({
   ...jest.requireActual('@librechat/api'),
   resolveHeaders: jest.fn(),
   getOpenAIConfig: jest.fn(),
-  createHandleLLMNewToken: jest.fn(),
   getCustomEndpointConfig: jest.fn().mockReturnValue({
     apiKey: 'test-key',
     baseURL: 'https://test.com',

View file

@@ -5,7 +5,6 @@ const {
   isUserProvided,
   getOpenAIConfig,
   getAzureCredentials,
-  createHandleLLMNewToken,
 } = require('@librechat/api');
 const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
 const OpenAIClient = require('~/app/clients/OpenAIClient');
@@ -151,11 +150,7 @@ const initializeClient = async ({
     if (!streamRate) {
       return options;
     }
-    options.llmConfig.callbacks = [
-      {
-        handleLLMNewToken: createHandleLLMNewToken(streamRate),
-      },
-    ];
+    options.llmConfig._lc_stream_delay = streamRate;
     return options;
   }
} }

View file

@@ -1,9 +1,10 @@
 import { AgentCapabilities, ArtifactModes } from 'librechat-data-provider';
 import type {
-  Agent,
-  AgentProvider,
   AgentModelParameters,
   SupportContact,
+  AgentProvider,
+  GraphEdge,
+  Agent,
 } from 'librechat-data-provider';
 import type { OptionWithIcon, ExtendedFile } from './types';
@@ -33,7 +34,9 @@ export type AgentForm = {
   model_parameters: AgentModelParameters;
   tools?: string[];
   provider?: AgentProvider | OptionWithIcon;
+  /** @deprecated Use edges instead */
   agent_ids?: string[];
+  edges?: GraphEdge[];
   [AgentCapabilities.artifacts]?: ArtifactModes | string;
   recursion_limit?: number;
   support_contact?: SupportContact;

View file

@@ -0,0 +1,92 @@
+import React, { useMemo, useState } from 'react';
+import { EModelEndpoint, Constants } from 'librechat-data-provider';
+import { ChevronDown } from 'lucide-react';
+import type { TMessage } from 'librechat-data-provider';
+import MessageIcon from '~/components/Share/MessageIcon';
+import { useAgentsMapContext } from '~/Providers';
+import { useLocalize } from '~/hooks';
+import { cn } from '~/utils';
+
+interface AgentHandoffProps {
+  name: string;
+  args: string | Record<string, unknown>;
+  output?: string | null;
+}
+
+const AgentHandoff: React.FC<AgentHandoffProps> = ({ name, args: _args = '' }) => {
+  const localize = useLocalize();
+  const agentsMap = useAgentsMapContext();
+  const [showInfo, setShowInfo] = useState(false);
+
+  /** Extracted agent ID from tool name (e.g., "lc_transfer_to_agent_gUV0wMb7zHt3y3Xjz-8_4" -> "agent_gUV0wMb7zHt3y3Xjz-8_4") */
+  const targetAgentId = useMemo(() => {
+    if (typeof name !== 'string' || !name.startsWith(Constants.LC_TRANSFER_TO_)) {
+      return null;
+    }
+    return name.replace(Constants.LC_TRANSFER_TO_, '');
+  }, [name]);
+
+  const targetAgent = useMemo(() => {
+    if (!targetAgentId || !agentsMap) {
+      return null;
+    }
+    return agentsMap[targetAgentId];
+  }, [agentsMap, targetAgentId]);
+
+  const args = useMemo(() => {
+    if (typeof _args === 'string') {
+      return _args;
+    }
+    try {
+      return JSON.stringify(_args, null, 2);
+    } catch {
+      return '';
+    }
+  }, [_args]) as string;
+
+  /** Requires more than 2 characters as can be an empty object: `{}` */
+  const hasInfo = useMemo(() => (args?.trim()?.length ?? 0) > 2, [args]);
+
+  return (
+    <div className="my-3">
+      <div
+        className={cn(
+          'flex items-center gap-2.5 text-sm text-text-secondary',
+          hasInfo && 'cursor-pointer transition-colors hover:text-text-primary',
+        )}
+        onClick={() => hasInfo && setShowInfo(!showInfo)}
+      >
+        <div className="flex h-6 w-6 items-center justify-center overflow-hidden rounded-full">
+          <MessageIcon
+            message={
+              {
+                endpoint: EModelEndpoint.agents,
+                isCreatedByUser: false,
+              } as TMessage
+            }
+            agent={targetAgent || undefined}
+          />
+        </div>
+        <span className="select-none">{localize('com_ui_transferred_to')}</span>
+        <span className="select-none font-medium text-text-primary">
+          {targetAgent?.name || localize('com_ui_agent')}
+        </span>
+        {hasInfo && (
+          <ChevronDown
+            className={cn('ml-1 h-3 w-3 transition-transform', showInfo && 'rotate-180')}
+          />
+        )}
+      </div>
+      {hasInfo && showInfo && (
+        <div className="ml-8 mt-2 rounded-md bg-surface-secondary p-3 text-xs">
+          <div className="mb-1 font-medium text-text-secondary">
+            {localize('com_ui_handoff_instructions')}:
+          </div>
+          <pre className="overflow-x-auto whitespace-pre-wrap text-text-primary">{args}</pre>
+        </div>
+      )}
+    </div>
+  );
+};
+
+export default AgentHandoff;

View file

@@ -1,5 +1,6 @@
 import {
   Tools,
+  Constants,
   ContentTypes,
   ToolCallTypes,
   imageGenTools,
@@ -10,6 +11,7 @@ import type { TMessageContentParts, TAttachment } from 'librechat-data-provider'
 import { OpenAIImageGen, EmptyText, Reasoning, ExecuteCode, AgentUpdate, Text } from './Parts';
 import { ErrorMessage } from './MessageContent';
 import RetrievalCall from './RetrievalCall';
+import AgentHandoff from './AgentHandoff';
 import CodeAnalyze from './CodeAnalyze';
 import Container from './Container';
 import WebSearch from './WebSearch';
@@ -123,6 +125,14 @@ const Part = memo(
           isLast={isLast}
         />
       );
+    } else if (isToolCall && toolCall.name?.startsWith(Constants.LC_TRANSFER_TO_)) {
+      return (
+        <AgentHandoff
+          args={toolCall.args ?? ''}
+          name={toolCall.name || ''}
+          output={toolCall.output ?? ''}
+        />
+      );
     } else if (isToolCall) {
       return (
         <ToolCall

View file

@@ -11,8 +11,8 @@ interface AgentUpdateProps {
 
 const AgentUpdate: React.FC<AgentUpdateProps> = ({ currentAgentId }) => {
   const localize = useLocalize();
-  const agentsMap = useAgentsMapContext() || {};
-  const currentAgent = useMemo(() => agentsMap[currentAgentId], [agentsMap, currentAgentId]);
+  const agentsMap = useAgentsMapContext();
+  const currentAgent = useMemo(() => agentsMap?.[currentAgentId], [agentsMap, currentAgentId]);
 
   if (!currentAgentId) {
     return null;
   }

View file

@@ -5,6 +5,7 @@ import { useFormContext, Controller } from 'react-hook-form';
 import type { AgentForm } from '~/common';
 import { useAgentPanelContext } from '~/Providers';
 import MaxAgentSteps from './MaxAgentSteps';
+import AgentHandoffs from './AgentHandoffs';
 import { useLocalize } from '~/hooks';
 import AgentChain from './AgentChain';
 import { Panel } from '~/common';
@@ -42,6 +43,12 @@ export default function AdvancedPanel() {
       </div>
       <div className="flex flex-col gap-4 px-2">
         <MaxAgentSteps />
+        <Controller
+          name="edges"
+          control={control}
+          defaultValue={[]}
+          render={({ field }) => <AgentHandoffs field={field} currentAgentId={currentAgentId} />}
+        />
         {chainEnabled && (
           <Controller
             name="agent_ids"

View file

@@ -0,0 +1,296 @@
+import React, { useState, useMemo, useCallback, useEffect } from 'react';
+import { EModelEndpoint } from 'librechat-data-provider';
+import { X, Waypoints, PlusCircle, ChevronDown } from 'lucide-react';
+import {
+  Label,
+  Input,
+  Textarea,
+  HoverCard,
+  CircleHelpIcon,
+  HoverCardPortal,
+  ControlCombobox,
+  HoverCardContent,
+  HoverCardTrigger,
+} from '@librechat/client';
+import type { TMessage, GraphEdge } from 'librechat-data-provider';
+import type { ControllerRenderProps } from 'react-hook-form';
+import type { AgentForm, OptionWithIcon } from '~/common';
+import MessageIcon from '~/components/Share/MessageIcon';
+import { useAgentsMapContext } from '~/Providers';
+import { useLocalize } from '~/hooks';
+import { ESide } from '~/common';
+
+interface AgentHandoffsProps {
+  field: ControllerRenderProps<AgentForm, 'edges'>;
+  currentAgentId: string;
+}
+
+/** TODO: make configurable */
+const MAX_HANDOFFS = 10;
+
+const AgentHandoffs: React.FC<AgentHandoffsProps> = ({ field, currentAgentId }) => {
+  const localize = useLocalize();
+  const [newAgentId, setNewAgentId] = useState('');
+  const [expandedIndices, setExpandedIndices] = useState<Set<number>>(new Set());
+  const agentsMap = useAgentsMapContext();
+  const edgesValue = field.value;
+  const edges = useMemo(() => edgesValue || [], [edgesValue]);
+  const agents = useMemo(() => (agentsMap ? Object.values(agentsMap) : []), [agentsMap]);
+
+  const selectableAgents = useMemo(
+    () =>
+      agents
+        .filter((agent) => agent?.id !== currentAgentId)
+        .map(
+          (agent) =>
+            ({
+              label: agent?.name || '',
+              value: agent?.id || '',
+              icon: (
+                <MessageIcon
+                  message={
+                    {
+                      endpoint: EModelEndpoint.agents,
+                      isCreatedByUser: false,
+                    } as TMessage
+                  }
+                  agent={agent}
+                />
+              ),
+            }) as OptionWithIcon,
+        ),
+    [agents, currentAgentId],
+  );
+
+  const getAgentDetails = useCallback((id: string) => agentsMap?.[id], [agentsMap]);
+
+  useEffect(() => {
+    if (newAgentId && edges.length < MAX_HANDOFFS) {
+      const newEdge: GraphEdge = {
+        from: currentAgentId,
+        to: newAgentId,
+        edgeType: 'handoff',
+      };
+      field.onChange([...edges, newEdge]);
+      setNewAgentId('');
+    }
+  }, [newAgentId, edges, field, currentAgentId]);
+
+  const removeHandoffAt = (index: number) => {
+    field.onChange(edges.filter((_, i) => i !== index));
+    // Also remove from expanded set
+    setExpandedIndices((prev) => {
+      const newSet = new Set(prev);
+      newSet.delete(index);
+      return newSet;
+    });
+  };
+
+  const updateHandoffAt = (index: number, agentId: string) => {
+    const updated = [...edges];
+    updated[index] = { ...updated[index], to: agentId };
+    field.onChange(updated);
+  };
+
+  const updateHandoffDetailsAt = (index: number, updates: Partial<GraphEdge>) => {
+    const updated = [...edges];
+    updated[index] = { ...updated[index], ...updates };
+    field.onChange(updated);
+  };
+
+  const toggleExpanded = (index: number) => {
+    setExpandedIndices((prev) => {
+      const newSet = new Set(prev);
+      if (newSet.has(index)) {
+        newSet.delete(index);
+      } else {
+        newSet.add(index);
+      }
+      return newSet;
+    });
+  };
+
+  const getTargetAgentId = (to: string | string[]): string => {
+    return Array.isArray(to) ? to[0] : to;
+  };
+
+  return (
+    <HoverCard openDelay={50}>
+      <div className="flex items-center justify-between gap-2">
+        <div className="flex items-center gap-2">
+          <label className="font-semibold text-text-primary">
+            {localize('com_ui_agent_handoffs')}
+          </label>
+          <HoverCardTrigger>
+            <CircleHelpIcon className="h-4 w-4 text-text-tertiary" />
+          </HoverCardTrigger>
+        </div>
+        <div className="flex items-center gap-3">
+          <div className="rounded-full border border-purple-600/40 bg-purple-500/10 px-2 py-0.5 text-xs font-medium text-purple-700 hover:bg-purple-700/10 dark:text-purple-400">
+            {localize('com_ui_beta')}
+          </div>
+          <div className="text-xs text-text-secondary">
+            {edges.length} / {MAX_HANDOFFS}
+          </div>
+        </div>
+      </div>
+      <div className="space-y-1">
+        {edges.map((edge, idx) => {
+          const targetAgentId = getTargetAgentId(edge.to);
+          const isExpanded = expandedIndices.has(idx);
+          return (
+            <React.Fragment key={idx}>
+              <div className="space-y-1">
+                <div className="flex h-10 items-center gap-2 rounded-md border border-border-medium bg-surface-tertiary pr-2">
+                  <ControlCombobox
+                    isCollapsed={false}
+                    ariaLabel={localize('com_ui_agent_var', { 0: localize('com_ui_select') })}
+                    selectedValue={targetAgentId}
+                    setValue={(id) => updateHandoffAt(idx, id)}
+                    selectPlaceholder={localize('com_ui_agent_var', {
+                      0: localize('com_ui_select'),
+                    })}
+                    searchPlaceholder={localize('com_ui_agent_var', {
+                      0: localize('com_ui_search'),
+                    })}
+                    items={selectableAgents}
+                    displayValue={getAgentDetails(targetAgentId)?.name ?? ''}
+                    SelectIcon={
+                      <MessageIcon
+                        message={
+                          {
+                            endpoint: EModelEndpoint.agents,
+                            isCreatedByUser: false,
+                          } as TMessage
+                        }
+                        agent={targetAgentId && agentsMap ? agentsMap[targetAgentId] : undefined}
+                      />
+                    }
+                    className="flex-1 border-border-heavy"
+                    containerClassName="px-0"
+                  />
+                  <button
+                    type="button"
+                    className="rounded p-1 transition hover:bg-surface-hover"
+                    onClick={() => toggleExpanded(idx)}
+                  >
+                    <ChevronDown
+                      size={16}
+                      className={`text-text-secondary transition-transform ${
+                        isExpanded ? 'rotate-180' : ''
+                      }`}
+                    />
+                  </button>
+                  <button
+                    type="button"
+                    className="rounded-xl p-1 transition hover:bg-surface-hover"
+                    onClick={() => removeHandoffAt(idx)}
+                  >
+                    <X size={18} className="text-text-secondary" />
+                  </button>
+                </div>
+                {isExpanded && (
+                  <div className="space-y-3 rounded-md border border-border-light bg-surface-primary p-3">
+                    <div>
+                      <Label
+                        htmlFor={`handoff-desc-${idx}`}
+                        className="text-xs text-text-secondary"
+                      >
+                        {localize('com_ui_agent_handoff_description')}
+                      </Label>
+                      <Input
+                        id={`handoff-desc-${idx}`}
+                        placeholder={localize('com_ui_agent_handoff_description_placeholder')}
+                        value={edge.description || ''}
+                        onChange={(e) =>
+                          updateHandoffDetailsAt(idx, { description: e.target.value })
+                        }
+                        className="mt-1 h-8 text-sm"
+                      />
+                    </div>
+                    <div>
+                      <Label
+                        htmlFor={`handoff-prompt-${idx}`}
+                        className="text-xs text-text-secondary"
+                      >
+                        {localize('com_ui_agent_handoff_prompt')}
+                      </Label>
+                      <Textarea
+                        id={`handoff-prompt-${idx}`}
+                        placeholder={localize('com_ui_agent_handoff_prompt_placeholder')}
+                        value={typeof edge.prompt === 'string' ? edge.prompt : ''}
+                        onChange={(e) => updateHandoffDetailsAt(idx, { prompt: e.target.value })}
+                        className="mt-1 h-20 resize-none text-sm"
+                      />
+                    </div>
+                    {edge.prompt && (
+                      <div>
+                        <Label
+                          htmlFor={`handoff-promptkey-${idx}`}
+                          className="text-xs text-text-secondary"
+                        >
+                          {localize('com_ui_agent_handoff_prompt_key')}
+                        </Label>
+                        <Input
+                          id={`handoff-promptkey-${idx}`}
+                          placeholder={localize('com_ui_agent_handoff_prompt_key_placeholder')}
+                          value={edge.promptKey || ''}
+                          onChange={(e) =>
+                            updateHandoffDetailsAt(idx, { promptKey: e.target.value })
+                          }
+                          className="mt-1 h-8 text-sm"
+                        />
+                      </div>
+                    )}
+                  </div>
+                )}
+              </div>
+              {idx < edges.length - 1 && (
+                <Waypoints className="mx-auto text-text-secondary" size={14} />
+              )}
+            </React.Fragment>
+          );
+        })}
+        {edges.length < MAX_HANDOFFS && (
+          <>
+            {edges.length > 0 && <Waypoints className="mx-auto text-text-secondary" size={14} />}
+            <ControlCombobox
+              isCollapsed={false}
+              ariaLabel={localize('com_ui_agent_var', { 0: localize('com_ui_add') })}
+              selectedValue=""
+              setValue={setNewAgentId}
+              selectPlaceholder={localize('com_ui_agent_handoff_add')}
+              searchPlaceholder={localize('com_ui_agent_var', { 0: localize('com_ui_search') })}
+              items={selectableAgents}
+              className="h-10 w-full border-dashed border-border-heavy text-center text-text-secondary hover:text-text-primary"
+              containerClassName="px-0"
+              SelectIcon={<PlusCircle size={16} className="text-text-secondary" />}
+            />
+          </>
+        )}
+        {edges.length >= MAX_HANDOFFS && (
+          <p className="pt-1 text-center text-xs italic text-text-tertiary">
+            {localize('com_ui_agent_handoff_max', { 0: MAX_HANDOFFS })}
+          </p>
+        )}
+      </div>
+      <HoverCardPortal>
+        <HoverCardContent side={ESide.Top} className="w-80">
+          <div className="space-y-2">
+            <p className="text-sm text-text-secondary">{localize('com_ui_agent_handoff_info')}</p>
+            <p className="text-sm text-text-secondary">{localize('com_ui_agent_handoff_info_2')}</p>
+          </div>
+        </HoverCardContent>
+      </HoverCardPortal>
+    </HoverCard>
+  );
+};
+
+export default AgentHandoffs;

View file

@@ -168,6 +168,7 @@ export default function AgentPanel() {
     model_parameters,
     provider: _provider,
     agent_ids,
+    edges,
     end_after_tools,
     hide_sequential_outputs,
     recursion_limit,
@@ -192,6 +193,7 @@ export default function AgentPanel() {
     provider,
     model_parameters,
     agent_ids,
+    edges,
     end_after_tools,
     hide_sequential_outputs,
     recursion_limit,
@@ -225,6 +227,7 @@ export default function AgentPanel() {
     provider,
     model_parameters,
     agent_ids,
+    edges,
     end_after_tools,
     hide_sequential_outputs,
     recursion_limit,

View file

@@ -103,6 +103,11 @@ export default function AgentSelect({
       return;
     }
 
+    if (name === 'edges' && Array.isArray(value)) {
+      formValues[name] = value;
+      return;
+    }
+
     if (!keys.has(name)) {
       return;
     }

View file

@@ -655,6 +655,20 @@
   "com_ui_agent_chain": "Agent Chain (Mixture-of-Agents)",
   "com_ui_agent_chain_info": "Enables creating sequences of agents. Each agent can access outputs from previous agents in the chain. Based on the \"Mixture-of-Agents\" architecture where agents use previous outputs as auxiliary information.",
   "com_ui_agent_chain_max": "You have reached the maximum of {{0}} agents.",
+  "com_ui_agent_handoffs": "Agent Handoffs",
+  "com_ui_agent_handoff_add": "Add handoff agent",
+  "com_ui_agent_handoff_description": "Handoff description",
+  "com_ui_agent_handoff_description_placeholder": "e.g., Transfer to data analyst for statistical analysis",
+  "com_ui_agent_handoff_info": "Configure agents that this agent can transfer conversations to when specific expertise is needed.",
+  "com_ui_agent_handoff_info_2": "Each handoff creates a transfer tool that enables seamless routing to specialist agents with context.",
+  "com_ui_agent_handoff_max": "Maximum {{0}} handoff agents reached.",
+  "com_ui_agent_handoff_prompt": "Passthrough content",
+  "com_ui_agent_handoff_prompt_placeholder": "Tell this agent what content to generate and pass to the handoff agent. You need to add something here to enable this feature",
+  "com_ui_agent_handoff_prompt_key": "Content parameter name (default: 'instructions')",
+  "com_ui_agent_handoff_prompt_key_placeholder": "Label the content passed (default: 'instructions')",
+  "com_ui_transferred_to": "Transferred to",
+  "com_ui_handoff_instructions": "Handoff instructions",
+  "com_ui_beta": "Beta",
   "com_ui_agent_delete_error": "There was an error deleting the agent",
   "com_ui_agent_deleted": "Successfully deleted agent",
   "com_ui_agent_duplicate_error": "There was an error duplicating the agent",

package-lock.json (generated), 3057 changes: file diff suppressed because it is too large

View file

@@ -81,8 +81,8 @@
     "@azure/search-documents": "^12.0.0",
     "@azure/storage-blob": "^12.27.0",
     "@keyv/redis": "^4.3.3",
-    "@langchain/core": "^0.3.62",
-    "@librechat/agents": "^2.4.90",
+    "@langchain/core": "^0.3.72",
+    "@librechat/agents": "^3.0.2",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.17.1",
     "axios": "^1.12.1",

View file

@ -0,0 +1,47 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { BaseMessage, getBufferString } from '@langchain/core/messages';
import type { GraphEdge } from '@librechat/agents';
const DEFAULT_PROMPT_TEMPLATE = `Based on the following conversation and analysis from previous agents, please provide your insights:\n\n{convo}\n\nPlease add your specific expertise and perspective to this discussion.`;
/**
* Helper function to create sequential chain edges with buffer string prompts
*
* @deprecated Agent Chain helper
* @param agentIds - Array of agent IDs in order of execution
* @param promptTemplate - Optional prompt template string; defaults to a predefined template if not provided
* @returns Array of edges configured for sequential chain with buffer prompts
*/
export async function createSequentialChainEdges(
agentIds: string[],
promptTemplate = DEFAULT_PROMPT_TEMPLATE,
): Promise<GraphEdge[]> {
const edges: GraphEdge[] = [];
for (let i = 0; i < agentIds.length - 1; i++) {
const fromAgent = agentIds[i];
const toAgent = agentIds[i + 1];
edges.push({
from: fromAgent,
to: toAgent,
edgeType: 'direct',
// Use a prompt function to create the buffer string from all previous results
prompt: async (messages: BaseMessage[], startIndex: number) => {
/** Only the messages from this run (after startIndex) are passed in */
const runMessages = messages.slice(startIndex);
const bufferString = getBufferString(runMessages);
const template = PromptTemplate.fromTemplate(promptTemplate);
const result = await template.invoke({
convo: bufferString,
});
return result.value;
},
/** Critical: exclude previous results so only the prompt is passed */
excludeResults: true,
description: `Sequential chain from ${fromAgent} to ${toAgent}`,
});
}
return edges;
}
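As a usage illustration (not part of the diff; the agent IDs and import path are hypothetical), the helper yields one 'direct' edge per consecutive pair of agents:

import { createSequentialChainEdges } from './chain'; // hypothetical relative path

// Builds two edges: agent_writer -> agent_editor -> agent_reviewer
const edges = await createSequentialChainEdges(['agent_writer', 'agent_editor', 'agent_reviewer']);
console.log(edges.map((e) => `${e.from} -> ${e.to}`));
// ['agent_writer -> agent_editor', 'agent_editor -> agent_reviewer']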

View file

@@ -1,3 +1,4 @@
+export * from './chain';
 export * from './memory';
 export * from './migration';
 export * from './legacy';

View file

@@ -15,7 +15,7 @@ import type {
 } from '@librechat/agents';
 import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
 import type { ObjectId, MemoryMethods } from '@librechat/data-schemas';
-import type { BaseMessage } from '@langchain/core/messages';
+import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
 import type { Response as ServerResponse } from 'express';
 import { Tokenizer } from '~/utils';
@@ -466,7 +466,7 @@ async function handleMemoryArtifact({
   data: ToolEndData;
   metadata?: ToolEndMetadata;
 }) {
-  const output = data?.output;
+  const output = data?.output as ToolMessage | undefined;
   if (!output) {
     return null;
   }
@@ -509,7 +509,7 @@ export function createMemoryCallback({
   artifactPromises: Promise<Partial<TAttachment> | null>[];
 }): ToolEndCallback {
   return async (data: ToolEndData, metadata?: Record<string, unknown>) => {
-    const output = data?.output;
+    const output = data?.output as ToolMessage | undefined;
     const memoryArtifact = output?.artifact?.[Tools.memory] as MemoryArtifact;
     if (memoryArtifact == null) {
       return;

View file

@@ -1,15 +1,17 @@
 import { Run, Providers } from '@librechat/agents';
 import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
 import type {
+  MultiAgentGraphConfig,
   OpenAIClientOptions,
   StandardGraphConfig,
-  EventHandler,
+  AgentInputs,
   GenericTool,
-  GraphEvents,
+  RunConfig,
   IState,
 } from '@librechat/agents';
 import type { Agent } from 'librechat-data-provider';
 import type * as t from '~/types';
+import { resolveHeaders } from '~/utils/env';

 const customProviders = new Set([
   Providers.XAI,
@@ -40,13 +42,18 @@ export function getReasoningKey(
   return reasoningKey;
 }

+type RunAgent = Omit<Agent, 'tools'> & {
+  tools?: GenericTool[];
+  maxContextTokens?: number;
+  toolContextMap?: Record<string, string>;
+};
+
 /**
  * Creates a new Run instance with custom handlers and configuration.
  *
  * @param options - The options for creating the Run instance.
- * @param options.agent - The agent for this run.
+ * @param options.agents - The agents for this run.
  * @param options.signal - The signal for this run.
+ * @param options.req - The server request.
  * @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
  * @param options.customHandlers - Custom event handlers.
  * @param options.streaming - Whether to use streaming.
@@ -55,61 +62,108 @@
  */
 export async function createRun({
   runId,
-  agent,
   signal,
+  agents,
+  requestBody,
+  tokenCounter,
   customHandlers,
+  indexTokenCountMap,
   streaming = true,
   streamUsage = true,
 }: {
-  agent: Omit<Agent, 'tools'> & { tools?: GenericTool[] };
+  agents: RunAgent[];
   signal: AbortSignal;
   runId?: string;
   streaming?: boolean;
   streamUsage?: boolean;
-  customHandlers?: Record<GraphEvents, EventHandler>;
-}): Promise<Run<IState>> {
-  const provider =
-    (providerEndpointMap[
-      agent.provider as keyof typeof providerEndpointMap
-    ] as unknown as Providers) ?? agent.provider;
-
-  const llmConfig: t.RunLLMConfig = Object.assign(
-    {
-      provider,
-      streaming,
-      streamUsage,
-    },
-    agent.model_parameters,
-  );
-
-  /** Resolves issues with new OpenAI usage field */
-  if (
-    customProviders.has(agent.provider) ||
-    (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
-  ) {
-    llmConfig.streamUsage = false;
-    llmConfig.usage = true;
-  }
-
-  const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
-  const graphConfig: StandardGraphConfig = {
-    signal,
-    llmConfig,
-    reasoningKey,
-    tools: agent.tools,
-    instructions: agent.instructions,
-    additional_instructions: agent.additional_instructions,
-    // toolEnd: agent.end_after_tools,
-  };
-
-  // TEMPORARY FOR TESTING
-  if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
-    graphConfig.streamBuffer = 2000;
-  }
+  requestBody?: t.RequestBody;
+} & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise<
+  Run<IState>
+> {
+  const agentInputs: AgentInputs[] = [];
+  const buildAgentContext = (agent: RunAgent) => {
+    const provider =
+      (providerEndpointMap[
+        agent.provider as keyof typeof providerEndpointMap
+      ] as unknown as Providers) ?? agent.provider;
+    const llmConfig: t.RunLLMConfig = Object.assign(
+      {
+        provider,
+        streaming,
+        streamUsage,
+      },
+      agent.model_parameters,
+    );
+    const systemMessage = Object.values(agent.toolContextMap ?? {})
+      .join('\n')
+      .trim();
+    const systemContent = [
+      systemMessage,
+      agent.instructions ?? '',
+      agent.additional_instructions ?? '',
+    ]
+      .join('\n')
+      .trim();
+    /**
+     * Resolve request-based headers for Custom Endpoints. Note: if this is added to
+     * non-custom endpoints, needs consideration of varying provider header configs.
+     * This is done at this step because the request body may contain dynamic values
+     * that need to be resolved after agent initialization.
+     */
+    if (llmConfig?.configuration?.defaultHeaders != null) {
+      llmConfig.configuration.defaultHeaders = resolveHeaders({
+        headers: llmConfig.configuration.defaultHeaders as Record<string, string>,
+        body: requestBody,
+      });
+    }
+    /** Resolves issues with new OpenAI usage field */
+    if (
+      customProviders.has(agent.provider) ||
+      (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
+    ) {
+      llmConfig.streamUsage = false;
+      llmConfig.usage = true;
+    }
+    const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
+    const agentInput: AgentInputs = {
+      provider,
+      reasoningKey,
+      agentId: agent.id,
+      tools: agent.tools,
+      clientOptions: llmConfig,
+      instructions: systemContent,
+      maxContextTokens: agent.maxContextTokens,
+    };
+    agentInputs.push(agentInput);
+  };
+
+  for (const agent of agents) {
+    buildAgentContext(agent);
+  }
+
+  const graphConfig: RunConfig['graphConfig'] = {
+    signal,
+    agents: agentInputs,
+    edges: agents[0].edges,
+  };
+  if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) {
+    (graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent';
+  } else {
+    (graphConfig as StandardGraphConfig).type = 'standard';
+  }

   return Run.create({
     runId,
     graphConfig,
+    tokenCounter,
     customHandlers,
+    indexTokenCountMap,
   });
 }
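A hedged sketch of invoking the reworked createRun for a two-agent handoff (supervisorAgent and analystAgent are hypothetical RunAgent objects; note that only the first agent's edges are read):

const controller = new AbortController();
const run = await createRun({
  signal: controller.signal,
  agents: [
    {
      ...supervisorAgent,
      edges: [{ from: supervisorAgent.id, to: analystAgent.id, edgeType: 'handoff' }],
    },
    analystAgent,
  ],
  customHandlers,
});
// With more than one agent (or any edges), graphConfig.type becomes 'multi-agent';
// a single agent with no edges still produces a 'standard' graph.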

View file

@@ -40,6 +40,17 @@ export const agentSupportContactSchema = z
   })
   .optional();

+/** Graph edge schema for agent handoffs */
+export const graphEdgeSchema = z.object({
+  from: z.union([z.string(), z.array(z.string())]),
+  to: z.union([z.string(), z.array(z.string())]),
+  description: z.string().optional(),
+  edgeType: z.enum(['handoff', 'direct']).optional(),
+  prompt: z.union([z.string(), z.function()]).optional(),
+  excludeResults: z.boolean().optional(),
+  promptKey: z.string().optional(),
+});
+
 /** Base agent schema with all common fields */
 export const agentBaseSchema = z.object({
   name: z.string().nullable().optional(),
@@ -48,7 +59,9 @@ export const agentBaseSchema = z.object({
   avatar: agentAvatarSchema.nullable().optional(),
   model_parameters: z.record(z.unknown()).optional(),
   tools: z.array(z.string()).optional(),
+  /** @deprecated Use edges instead */
   agent_ids: z.array(z.string()).optional(),
+  edges: z.array(graphEdgeSchema).optional(),
   end_after_tools: z.boolean().optional(),
   hide_sequential_outputs: z.boolean().optional(),
   artifacts: z.string().optional(),
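As a quick sanity check (illustrative only, not part of the diff), a handoff edge that satisfies the new graphEdgeSchema above; the agent IDs are hypothetical and parse() throws if the shape is invalid:

const edge = graphEdgeSchema.parse({
  from: 'agent_supervisor',
  to: ['agent_data_analyst', 'agent_researcher'], // from/to accept a string or string[]
  edgeType: 'handoff',
  description: 'Transfer to a specialist when deeper analysis is needed',
  promptKey: 'instructions',
});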

View file

@@ -1,11 +1,10 @@
 import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
 import type {
   InitializeOpenAIOptionsParams,
-  OpenAIOptionsResult,
   OpenAIConfigOptions,
+  LLMConfigResult,
   UserKeyValues,
 } from '~/types';
-import { createHandleLLMNewToken } from '~/utils/generators';
 import { getAzureCredentials } from '~/utils/azure';
 import { isUserProvided } from '~/utils/common';
 import { resolveHeaders } from '~/utils/env';
@@ -27,7 +26,7 @@ export const initializeOpenAI = async ({
   overrideEndpoint,
   getUserKeyValues,
   checkUserKeyExpiry,
-}: InitializeOpenAIOptionsParams): Promise<OpenAIOptionsResult> => {
+}: InitializeOpenAIOptionsParams): Promise<LLMConfigResult> => {
   const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
     process.env;
@@ -160,17 +159,8 @@
   }

   if (streamRate) {
-    options.llmConfig.callbacks = [
-      {
-        handleLLMNewToken: createHandleLLMNewToken(streamRate),
-      },
-    ];
+    options.llmConfig._lc_stream_delay = streamRate;
   }

-  const result: OpenAIOptionsResult = {
-    ...options,
-    streamRate,
-  };
-  return result;
+  return options;
 };

View file

@@ -1,340 +0,0 @@
import { ContentTypes } from 'librechat-data-provider';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import { formatContentStrings } from './content';
describe('formatContentStrings', () => {
describe('Human messages', () => {
it('should convert human message with all text blocks to string', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello\nWorld');
});
it('should not convert human message with mixed content types (text + image)', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, text: 'what do you see' },
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
detail: 'auto',
},
},
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toEqual([
{ type: ContentTypes.TEXT, text: 'what do you see' },
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
detail: 'auto',
},
},
]);
});
it('should leave string content unchanged', () => {
const messages = [
new HumanMessage({
content: 'Hello World',
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello World');
});
it('should handle empty text blocks', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: '' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello\n\nWorld');
});
it('should handle null/undefined text values', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: null },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: undefined },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello\n\n\nWorld');
});
});
describe('AI messages', () => {
it('should convert AI message with all text blocks to string', () => {
const messages = [
new AIMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello\nWorld');
expect(result[0].getType()).toBe('ai');
});
it('should not convert AI message with mixed content types', () => {
const messages = [
new AIMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here is an image' },
{ type: ContentTypes.TOOL_CALL, tool_call: { name: 'generate_image' } },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toEqual([
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here is an image' },
{ type: ContentTypes.TOOL_CALL, tool_call: { name: 'generate_image' } },
]);
});
});
describe('System messages', () => {
it('should convert System message with all text blocks to string', () => {
const messages = [
new SystemMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('System\nMessage');
expect(result[0].getType()).toBe('system');
});
});
describe('Mixed message types', () => {
it('should process all valid message types in mixed array', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Human' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
],
}),
new AIMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'AI' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Response' },
],
}),
new SystemMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Prompt' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(3);
// All messages should be converted
expect(result[0].content).toBe('Human\nMessage');
expect(result[0].getType()).toBe('human');
expect(result[1].content).toBe('AI\nResponse');
expect(result[1].getType()).toBe('ai');
expect(result[2].content).toBe('System\nPrompt');
expect(result[2].getType()).toBe('system');
});
});
describe('Edge cases', () => {
it('should handle empty array', () => {
const result = formatContentStrings([]);
expect(result).toEqual([]);
});
it('should handle messages with non-array content', () => {
const messages = [
new HumanMessage({
content: 'This is a string content',
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('This is a string content');
});
it('should trim the final concatenated string', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' Hello ' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' World ' },
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello \n World');
});
});
describe('Real-world scenarios', () => {
it('should handle the exact scenario from the issue', () => {
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'hi there',
},
],
}),
new AIMessage({
content: [
{
type: 'text',
text: 'Hi Danny! How can I help you today?',
},
],
}),
new HumanMessage({
content: [
{
type: 'text',
text: 'what do you see',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
detail: 'auto',
},
},
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(3);
// First human message (all text) should be converted
expect(result[0].content).toBe('hi there');
expect(result[0].getType()).toBe('human');
// AI message (all text) should now also be converted
expect(result[1].content).toBe('Hi Danny! How can I help you today?');
expect(result[1].getType()).toBe('ai');
// Third message (mixed content) should remain unchanged
expect(result[2].content).toEqual([
{
type: 'text',
text: 'what do you see',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
detail: 'auto',
},
},
]);
});
it('should handle messages with tool calls', () => {
const messages = [
new HumanMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Please use the calculator' },
{
type: ContentTypes.TOOL_CALL,
tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
},
],
}),
new AIMessage({
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'I will calculate that for you' },
{
type: ContentTypes.TOOL_CALL,
tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
},
],
}),
];
const result = formatContentStrings(messages);
expect(result).toHaveLength(2);
// Should not convert because not all blocks are text
expect(result[0].content).toEqual([
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Please use the calculator' },
{
type: ContentTypes.TOOL_CALL,
tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
},
]);
expect(result[1].content).toEqual([
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'I will calculate that for you' },
{
type: ContentTypes.TOOL_CALL,
tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
},
]);
});
});
});

View file

@@ -1,57 +0,0 @@
import { ContentTypes } from 'librechat-data-provider';
import type { BaseMessage } from '@langchain/core/messages';
/**
* Formats an array of messages for LangChain, making sure all content fields are strings
* @param {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} payload - The array of messages to format.
* @returns {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
*/
export const formatContentStrings = (payload: Array<BaseMessage>): Array<BaseMessage> => {
// Create a new array to store the processed messages
const result: Array<BaseMessage> = [];
for (const message of payload) {
const messageType = message.getType();
const isValidMessage =
messageType === 'human' || messageType === 'ai' || messageType === 'system';
if (!isValidMessage) {
result.push(message);
continue;
}
// If content is already a string, add as-is
if (typeof message.content === 'string') {
result.push(message);
continue;
}
// If content is not an array, add as-is
if (!Array.isArray(message.content)) {
result.push(message);
continue;
}
// Check if all content blocks are text type
const allTextBlocks = message.content.every((block) => block.type === ContentTypes.TEXT);
// Only convert to string if all blocks are text type
if (!allTextBlocks) {
result.push(message);
continue;
}
// Reduce text types to a single string
const content = message.content.reduce((acc, curr) => {
if (curr.type === ContentTypes.TEXT) {
return `${acc}${curr[ContentTypes.TEXT] || ''}\n`;
}
return acc;
}, '');
message.content = content.trim();
result.push(message);
}
return result;
};

View file

@@ -1 +0,0 @@
export * from './content';

View file

@@ -10,7 +10,6 @@ export * from './mcp/oauth';
 export * from './mcp/auth';
 export * from './mcp/zod';
 /* Utilities */
-export * from './format';
 export * from './mcp/utils';
 export * from './utils';
 export * from './db/utils';

View file

@@ -31,6 +31,7 @@ export type OpenAIConfiguration = OpenAIClientOptions['configuration'];

 export type OAIClientOptions = OpenAIClientOptions & {
   include_reasoning?: boolean;
+  _lc_stream_delay?: number;
 };

 /**
@@ -100,10 +101,3 @@ export interface InitializeOpenAIOptionsParams {
   getUserKeyValues: GetUserKeyValuesFunction;
   checkUserKeyExpiry: CheckUserKeyExpiryFunction;
 }
-
-/**
- * Extended LLM config result with stream rate handling
- */
-export interface OpenAIOptionsResult extends LLMConfigResult {
-  streamRate?: number;
-}
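A hedged sketch of the simplified stream-rate handling this field supports (model name and delay are illustrative): instead of registering a handleLLMNewToken callback, the OpenAI initializer above now attaches the delay directly to the client options:

const clientOptions: OAIClientOptions = {
  model: 'gpt-4o-mini', // hypothetical model
  _lc_stream_delay: 25, // ~25 ms pause between streamed chunks, honored downstream
};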

View file

@@ -1615,6 +1615,10 @@ export enum Constants {
   * This helps inform the UI if the mcp server was previously added.
   * */
  mcp_server = 'sys__server__sys',
+  /**
+   * Handoff Tool Name Prefix
+   */
+  LC_TRANSFER_TO_ = 'lc_transfer_to_',
  /** Placeholder Agent ID for Ephemeral Agents */
  EPHEMERAL_AGENT_ID = 'ephemeral',
}
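For illustration (the actual call sites are outside this diff), the prefix presumably namespaces the generated handoff tools by agent ID:

// Hypothetical agent ID; yields 'lc_transfer_to_agent_data_analyst'
const toolName = `${Constants.LC_TRANSFER_TO_}agent_data_analyst`;
// The prefix also lets consumers detect transfer tools by name
const isHandoffTool = toolName.startsWith(Constants.LC_TRANSFER_TO_);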

View file

@@ -39,7 +39,6 @@ export enum Providers {
   GOOGLE = 'google',
   VERTEXAI = 'vertexai',
   BEDROCK = 'bedrock',
-  BEDROCK_LEGACY = 'bedrock_legacy',
   MISTRALAI = 'mistralai',
   MISTRAL = 'mistral',
   OLLAMA = 'ollama',
@@ -231,6 +230,7 @@ export const defaultAgentFormValues = {
   tools: [],
   provider: {},
   projectIds: [],
+  edges: [],
   artifacts: '',
   /** @deprecated Use ACL permissions instead */
   isCollaborative: false,

View file

@@ -355,3 +355,45 @@ export type AgentToolType = {
 } & ({ assistant_id: string; agent_id?: never } | { assistant_id?: never; agent_id?: string });

 export type ToolMetadata = TPlugin;
+
+export interface BaseMessage {
+  content: string;
+  role?: string;
+  [key: string]: unknown;
+}
+
+export interface BaseGraphState {
+  [key: string]: unknown;
+}
+
+export type GraphEdge = {
+  /** Agent ID, use a list for multiple sources */
+  from: string | string[];
+  /** Agent ID, use a list for multiple destinations */
+  to: string | string[];
+  description?: string;
+  /** Can return boolean or specific destination(s) */
+  condition?: (state: BaseGraphState) => boolean | string | string[];
+  /** 'handoff' creates tools for dynamic routing, 'direct' creates direct edges, which also allow parallel execution */
+  edgeType?: 'handoff' | 'direct';
+  /**
+   * For direct edges: Optional prompt to add when transitioning through this edge.
+   * String prompts can include variables like {results} which will be replaced with
+   * messages from startIndex onwards. When {results} is used, excludeResults defaults to true.
+   *
+   * For handoff edges: Description for the input parameter that the handoff tool accepts,
+   * allowing the supervisor to pass specific instructions/context to the transferred agent.
+   */
+  prompt?: string | ((messages: BaseMessage[], runStartIndex: number) => string | undefined);
+  /**
+   * When true, excludes messages from startIndex when adding prompt.
+   * Automatically set to true when {results} variable is used in prompt.
+   */
+  excludeResults?: boolean;
+  /**
+   * For handoff edges: Customizes the parameter name for the handoff input.
+   * Defaults to "instructions" if not specified.
+   * Only applies when prompt is provided for handoff edges.
+   */
+  promptKey?: string;
+};
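To make the two edge flavors concrete, hedged examples built from the GraphEdge JSDoc above (agent IDs are hypothetical):

// Handoff edge: generates a transfer tool; `prompt` describes the tool's
// input parameter, which is exposed under `promptKey` (defaults to 'instructions').
const handoff: GraphEdge = {
  from: 'supervisor',
  to: 'data_analyst',
  edgeType: 'handoff',
  prompt: 'What the specialist agent should do next',
  promptKey: 'instructions',
};

// Direct edge: always transitions; `{results}` interpolates the run's messages
// and implies excludeResults: true, so only the rendered prompt is passed along.
const direct: GraphEdge = {
  from: 'researcher',
  to: 'summarizer',
  edgeType: 'direct',
  prompt: 'Summarize the following findings:\n\n{results}',
};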

View file

@@ -1,7 +1,7 @@
 import type { OpenAPIV3 } from 'openapi-types';
 import type { AssistantsEndpoint, AgentProvider } from 'src/schemas';
+import type { Agents, GraphEdge } from './agents';
 import type { ContentTypes } from './runs';
-import type { Agents } from './agents';
 import type { TFile } from './files';

 import { ArtifactModes } from 'src/artifacts';
@@ -229,7 +229,9 @@ export type Agent = {
   /** @deprecated Use ACL permissions instead */
   isCollaborative?: boolean;
   tool_resources?: AgentToolResources;
+  /** @deprecated Use edges instead */
   agent_ids?: string[];
+  edges?: GraphEdge[];
   end_after_tools?: boolean;
   hide_sequential_outputs?: boolean;
   artifacts?: ArtifactModes;
@@ -255,6 +257,7 @@ export type AgentCreateParams = {
 } & Pick<
   Agent,
   | 'agent_ids'
+  | 'edges'
   | 'end_after_tools'
   | 'hide_sequential_outputs'
   | 'artifacts'
@@ -280,6 +283,7 @@ export type AgentUpdateParams = {
 } & Pick<
   Agent,
   | 'agent_ids'
+  | 'edges'
   | 'end_after_tools'
   | 'hide_sequential_outputs'
   | 'artifacts'

View file

@@ -68,9 +68,14 @@ const agentSchema = new Schema<IAgent>(
     end_after_tools: {
       type: Boolean,
     },
+    /** @deprecated Use edges instead */
     agent_ids: {
       type: [String],
     },
+    edges: {
+      type: [{ type: Schema.Types.Mixed }],
+      default: [],
+    },
     isCollaborative: {
       type: Boolean,
       default: undefined,

View file

@@ -1,4 +1,5 @@
 import { Document, Types } from 'mongoose';
+import type { GraphEdge } from 'librechat-data-provider';

 export interface ISupportContact {
   name?: string;
@@ -27,7 +28,9 @@ export interface IAgent extends Omit<Document, 'model'> {
   authorName?: string;
   hide_sequential_outputs?: boolean;
   end_after_tools?: boolean;
+  /** @deprecated Use edges instead */
   agent_ids?: string[];
+  edges?: GraphEdge[];
   /** @deprecated Use ACL permissions instead */
   isCollaborative?: boolean;
   conversation_starters?: string[];
conversation_starters?: string[]; conversation_starters?: string[];