🤖 feat: Agent Handoffs (Routing) (#10176)

* feat: Add support for agent handoffs with edges in agent forms and schemas

chore: Mark `agent_ids` field as deprecated in favor of edges across various schemas and types

chore: Update dependencies for @langchain/core and @librechat/agents to latest versions

chore: Update peer dependency for @librechat/agents to version 3.0.0-rc2 in package.json

chore: Update @librechat/agents dependency to version 3.0.0-rc3 in package.json and package-lock.json

feat: first pass, multi-agent handoffs

fix: update output type to ToolMessage in memory handling functions

fix: improve type checking for graphConfig in createRun function

refactor: remove unused content filtering logic in AgentClient

chore: update @librechat/agents dependency to version 3.0.0-rc4 in package.json and package-lock.json

fix: update @langchain/core peer dependency version to ^0.3.72 in package.json and package-lock.json

fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints

feat: Agent handoff UI

chore: update @librechat/agents dependency to version 3.0.0-rc8 in package.json and package-lock.json

fix: improve hasInfo condition and adjust UI element classes in AgentHandoff component

refactor: remove current fixed agent display from AgentHandoffs component due to redundancy

feat: enhance AgentHandoffs UI with localized beta label and improved layout

chore: update @librechat/agents dependency to version 3.0.0-rc10 in package.json and package-lock.json

feat: add `createSequentialChainEdges` function to add back agent chaining via multi-agents

feat: update `createSequentialChainEdges` call to only provide conversation context between agents

feat: deprecate Agent Chain functionality and update related methods for improved clarity

* chore: update @librechat/agents dependency to version 3.0.0-rc11 in package.json and package-lock.json

* refactor: remove unused addCacheControl function and related imports; import it from @librechat/agents instead

* chore: remove unused i18n keys

* refactor: remove unused format export from index.ts

* chore: update @librechat/agents to v3.0.0-rc13

* chore: remove BEDROCK_LEGACY provider from Providers enum

* chore: update @librechat/agents to version 3.0.2 in package.json
Danny Avila 2025-11-05 17:15:17 -05:00 committed by GitHub
parent 958a6c7872
commit 8a4a5a4790
41 changed files with 1108 additions and 3810 deletions
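
The change at the heart of the PR: agents now route to one another through explicit graph edges rather than the deprecated `agent_ids` chain. As a minimal sketch, here is an `edges` value the new `graphEdgeSchema` (added below) would accept; the agent IDs are hypothetical:

```ts
const edges = [
  {
    from: 'agent_router',
    to: ['agent_support', 'agent_billing'],
    edgeType: 'handoff',
    description: 'Route the request to the matching specialist',
  },
];
```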


@@ -0,0 +1,47 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { BaseMessage, getBufferString } from '@langchain/core/messages';
import type { GraphEdge } from '@librechat/agents';
const DEFAULT_PROMPT_TEMPLATE = `Based on the following conversation and analysis from previous agents, please provide your insights:\n\n{convo}\n\nPlease add your specific expertise and perspective to this discussion.`;
/**
* Helper function to create sequential chain edges with buffer string prompts
*
* @deprecated Agent Chain helper
* @param agentIds - Array of agent IDs in order of execution
* @param promptTemplate - Optional prompt template string; defaults to a predefined template if not provided
* @returns Array of edges configured for sequential chain with buffer prompts
*/
export async function createSequentialChainEdges(
  agentIds: string[],
  promptTemplate = DEFAULT_PROMPT_TEMPLATE,
): Promise<GraphEdge[]> {
  const edges: GraphEdge[] = [];
  for (let i = 0; i < agentIds.length - 1; i++) {
    const fromAgent = agentIds[i];
    const toAgent = agentIds[i + 1];
    edges.push({
      from: fromAgent,
      to: toAgent,
      edgeType: 'direct',
      // Use a prompt function to create the buffer string from all previous results
      prompt: async (messages: BaseMessage[], startIndex: number) => {
        /** Only the messages from this run (after startIndex) are passed in */
        const runMessages = messages.slice(startIndex);
        const bufferString = getBufferString(runMessages);
        const template = PromptTemplate.fromTemplate(promptTemplate);
        const result = await template.invoke({
          convo: bufferString,
        });
        return result.value;
      },
      /** Critical: exclude previous results so only the prompt is passed */
      excludeResults: true,
      description: `Sequential chain from ${fromAgent} to ${toAgent}`,
    });
  }
  return edges;
}

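For context, a hedged usage sketch of the deprecated `createSequentialChainEdges` helper above; the agent IDs are placeholders:

```ts
// Builds two 'direct' edges: agent_a -> agent_b, then agent_b -> agent_c.
const chain = await createSequentialChainEdges(['agent_a', 'agent_b', 'agent_c']);
// Each edge's prompt() renders the run's messages (after startIndex) into the
// {convo} slot of the template, and excludeResults: true means the next agent
// sees only that rendered prompt rather than the raw upstream results.
```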

@@ -1,3 +1,4 @@
export * from './chain';
export * from './memory';
export * from './migration';
export * from './legacy';


@@ -15,7 +15,7 @@ import type {
} from '@librechat/agents';
import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
import type { ObjectId, MemoryMethods } from '@librechat/data-schemas';
import type { BaseMessage } from '@langchain/core/messages';
import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
import type { Response as ServerResponse } from 'express';
import { Tokenizer } from '~/utils';
@@ -466,7 +466,7 @@ async function handleMemoryArtifact({
data: ToolEndData;
metadata?: ToolEndMetadata;
}) {
const output = data?.output;
const output = data?.output as ToolMessage | undefined;
if (!output) {
return null;
}
@@ -509,7 +509,7 @@ export function createMemoryCallback({
artifactPromises: Promise<Partial<TAttachment> | null>[];
}): ToolEndCallback {
return async (data: ToolEndData, metadata?: Record<string, unknown>) => {
const output = data?.output;
const output = data?.output as ToolMessage | undefined;
const memoryArtifact = output?.artifact?.[Tools.memory] as MemoryArtifact;
if (memoryArtifact == null) {
return;

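The `ToolMessage` casts above matter because the tool-end `output` is typed loosely, while `artifact` is the field LangChain tools use for out-of-band data. A sketch of the narrowing, with the `Tools.memory` key stubbed as a plain string:

```ts
import type { ToolMessage } from '@langchain/core/messages';

// ToolMessage.artifact is deliberately loose (any), so the lookup only
// type-checks once the output is narrowed to ToolMessage | undefined.
function getMemoryArtifact(output?: ToolMessage): unknown {
  return output?.artifact?.['memory'];
}
```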

@@ -1,15 +1,17 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
MultiAgentGraphConfig,
OpenAIClientOptions,
StandardGraphConfig,
EventHandler,
AgentInputs,
GenericTool,
GraphEvents,
RunConfig,
IState,
} from '@librechat/agents';
import type { Agent } from 'librechat-data-provider';
import type * as t from '~/types';
import { resolveHeaders } from '~/utils/env';
const customProviders = new Set([
Providers.XAI,
@@ -40,13 +42,18 @@ export function getReasoningKey(
return reasoningKey;
}
type RunAgent = Omit<Agent, 'tools'> & {
tools?: GenericTool[];
maxContextTokens?: number;
toolContextMap?: Record<string, string>;
};
/**
* Creates a new Run instance with custom handlers and configuration.
*
* @param options - The options for creating the Run instance.
* @param options.agent - The agent for this run.
* @param options.agents - The agents for this run.
* @param options.signal - The signal for this run.
* @param options.req - The server request.
* @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
* @param options.customHandlers - Custom event handlers.
* @param options.streaming - Whether to use streaming.
@@ -55,61 +62,108 @@
*/
export async function createRun({
runId,
agent,
signal,
agents,
requestBody,
tokenCounter,
customHandlers,
indexTokenCountMap,
streaming = true,
streamUsage = true,
}: {
agent: Omit<Agent, 'tools'> & { tools?: GenericTool[] };
agents: RunAgent[];
signal: AbortSignal;
runId?: string;
streaming?: boolean;
streamUsage?: boolean;
customHandlers?: Record<GraphEvents, EventHandler>;
}): Promise<Run<IState>> {
const provider =
(providerEndpointMap[
agent.provider as keyof typeof providerEndpointMap
] as unknown as Providers) ?? agent.provider;
requestBody?: t.RequestBody;
} & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise<
Run<IState>
> {
const agentInputs: AgentInputs[] = [];
const buildAgentContext = (agent: RunAgent) => {
const provider =
(providerEndpointMap[
agent.provider as keyof typeof providerEndpointMap
] as unknown as Providers) ?? agent.provider;
const llmConfig: t.RunLLMConfig = Object.assign(
{
const llmConfig: t.RunLLMConfig = Object.assign(
{
provider,
streaming,
streamUsage,
},
agent.model_parameters,
);
const systemMessage = Object.values(agent.toolContextMap ?? {})
.join('\n')
.trim();
const systemContent = [
systemMessage,
agent.instructions ?? '',
agent.additional_instructions ?? '',
]
.join('\n')
.trim();
/**
* Resolve request-based headers for Custom Endpoints. Note: if this is added to
* non-custom endpoints, needs consideration of varying provider header configs.
* This is done at this step because the request body may contain dynamic values
* that need to be resolved after agent initialization.
*/
if (llmConfig?.configuration?.defaultHeaders != null) {
llmConfig.configuration.defaultHeaders = resolveHeaders({
headers: llmConfig.configuration.defaultHeaders as Record<string, string>,
body: requestBody,
});
}
/** Resolves issues with new OpenAI usage field */
if (
customProviders.has(agent.provider) ||
(agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
) {
llmConfig.streamUsage = false;
llmConfig.usage = true;
}
const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
const agentInput: AgentInputs = {
provider,
streaming,
streamUsage,
},
agent.model_parameters,
);
/** Resolves issues with new OpenAI usage field */
if (
customProviders.has(agent.provider) ||
(agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
) {
llmConfig.streamUsage = false;
llmConfig.usage = true;
}
const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
const graphConfig: StandardGraphConfig = {
signal,
llmConfig,
reasoningKey,
tools: agent.tools,
instructions: agent.instructions,
additional_instructions: agent.additional_instructions,
// toolEnd: agent.end_after_tools,
reasoningKey,
agentId: agent.id,
tools: agent.tools,
clientOptions: llmConfig,
instructions: systemContent,
maxContextTokens: agent.maxContextTokens,
};
agentInputs.push(agentInput);
};
// TEMPORARY FOR TESTING
if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
graphConfig.streamBuffer = 2000;
for (const agent of agents) {
buildAgentContext(agent);
}
const graphConfig: RunConfig['graphConfig'] = {
signal,
agents: agentInputs,
edges: agents[0].edges,
};
if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) {
(graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent';
} else {
(graphConfig as StandardGraphConfig).type = 'standard';
}
return Run.create({
runId,
graphConfig,
tokenCounter,
customHandlers,
indexTokenCountMap,
});
}
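
Net effect of the refactored `createRun` above: it accepts `agents: RunAgent[]`, builds one `AgentInputs` entry per agent, and derives the graph type from the agent count and the first agent's `edges`. A hedged call sketch, where `supervisor` and `specialist` stand in for loaded agents and the counter/handler objects are assumed to exist:

```ts
const controller = new AbortController();
const run = await createRun({
  signal: controller.signal,
  agents: [supervisor, specialist], // supervisor.edges drives the routing
  tokenCounter,
  customHandlers,
  indexTokenCountMap,
});
// Two or more agents, or any edges on agents[0], yields a 'multi-agent' graph;
// a single agent with no edges falls back to 'standard'.
```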


@@ -40,6 +40,17 @@ export const agentSupportContactSchema = z
  })
  .optional();
/** Graph edge schema for agent handoffs */
export const graphEdgeSchema = z.object({
  from: z.union([z.string(), z.array(z.string())]),
  to: z.union([z.string(), z.array(z.string())]),
  description: z.string().optional(),
  edgeType: z.enum(['handoff', 'direct']).optional(),
  prompt: z.union([z.string(), z.function()]).optional(),
  excludeResults: z.boolean().optional(),
  promptKey: z.string().optional(),
});
/** Base agent schema with all common fields */
export const agentBaseSchema = z.object({
  name: z.string().nullable().optional(),
@@ -48,7 +59,9 @@ export const agentBaseSchema = z.object({
  avatar: agentAvatarSchema.nullable().optional(),
  model_parameters: z.record(z.unknown()).optional(),
  tools: z.array(z.string()).optional(),
  /** @deprecated Use edges instead */
  agent_ids: z.array(z.string()).optional(),
  edges: z.array(graphEdgeSchema).optional(),
  end_after_tools: z.boolean().optional(),
  hide_sequential_outputs: z.boolean().optional(),
  artifacts: z.string().optional(),
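
For reference, a value the new `graphEdgeSchema` should accept; the import path assumes the schema is exported from `librechat-data-provider` alongside the other agent schemas:

```ts
import { graphEdgeSchema } from 'librechat-data-provider'; // export path assumed

const edge = graphEdgeSchema.parse({
  from: 'agent_triage',
  to: ['agent_docs', 'agent_code'],
  edgeType: 'handoff',
  description: 'Hand off to a specialist based on the request topic',
});
```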