🤖 feat: Agent Handoffs (Routing) (#10176)

* feat: Add support for agent handoffs with edges in agent forms and schemas

chore: Mark `agent_ids` field as deprecated in favor of edges across various schemas and types
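
For illustration, the direction of the schema change looks roughly like this (a sketch only; the exact edge shape here is an assumption, not the shipped type):

```ts
// Before: downstream agents referenced by a flat ID list (now deprecated)
const legacyAgent = {
  id: 'agent_supervisor',
  agent_ids: ['agent_research', 'agent_writer'],
};

// After: explicit handoff edges describing which agent can route to which
const agentWithEdges = {
  id: 'agent_supervisor',
  edges: [
    { from: 'agent_supervisor', to: 'agent_research' },
    { from: 'agent_supervisor', to: 'agent_writer' },
  ],
};
```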

chore: Update dependencies for @langchain/core and @librechat/agents to latest versions

chore: Update peer dependency for @librechat/agents to version 3.0.0-rc2 in package.json

chore: Update @librechat/agents dependency to version 3.0.0-rc3 in package.json and package-lock.json

feat: first pass at multi-agent handoffs

fix: update output type to ToolMessage in memory handling functions

fix: improve type checking for graphConfig in createRun function

refactor: remove unused content filtering logic in AgentClient

chore: update @librechat/agents dependency to version 3.0.0-rc4 in package.json and package-lock.json

fix: update @langchain/core peer dependency version to ^0.3.72 in package.json and package-lock.json

fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints

feat: Agent handoff UI

chore: update @librechat/agents dependency to version 3.0.0-rc8 in package.json and package-lock.json

fix: improve hasInfo condition and adjust UI element classes in AgentHandoff component

refactor: remove current fixed agent display from AgentHandoffs component due to redundancy

feat: enhance AgentHandoffs UI with localized beta label and improved layout

chore: update @librechat/agents dependency to version 3.0.0-rc10 in package.json and package-lock.json

feat: add `createSequentialChainEdges` function to restore agent chaining on top of multi-agent graphs

feat: update the `createSequentialChainEdges` call to provide only conversation context between agents
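
A minimal sketch of what such a chaining helper could look like (hypothetical signature and edge shape; the real implementation in this PR may differ):

```ts
type ChainEdge = { from: string; to: string };

/** Turns an ordered list of agent IDs into a linear chain of handoff edges. */
function createSequentialChainEdges(agentIds: string[]): ChainEdge[] {
  const edges: ChainEdge[] = [];
  for (let i = 0; i < agentIds.length - 1; i++) {
    edges.push({ from: agentIds[i], to: agentIds[i + 1] });
  }
  return edges;
}

// e.g. ['a', 'b', 'c'] => [{ from: 'a', to: 'b' }, { from: 'b', to: 'c' }]
```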

feat: deprecate Agent Chain functionality and update related methods for improved clarity

* chore: update @librechat/agents dependency to version 3.0.0-rc11 in package.json and package-lock.json

* refactor: remove the unused local addCacheControl function and related imports; import it from @librechat/agents instead

* chore: remove unused i18n keys

* refactor: remove unused format export from index.ts

* chore: update @librechat/agents to v3.0.0-rc13

* chore: remove BEDROCK_LEGACY provider from Providers enum

* chore: update @librechat/agents to version 3.0.2 in package.json
Danny Avila · 2025-11-05 17:15:17 -05:00 · committed by GitHub
parent 958a6c7872 · commit 8a4a5a4790
41 changed files with 1108 additions and 3810 deletions


```diff
@@ -1,15 +1,17 @@
 import { Run, Providers } from '@librechat/agents';
 import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
 import type {
+  MultiAgentGraphConfig,
   OpenAIClientOptions,
   StandardGraphConfig,
-  EventHandler,
+  AgentInputs,
   GenericTool,
-  GraphEvents,
+  RunConfig,
   IState,
 } from '@librechat/agents';
 import type { Agent } from 'librechat-data-provider';
 import type * as t from '~/types';
+import { resolveHeaders } from '~/utils/env';
 
 const customProviders = new Set([
   Providers.XAI,
@@ -40,13 +42,18 @@ export function getReasoningKey(
   return reasoningKey;
 }
 
+type RunAgent = Omit<Agent, 'tools'> & {
+  tools?: GenericTool[];
+  maxContextTokens?: number;
+  toolContextMap?: Record<string, string>;
+};
+
 /**
  * Creates a new Run instance with custom handlers and configuration.
  *
  * @param options - The options for creating the Run instance.
- * @param options.agent - The agent for this run.
+ * @param options.agents - The agents for this run.
  * @param options.signal - The signal for this run.
- * @param options.req - The server request.
  * @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
  * @param options.customHandlers - Custom event handlers.
  * @param options.streaming - Whether to use streaming.
@@ -55,61 +62,108 @@ export function getReasoningKey(
  */
 export async function createRun({
   runId,
-  agent,
   signal,
+  agents,
+  requestBody,
+  tokenCounter,
   customHandlers,
+  indexTokenCountMap,
   streaming = true,
   streamUsage = true,
 }: {
-  agent: Omit<Agent, 'tools'> & { tools?: GenericTool[] };
+  agents: RunAgent[];
   signal: AbortSignal;
   runId?: string;
   streaming?: boolean;
   streamUsage?: boolean;
-  customHandlers?: Record<GraphEvents, EventHandler>;
-}): Promise<Run<IState>> {
-  const provider =
-    (providerEndpointMap[
-      agent.provider as keyof typeof providerEndpointMap
-    ] as unknown as Providers) ?? agent.provider;
+  requestBody?: t.RequestBody;
+} & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise<
+  Run<IState>
+> {
+  const agentInputs: AgentInputs[] = [];
+  const buildAgentContext = (agent: RunAgent) => {
+    const provider =
+      (providerEndpointMap[
+        agent.provider as keyof typeof providerEndpointMap
+      ] as unknown as Providers) ?? agent.provider;
 
-  const llmConfig: t.RunLLMConfig = Object.assign(
-    {
-      provider,
-      streaming,
-      streamUsage,
-    },
-    agent.model_parameters,
-  );
+    const llmConfig: t.RunLLMConfig = Object.assign(
+      {
+        provider,
+        streaming,
+        streamUsage,
+      },
+      agent.model_parameters,
+    );
 
-  /** Resolves issues with new OpenAI usage field */
-  if (
-    customProviders.has(agent.provider) ||
-    (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
-  ) {
-    llmConfig.streamUsage = false;
-    llmConfig.usage = true;
-  }
+    const systemMessage = Object.values(agent.toolContextMap ?? {})
+      .join('\n')
+      .trim();
+
+    const systemContent = [
+      systemMessage,
+      agent.instructions ?? '',
+      agent.additional_instructions ?? '',
+    ]
+      .join('\n')
+      .trim();
 
-  const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
-  const graphConfig: StandardGraphConfig = {
-    signal,
-    llmConfig,
-    reasoningKey,
-    tools: agent.tools,
-    instructions: agent.instructions,
-    additional_instructions: agent.additional_instructions,
-    // toolEnd: agent.end_after_tools,
-  };
+    /**
+     * Resolve request-based headers for Custom Endpoints. Note: if this is added to
+     * non-custom endpoints, needs consideration of varying provider header configs.
+     * This is done at this step because the request body may contain dynamic values
+     * that need to be resolved after agent initialization.
+     */
+    if (llmConfig?.configuration?.defaultHeaders != null) {
+      llmConfig.configuration.defaultHeaders = resolveHeaders({
+        headers: llmConfig.configuration.defaultHeaders as Record<string, string>,
+        body: requestBody,
+      });
+    }
+
+    /** Resolves issues with new OpenAI usage field */
+    if (
+      customProviders.has(agent.provider) ||
+      (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
+    ) {
+      llmConfig.streamUsage = false;
+      llmConfig.usage = true;
+    }
+
+    const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
+    const agentInput: AgentInputs = {
+      provider,
+      reasoningKey,
+      agentId: agent.id,
+      tools: agent.tools,
+      clientOptions: llmConfig,
+      instructions: systemContent,
+      maxContextTokens: agent.maxContextTokens,
+    };
+    agentInputs.push(agentInput);
+  };
 
-  // TEMPORARY FOR TESTING
-  if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
-    graphConfig.streamBuffer = 2000;
-  }
+  for (const agent of agents) {
+    buildAgentContext(agent);
+  }
+
+  const graphConfig: RunConfig['graphConfig'] = {
+    signal,
+    agents: agentInputs,
+    edges: agents[0].edges,
+  };
+
+  if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) {
+    (graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent';
+  } else {
+    (graphConfig as StandardGraphConfig).type = 'standard';
+  }
 
   return Run.create({
     runId,
     graphConfig,
+    tokenCounter,
     customHandlers,
+    indexTokenCountMap,
   });
 }
```
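
A minimal usage sketch of the new `createRun` signature, assuming the agents (with their tools and `edges`) were already loaded elsewhere; the variable names are illustrative:

```ts
const controller = new AbortController();

const run = await createRun({
  signal: controller.signal,
  agents: [supervisorAgent, researchAgent], // RunAgent[]; edges are read from agents[0].edges
  requestBody,
  tokenCounter,
  customHandlers,
  indexTokenCountMap,
});
```

With more than one agent input, or any `edges` on the primary agent, `graphConfig.type` is set to `'multi-agent'`; a single agent without edges still runs on the `'standard'` graph.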