Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 00:40:14 +01:00)
🤖 feat: Agent Handoffs (Routing) (#10176)
* feat: Add support for agent handoffs with edges in agent forms and schemas
* chore: Mark `agent_ids` field as deprecated in favor of edges across various schemas and types
* chore: Update dependencies for @langchain/core and @librechat/agents to latest versions
* chore: Update peer dependency for @librechat/agents to version 3.0.0-rc2 in package.json
* chore: Update @librechat/agents dependency to version 3.0.0-rc3 in package.json and package-lock.json
* feat: first pass, multi-agent handoffs
* fix: update output type to ToolMessage in memory handling functions
* fix: improve type checking for graphConfig in createRun function
* refactor: remove unused content filtering logic in AgentClient
* chore: update @librechat/agents dependency to version 3.0.0-rc4 in package.json and package-lock.json
* fix: update @langchain/core peer dependency version to ^0.3.72 in package.json and package-lock.json
* fix: update @librechat/agents dependency to version 3.0.0-rc6 in package.json and package-lock.json; refactor stream rate handling in various endpoints
* feat: Agent handoff UI
* chore: update @librechat/agents dependency to version 3.0.0-rc8 in package.json and package-lock.json
* fix: improve hasInfo condition and adjust UI element classes in AgentHandoff component
* refactor: remove current fixed agent display from AgentHandoffs component due to redundancy
* feat: enhance AgentHandoffs UI with localized beta label and improved layout
* chore: update @librechat/agents dependency to version 3.0.0-rc10 in package.json and package-lock.json
* feat: add `createSequentialChainEdges` function to add back agent chaining via multi-agents
* feat: update `createSequentialChainEdges` call to only provide conversation context between agents
* feat: deprecate Agent Chain functionality and update related methods for improved clarity
* chore: update @librechat/agents dependency to version 3.0.0-rc11 in package.json and package-lock.json
* refactor: remove unused addCacheControl function and related imports and import from @librechat/agents
* chore: remove unused i18n keys
* refactor: remove unused format export from index.ts
* chore: update @librechat/agents to v3.0.0-rc13
* chore: remove BEDROCK_LEGACY provider from Providers enum
* chore: update @librechat/agents to version 3.0.2 in package.json
Parent: 958a6c7872
Commit: 8a4a5a4790
41 changed files with 1108 additions and 3810 deletions
@@ -81,8 +81,8 @@
     "@azure/search-documents": "^12.0.0",
     "@azure/storage-blob": "^12.27.0",
     "@keyv/redis": "^4.3.3",
-    "@langchain/core": "^0.3.62",
-    "@librechat/agents": "^2.4.90",
+    "@langchain/core": "^0.3.72",
+    "@librechat/agents": "^3.0.2",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.17.1",
     "axios": "^1.12.1",

packages/api/src/agents/chain.ts (new file, 47 lines):
@@ -0,0 +1,47 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { BaseMessage, getBufferString } from '@langchain/core/messages';
import type { GraphEdge } from '@librechat/agents';

const DEFAULT_PROMPT_TEMPLATE = `Based on the following conversation and analysis from previous agents, please provide your insights:\n\n{convo}\n\nPlease add your specific expertise and perspective to this discussion.`;

/**
 * Helper function to create sequential chain edges with buffer string prompts
 *
 * @deprecated Agent Chain helper
 * @param agentIds - Array of agent IDs in order of execution
 * @param promptTemplate - Optional prompt template string; defaults to a predefined template if not provided
 * @returns Array of edges configured for sequential chain with buffer prompts
 */
export async function createSequentialChainEdges(
  agentIds: string[],
  promptTemplate = DEFAULT_PROMPT_TEMPLATE,
): Promise<GraphEdge[]> {
  const edges: GraphEdge[] = [];

  for (let i = 0; i < agentIds.length - 1; i++) {
    const fromAgent = agentIds[i];
    const toAgent = agentIds[i + 1];

    edges.push({
      from: fromAgent,
      to: toAgent,
      edgeType: 'direct',
      // Use a prompt function to create the buffer string from all previous results
      prompt: async (messages: BaseMessage[], startIndex: number) => {
        /** Only the messages from this run (after startIndex) are passed in */
        const runMessages = messages.slice(startIndex);
        const bufferString = getBufferString(runMessages);
        const template = PromptTemplate.fromTemplate(promptTemplate);
        const result = await template.invoke({
          convo: bufferString,
        });
        return result.value;
      },
      /** Critical: exclude previous results so only the prompt is passed */
      excludeResults: true,
      description: `Sequential chain from ${fromAgent} to ${toAgent}`,
    });
  }

  return edges;
}
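A minimal usage sketch of the helper above (the agent IDs are hypothetical, for illustration only):

const edges = await createSequentialChainEdges(['agent_research', 'agent_write', 'agent_review']);
// Produces two direct edges: agent_research -> agent_write and agent_write -> agent_review.
// Each edge renders only the current run's messages into the prompt template, and
// excludeResults keeps the raw intermediate messages out of the next agent's context.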
@@ -1,3 +1,4 @@
+export * from './chain';
 export * from './memory';
 export * from './migration';
 export * from './legacy';

@@ -15,7 +15,7 @@ import type {
 } from '@librechat/agents';
 import type { TAttachment, MemoryArtifact } from 'librechat-data-provider';
 import type { ObjectId, MemoryMethods } from '@librechat/data-schemas';
-import type { BaseMessage } from '@langchain/core/messages';
+import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
 import type { Response as ServerResponse } from 'express';
 import { Tokenizer } from '~/utils';

@@ -466,7 +466,7 @@ async function handleMemoryArtifact({
   data: ToolEndData;
   metadata?: ToolEndMetadata;
 }) {
-  const output = data?.output;
+  const output = data?.output as ToolMessage | undefined;
   if (!output) {
     return null;
   }

@@ -509,7 +509,7 @@ export function createMemoryCallback({
   artifactPromises: Promise<Partial<TAttachment> | null>[];
 }): ToolEndCallback {
   return async (data: ToolEndData, metadata?: Record<string, unknown>) => {
-    const output = data?.output;
+    const output = data?.output as ToolMessage | undefined;
     const memoryArtifact = output?.artifact?.[Tools.memory] as MemoryArtifact;
     if (memoryArtifact == null) {
       return;

@@ -1,15 +1,17 @@
 import { Run, Providers } from '@librechat/agents';
 import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
 import type {
+  MultiAgentGraphConfig,
   OpenAIClientOptions,
   StandardGraphConfig,
-  EventHandler,
+  AgentInputs,
   GenericTool,
-  GraphEvents,
+  RunConfig,
   IState,
 } from '@librechat/agents';
 import type { Agent } from 'librechat-data-provider';
 import type * as t from '~/types';
+import { resolveHeaders } from '~/utils/env';
 
 const customProviders = new Set([
   Providers.XAI,

@@ -40,13 +42,18 @@ export function getReasoningKey(
   return reasoningKey;
 }
 
+type RunAgent = Omit<Agent, 'tools'> & {
+  tools?: GenericTool[];
+  maxContextTokens?: number;
+  toolContextMap?: Record<string, string>;
+};
+
 /**
  * Creates a new Run instance with custom handlers and configuration.
  *
  * @param options - The options for creating the Run instance.
- * @param options.agent - The agent for this run.
+ * @param options.agents - The agents for this run.
  * @param options.signal - The signal for this run.
- * @param options.req - The server request.
  * @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
  * @param options.customHandlers - Custom event handlers.
  * @param options.streaming - Whether to use streaming.

@@ -55,61 +62,108 @@
  */
 export async function createRun({
   runId,
-  agent,
   signal,
+  agents,
+  requestBody,
+  tokenCounter,
   customHandlers,
+  indexTokenCountMap,
   streaming = true,
   streamUsage = true,
 }: {
-  agent: Omit<Agent, 'tools'> & { tools?: GenericTool[] };
+  agents: RunAgent[];
   signal: AbortSignal;
   runId?: string;
   streaming?: boolean;
   streamUsage?: boolean;
-  customHandlers?: Record<GraphEvents, EventHandler>;
-}): Promise<Run<IState>> {
-  const provider =
-    (providerEndpointMap[
-      agent.provider as keyof typeof providerEndpointMap
-    ] as unknown as Providers) ?? agent.provider;
+  requestBody?: t.RequestBody;
+} & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise<
+  Run<IState>
+> {
+  const agentInputs: AgentInputs[] = [];
+  const buildAgentContext = (agent: RunAgent) => {
+    const provider =
+      (providerEndpointMap[
+        agent.provider as keyof typeof providerEndpointMap
+      ] as unknown as Providers) ?? agent.provider;
 
-  const llmConfig: t.RunLLMConfig = Object.assign(
-    {
-      provider,
-      streaming,
-      streamUsage,
-    },
-    agent.model_parameters,
-  );
+    const llmConfig: t.RunLLMConfig = Object.assign(
+      {
+        provider,
+        streaming,
+        streamUsage,
+      },
+      agent.model_parameters,
+    );
 
-  /** Resolves issues with new OpenAI usage field */
-  if (
-    customProviders.has(agent.provider) ||
-    (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
-  ) {
-    llmConfig.streamUsage = false;
-    llmConfig.usage = true;
-  }
+    const systemMessage = Object.values(agent.toolContextMap ?? {})
+      .join('\n')
+      .trim();
+
+    const systemContent = [
+      systemMessage,
+      agent.instructions ?? '',
+      agent.additional_instructions ?? '',
+    ]
+      .join('\n')
+      .trim();
+
+    /**
+     * Resolve request-based headers for Custom Endpoints. Note: if this is added to
+     * non-custom endpoints, needs consideration of varying provider header configs.
+     * This is done at this step because the request body may contain dynamic values
+     * that need to be resolved after agent initialization.
+     */
+    if (llmConfig?.configuration?.defaultHeaders != null) {
+      llmConfig.configuration.defaultHeaders = resolveHeaders({
+        headers: llmConfig.configuration.defaultHeaders as Record<string, string>,
+        body: requestBody,
+      });
+    }
+
+    /** Resolves issues with new OpenAI usage field */
+    if (
+      customProviders.has(agent.provider) ||
+      (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
+    ) {
+      llmConfig.streamUsage = false;
+      llmConfig.usage = true;
+    }
 
-  const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
-  const graphConfig: StandardGraphConfig = {
-    signal,
-    llmConfig,
-    reasoningKey,
-    tools: agent.tools,
-    instructions: agent.instructions,
-    additional_instructions: agent.additional_instructions,
-    // toolEnd: agent.end_after_tools,
-  };
+    const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
+    const agentInput: AgentInputs = {
+      provider,
+      reasoningKey,
+      agentId: agent.id,
+      tools: agent.tools,
+      clientOptions: llmConfig,
+      instructions: systemContent,
+      maxContextTokens: agent.maxContextTokens,
+    };
+    agentInputs.push(agentInput);
+  };
 
-  // TEMPORARY FOR TESTING
-  if (agent.provider === Providers.ANTHROPIC || agent.provider === Providers.BEDROCK) {
-    graphConfig.streamBuffer = 2000;
+  for (const agent of agents) {
+    buildAgentContext(agent);
   }
 
+  const graphConfig: RunConfig['graphConfig'] = {
+    signal,
+    agents: agentInputs,
+    edges: agents[0].edges,
+  };
+
+  if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) {
+    (graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent';
+  } else {
+    (graphConfig as StandardGraphConfig).type = 'standard';
+  }
+
   return Run.create({
     runId,
     graphConfig,
+    tokenCounter,
     customHandlers,
+    indexTokenCountMap,
   });
 }

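A hedged sketch of how the reworked createRun could be invoked (the agent objects and handler wiring are placeholders, not taken from this diff):

const controller = new AbortController();
const run = await createRun({
  signal: controller.signal,
  agents: [supervisorAgent, researchAgent], // edges are read from agents[0].edges
  requestBody,
  tokenCounter,
  customHandlers,
  indexTokenCountMap,
});
// With more than one agent, or any edges on the primary agent, the graph is
// built as 'multi-agent'; a single edgeless agent falls back to 'standard'.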
@@ -40,6 +40,17 @@ export const agentSupportContactSchema = z
   })
   .optional();
 
+/** Graph edge schema for agent handoffs */
+export const graphEdgeSchema = z.object({
+  from: z.union([z.string(), z.array(z.string())]),
+  to: z.union([z.string(), z.array(z.string())]),
+  description: z.string().optional(),
+  edgeType: z.enum(['handoff', 'direct']).optional(),
+  prompt: z.union([z.string(), z.function()]).optional(),
+  excludeResults: z.boolean().optional(),
+  promptKey: z.string().optional(),
+});
+
 /** Base agent schema with all common fields */
 export const agentBaseSchema = z.object({
   name: z.string().nullable().optional(),

@@ -48,7 +59,9 @@
   avatar: agentAvatarSchema.nullable().optional(),
   model_parameters: z.record(z.unknown()).optional(),
   tools: z.array(z.string()).optional(),
+  /** @deprecated Use edges instead */
   agent_ids: z.array(z.string()).optional(),
+  edges: z.array(graphEdgeSchema).optional(),
   end_after_tools: z.boolean().optional(),
   hide_sequential_outputs: z.boolean().optional(),
   artifacts: z.string().optional(),

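A quick validation sketch against the new schema (the agent IDs are hypothetical):

const edge = graphEdgeSchema.parse({
  from: 'agent_supervisor',
  to: ['agent_research', 'agent_writer'],
  edgeType: 'handoff',
  description: 'Route analysis requests to a specialist',
});
// parse() throws if, e.g., edgeType is anything other than 'handoff' or 'direct'.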
@@ -1,11 +1,10 @@
 import { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } from 'librechat-data-provider';
 import type {
   InitializeOpenAIOptionsParams,
-  OpenAIOptionsResult,
   OpenAIConfigOptions,
+  LLMConfigResult,
   UserKeyValues,
 } from '~/types';
-import { createHandleLLMNewToken } from '~/utils/generators';
 import { getAzureCredentials } from '~/utils/azure';
 import { isUserProvided } from '~/utils/common';
 import { resolveHeaders } from '~/utils/env';

@@ -27,7 +26,7 @@ export const initializeOpenAI = async ({
   overrideEndpoint,
   getUserKeyValues,
   checkUserKeyExpiry,
-}: InitializeOpenAIOptionsParams): Promise<OpenAIOptionsResult> => {
+}: InitializeOpenAIOptionsParams): Promise<LLMConfigResult> => {
   const { PROXY, OPENAI_API_KEY, AZURE_API_KEY, OPENAI_REVERSE_PROXY, AZURE_OPENAI_BASEURL } =
     process.env;
 
@@ -160,17 +159,8 @@ export const initializeOpenAI = async ({
   }
 
   if (streamRate) {
-    options.llmConfig.callbacks = [
-      {
-        handleLLMNewToken: createHandleLLMNewToken(streamRate),
-      },
-    ];
+    options.llmConfig._lc_stream_delay = streamRate;
   }
 
-  const result: OpenAIOptionsResult = {
-    ...options,
-    streamRate,
-  };
-
-  return result;
+  return options;
 };

Deleted file (340 lines):
@@ -1,340 +0,0 @@
import { ContentTypes } from 'librechat-data-provider';
import { HumanMessage, AIMessage, SystemMessage } from '@langchain/core/messages';
import { formatContentStrings } from './content';

describe('formatContentStrings', () => {
  describe('Human messages', () => {
    it('should convert human message with all text blocks to string', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello\nWorld');
    });

    it('should not convert human message with mixed content types (text + image)', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, text: 'what do you see' },
            {
              type: 'image_url',
              image_url: {
                url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
                detail: 'auto',
              },
            },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toEqual([
        { type: ContentTypes.TEXT, text: 'what do you see' },
        {
          type: 'image_url',
          image_url: {
            url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
            detail: 'auto',
          },
        },
      ]);
    });

    it('should leave string content unchanged', () => {
      const messages = [
        new HumanMessage({
          content: 'Hello World',
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello World');
    });

    it('should handle empty text blocks', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: '' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello\n\nWorld');
    });

    it('should handle null/undefined text values', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: null },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: undefined },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello\n\n\nWorld');
    });
  });

  describe('AI messages', () => {
    it('should convert AI message with all text blocks to string', () => {
      const messages = [
        new AIMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'World' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello\nWorld');
      expect(result[0].getType()).toBe('ai');
    });

    it('should not convert AI message with mixed content types', () => {
      const messages = [
        new AIMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here is an image' },
            { type: ContentTypes.TOOL_CALL, tool_call: { name: 'generate_image' } },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toEqual([
        { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here is an image' },
        { type: ContentTypes.TOOL_CALL, tool_call: { name: 'generate_image' } },
      ]);
    });
  });

  describe('System messages', () => {
    it('should convert System message with all text blocks to string', () => {
      const messages = [
        new SystemMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('System\nMessage');
      expect(result[0].getType()).toBe('system');
    });
  });

  describe('Mixed message types', () => {
    it('should process all valid message types in mixed array', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Human' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Message' },
          ],
        }),
        new AIMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'AI' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Response' },
          ],
        }),
        new SystemMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'System' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Prompt' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(3);
      // All messages should be converted
      expect(result[0].content).toBe('Human\nMessage');
      expect(result[0].getType()).toBe('human');

      expect(result[1].content).toBe('AI\nResponse');
      expect(result[1].getType()).toBe('ai');

      expect(result[2].content).toBe('System\nPrompt');
      expect(result[2].getType()).toBe('system');
    });
  });

  describe('Edge cases', () => {
    it('should handle empty array', () => {
      const result = formatContentStrings([]);
      expect(result).toEqual([]);
    });

    it('should handle messages with non-array content', () => {
      const messages = [
        new HumanMessage({
          content: 'This is a string content',
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('This is a string content');
    });

    it('should trim the final concatenated string', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' Hello ' },
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' World ' },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('Hello \n World');
    });
  });

  describe('Real-world scenarios', () => {
    it('should handle the exact scenario from the issue', () => {
      const messages = [
        new HumanMessage({
          content: [
            {
              type: 'text',
              text: 'hi there',
            },
          ],
        }),
        new AIMessage({
          content: [
            {
              type: 'text',
              text: 'Hi Danny! How can I help you today?',
            },
          ],
        }),
        new HumanMessage({
          content: [
            {
              type: 'text',
              text: 'what do you see',
            },
            {
              type: 'image_url',
              image_url: {
                url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
                detail: 'auto',
              },
            },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(3);

      // First human message (all text) should be converted
      expect(result[0].content).toBe('hi there');
      expect(result[0].getType()).toBe('human');

      // AI message (all text) should now also be converted
      expect(result[1].content).toBe('Hi Danny! How can I help you today?');
      expect(result[1].getType()).toBe('ai');

      // Third message (mixed content) should remain unchanged
      expect(result[2].content).toEqual([
        {
          type: 'text',
          text: 'what do you see',
        },
        {
          type: 'image_url',
          image_url: {
            url: 'data:image/png;base64,iVBO_SOME_BASE64_DATA=',
            detail: 'auto',
          },
        },
      ]);
    });

    it('should handle messages with tool calls', () => {
      const messages = [
        new HumanMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Please use the calculator' },
            {
              type: ContentTypes.TOOL_CALL,
              tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
            },
          ],
        }),
        new AIMessage({
          content: [
            { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'I will calculate that for you' },
            {
              type: ContentTypes.TOOL_CALL,
              tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
            },
          ],
        }),
      ];

      const result = formatContentStrings(messages);

      expect(result).toHaveLength(2);
      // Should not convert because not all blocks are text
      expect(result[0].content).toEqual([
        { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Please use the calculator' },
        {
          type: ContentTypes.TOOL_CALL,
          tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
        },
      ]);
      expect(result[1].content).toEqual([
        { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'I will calculate that for you' },
        {
          type: ContentTypes.TOOL_CALL,
          tool_call: { name: 'calculator', args: '{"a": 1, "b": 2}' },
        },
      ]);
    });
  });
});

Deleted file (57 lines):
@@ -1,57 +0,0 @@
import { ContentTypes } from 'librechat-data-provider';
import type { BaseMessage } from '@langchain/core/messages';

/**
 * Formats an array of messages for LangChain, making sure all content fields are strings
 * @param {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} payload - The array of messages to format.
 * @returns {Array<HumanMessage | AIMessage | SystemMessage | ToolMessage>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
 */
export const formatContentStrings = (payload: Array<BaseMessage>): Array<BaseMessage> => {
  // Create a new array to store the processed messages
  const result: Array<BaseMessage> = [];

  for (const message of payload) {
    const messageType = message.getType();
    const isValidMessage =
      messageType === 'human' || messageType === 'ai' || messageType === 'system';

    if (!isValidMessage) {
      result.push(message);
      continue;
    }

    // If content is already a string, add as-is
    if (typeof message.content === 'string') {
      result.push(message);
      continue;
    }

    // If content is not an array, add as-is
    if (!Array.isArray(message.content)) {
      result.push(message);
      continue;
    }

    // Check if all content blocks are text type
    const allTextBlocks = message.content.every((block) => block.type === ContentTypes.TEXT);

    // Only convert to string if all blocks are text type
    if (!allTextBlocks) {
      result.push(message);
      continue;
    }

    // Reduce text types to a single string
    const content = message.content.reduce((acc, curr) => {
      if (curr.type === ContentTypes.TEXT) {
        return `${acc}${curr[ContentTypes.TEXT] || ''}\n`;
      }
      return acc;
    }, '');

    message.content = content.trim();
    result.push(message);
  }

  return result;
};

Deleted file (1 line):
@@ -1 +0,0 @@
export * from './content';

@@ -10,7 +10,6 @@ export * from './mcp/oauth';
 export * from './mcp/auth';
 export * from './mcp/zod';
 /* Utilities */
-export * from './format';
 export * from './mcp/utils';
 export * from './utils';
 export * from './db/utils';

@@ -31,6 +31,7 @@ export type OpenAIConfiguration = OpenAIClientOptions['configuration'];
 
 export type OAIClientOptions = OpenAIClientOptions & {
   include_reasoning?: boolean;
+  _lc_stream_delay?: number;
 };
 
 /**
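With the stream-rate refactor, the delay rides on the config itself instead of a handleLLMNewToken callback; a small sketch (the value and its unit are assumptions, not from this diff):

const opts: OAIClientOptions = {
  _lc_stream_delay: 25, // assumed: milliseconds of delay per streamed chunk
};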
@@ -100,10 +101,3 @@ export interface InitializeOpenAIOptionsParams {
   getUserKeyValues: GetUserKeyValuesFunction;
   checkUserKeyExpiry: CheckUserKeyExpiryFunction;
 }
-
-/**
- * Extended LLM config result with stream rate handling
- */
-export interface OpenAIOptionsResult extends LLMConfigResult {
-  streamRate?: number;
-}

@@ -1615,6 +1615,10 @@ export enum Constants {
    * This helps inform the UI if the mcp server was previously added.
    * */
  mcp_server = 'sys__server__sys',
+  /**
+   * Handoff Tool Name Prefix
+   */
+  LC_TRANSFER_TO_ = 'lc_transfer_to_',
  /** Placeholder Agent ID for Ephemeral Agents */
  EPHEMERAL_AGENT_ID = 'ephemeral',
 }
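Presumably the prefix is combined with a target agent ID to name the generated handoff tool; a hedged illustration (the agent ID is made up):

const toolName = `${Constants.LC_TRANSFER_TO_}agent_abc123`;
// => 'lc_transfer_to_agent_abc123'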
@@ -39,7 +39,6 @@ export enum Providers {
   GOOGLE = 'google',
   VERTEXAI = 'vertexai',
   BEDROCK = 'bedrock',
-  BEDROCK_LEGACY = 'bedrock_legacy',
   MISTRALAI = 'mistralai',
   MISTRAL = 'mistral',
   OLLAMA = 'ollama',

@@ -231,6 +230,7 @@ export const defaultAgentFormValues = {
   tools: [],
   provider: {},
   projectIds: [],
+  edges: [],
   artifacts: '',
   /** @deprecated Use ACL permissions instead */
   isCollaborative: false,

@@ -355,3 +355,45 @@ export type AgentToolType = {
 } & ({ assistant_id: string; agent_id?: never } | { assistant_id?: never; agent_id?: string });
 
 export type ToolMetadata = TPlugin;
+
+export interface BaseMessage {
+  content: string;
+  role?: string;
+  [key: string]: unknown;
+}
+
+export interface BaseGraphState {
+  [key: string]: unknown;
+}
+
+export type GraphEdge = {
+  /** Agent ID, use a list for multiple sources */
+  from: string | string[];
+  /** Agent ID, use a list for multiple destinations */
+  to: string | string[];
+  description?: string;
+  /** Can return boolean or specific destination(s) */
+  condition?: (state: BaseGraphState) => boolean | string | string[];
+  /** 'handoff' creates tools for dynamic routing, 'direct' creates direct edges, which also allow parallel execution */
+  edgeType?: 'handoff' | 'direct';
+  /**
+   * For direct edges: Optional prompt to add when transitioning through this edge.
+   * String prompts can include variables like {results} which will be replaced with
+   * messages from startIndex onwards. When {results} is used, excludeResults defaults to true.
+   *
+   * For handoff edges: Description for the input parameter that the handoff tool accepts,
+   * allowing the supervisor to pass specific instructions/context to the transferred agent.
+   */
+  prompt?: string | ((messages: BaseMessage[], runStartIndex: number) => string | undefined);
+  /**
+   * When true, excludes messages from startIndex when adding prompt.
+   * Automatically set to true when {results} variable is used in prompt.
+   */
+  excludeResults?: boolean;
+  /**
+   * For handoff edges: Customizes the parameter name for the handoff input.
+   * Defaults to "instructions" if not specified.
+   * Only applies when prompt is provided for handoff edges.
+   */
+  promptKey?: string;
+};
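Two example edges typed against GraphEdge (the agent IDs are hypothetical):

const routing: GraphEdge = {
  from: 'agent_supervisor',
  to: ['agent_support', 'agent_billing'],
  edgeType: 'handoff',
  // For handoff edges, prompt describes the input the generated tool accepts
  prompt: 'Instructions for the specialist taking over this request',
  promptKey: 'instructions',
};

const pipeline: GraphEdge = {
  from: 'agent_draft',
  to: 'agent_review',
  edgeType: 'direct',
  prompt: 'Review the following draft:\n\n{results}',
  // excludeResults defaults to true because {results} is used above
};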
@@ -1,7 +1,7 @@
 import type { OpenAPIV3 } from 'openapi-types';
 import type { AssistantsEndpoint, AgentProvider } from 'src/schemas';
+import type { Agents, GraphEdge } from './agents';
 import type { ContentTypes } from './runs';
-import type { Agents } from './agents';
 import type { TFile } from './files';
 import { ArtifactModes } from 'src/artifacts';

@@ -229,7 +229,9 @@ export type Agent = {
   /** @deprecated Use ACL permissions instead */
   isCollaborative?: boolean;
   tool_resources?: AgentToolResources;
+  /** @deprecated Use edges instead */
   agent_ids?: string[];
+  edges?: GraphEdge[];
   end_after_tools?: boolean;
   hide_sequential_outputs?: boolean;
   artifacts?: ArtifactModes;

@@ -255,6 +257,7 @@ export type AgentCreateParams = {
 } & Pick<
   Agent,
   | 'agent_ids'
+  | 'edges'
   | 'end_after_tools'
   | 'hide_sequential_outputs'
   | 'artifacts'

@@ -280,6 +283,7 @@ export type AgentUpdateParams = {
 } & Pick<
   Agent,
   | 'agent_ids'
+  | 'edges'
   | 'end_after_tools'
   | 'hide_sequential_outputs'
   | 'artifacts'

@@ -68,9 +68,14 @@ const agentSchema = new Schema<IAgent>(
     end_after_tools: {
       type: Boolean,
     },
+    /** @deprecated Use edges instead */
     agent_ids: {
       type: [String],
     },
+    edges: {
+      type: [{ type: Schema.Types.Mixed }],
+      default: [],
+    },
     isCollaborative: {
       type: Boolean,
       default: undefined,

@@ -1,4 +1,5 @@
 import { Document, Types } from 'mongoose';
+import type { GraphEdge } from 'librechat-data-provider';
 
 export interface ISupportContact {
   name?: string;

@@ -27,7 +28,9 @@ export interface IAgent extends Omit<Document, 'model'> {
   authorName?: string;
   hide_sequential_outputs?: boolean;
   end_after_tools?: boolean;
+  /** @deprecated Use edges instead */
   agent_ids?: string[];
+  edges?: GraphEdge[];
   /** @deprecated Use ACL permissions instead */
   isCollaborative?: boolean;
   conversation_starters?: string[];