Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-28 14:18:51 +01:00
* 🔧 fix: Correct URL Construction in fetchModels Function
  Updated the URL construction in the fetchModels function to ensure proper formatting by removing trailing slashes from the base URL. This change prevents potential issues with API endpoint calls.
* 🔧 fix: Remove OLLAMA from Known Custom Providers
  Updated the isKnownCustomProvider function and providerConfigMap to exclude OLLAMA as a known custom provider, streamlining the provider checks and configurations.
* 🔧 test: Enhance fetchModels Tests for URL Construction
  Added new test cases to validate the URL construction in the fetchModels function, ensuring it handles trailing slashes correctly and appends query parameters as expected. This improves the robustness of the API endpoint calls.
* chore: remove ollama provider-specific handling
* chore: Refactor imports to use isUserProvided from @librechat/api
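
The trailing-slash fix described in the first commit can be illustrated with a minimal sketch; the helper name and signature below are illustrative assumptions, not the actual fetchModels implementation:

// Sketch: normalize the base URL before appending the models path, so
// 'https://api.example.com/v1/' and 'https://api.example.com/v1' both
// yield '.../v1/models'. Name and signature are hypothetical.
function buildModelsUrl(baseURL: string, query?: Record<string, string>): string {
  const base = baseURL.replace(/\/+$/, ''); // strip trailing slashes
  const search = query ? `?${new URLSearchParams(query).toString()}` : '';
  return `${base}/models${search}`;
}

// e.g. buildModelsUrl('https://api.example.com/v1/', { foo: 'bar' })
//   => 'https://api.example.com/v1/models?foo=bar'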
175 lines · 5 KiB · TypeScript
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
  MultiAgentGraphConfig,
  OpenAIClientOptions,
  StandardGraphConfig,
  AgentInputs,
  GenericTool,
  RunConfig,
  IState,
} from '@librechat/agents';
import type { IUser } from '@librechat/data-schemas';
import type { Agent } from 'librechat-data-provider';
import type * as t from '~/types';
import { resolveHeaders, createSafeUser } from '~/utils/env';

/**
 * Providers that require the OpenAI `usage` field workaround applied in
 * `createRun` below (stream usage disabled, `usage` flag enabled).
 */
const customProviders = new Set([
  Providers.XAI,
  Providers.DEEPSEEK,
  Providers.OPENROUTER,
  KnownEndpoints.ollama,
]);

/**
 * Determines which response field holds reasoning output for the given
 * provider and client configuration.
 */
export function getReasoningKey(
  provider: Providers,
  llmConfig: t.RunLLMConfig,
  agentEndpoint?: string | null,
): 'reasoning_content' | 'reasoning' {
  let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
  if (provider === Providers.GOOGLE) {
    reasoningKey = 'reasoning';
  } else if (
    llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
    (agentEndpoint && agentEndpoint.toLowerCase().includes(KnownEndpoints.openrouter))
  ) {
    reasoningKey = 'reasoning';
  } else if (
    (llmConfig as OpenAIClientOptions).useResponsesApi === true &&
    (provider === Providers.OPENAI || provider === Providers.AZURE)
  ) {
    reasoningKey = 'reasoning';
  }
  return reasoningKey;
}

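/*
 * Illustrative example (not part of the original file): an OpenRouter
 * base URL selects the `reasoning` key even for an OpenAI provider.
 *
 *   getReasoningKey(Providers.OPENAI, {
 *     provider: Providers.OPENAI,
 *     configuration: { baseURL: 'https://openrouter.ai/api/v1' },
 *   } as t.RunLLMConfig); // => 'reasoning'
 */
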
type RunAgent = Omit<Agent, 'tools'> & {
  tools?: GenericTool[];
  maxContextTokens?: number;
  useLegacyContent?: boolean;
  toolContextMap?: Record<string, string>;
};

/**
 * Creates a new Run instance with custom handlers and configuration.
 *
 * @param options - The options for creating the Run instance.
 * @param options.agents - The agents for this run.
 * @param options.signal - The signal for this run.
 * @param options.runId - Optional run ID; otherwise, a new run ID will be generated.
 * @param options.requestBody - Optional request body, used to resolve dynamic header values.
 * @param options.user - Optional user for this run.
 * @param options.tokenCounter - Token counter for this run.
 * @param options.customHandlers - Custom event handlers.
 * @param options.indexTokenCountMap - Map of message indices to token counts.
 * @param options.streaming - Whether to use streaming.
 * @param options.streamUsage - Whether to stream usage information.
 * @returns {Promise<Run<IState>>} A promise that resolves to a new Run instance.
 */
export async function createRun({
  runId,
  signal,
  agents,
  requestBody,
  user,
  tokenCounter,
  customHandlers,
  indexTokenCountMap,
  streaming = true,
  streamUsage = true,
}: {
  agents: RunAgent[];
  signal: AbortSignal;
  runId?: string;
  streaming?: boolean;
  streamUsage?: boolean;
  requestBody?: t.RequestBody;
  user?: IUser;
} & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise<
  Run<IState>
> {
  const agentInputs: AgentInputs[] = [];
  const buildAgentContext = (agent: RunAgent) => {
    const provider =
      (providerEndpointMap[
        agent.provider as keyof typeof providerEndpointMap
      ] as unknown as Providers) ?? agent.provider;

    const llmConfig: t.RunLLMConfig = Object.assign(
      {
        provider,
        streaming,
        streamUsage,
      },
      agent.model_parameters,
    );

    const systemMessage = Object.values(agent.toolContextMap ?? {})
      .join('\n')
      .trim();

    const systemContent = [
      systemMessage,
      agent.instructions ?? '',
      agent.additional_instructions ?? '',
    ]
      .join('\n')
      .trim();

    /**
     * Resolve request-based headers for Custom Endpoints. Note: if this is added to
     * non-custom endpoints, the varying provider header configs will need consideration.
     * This is done at this step because the request body may contain dynamic values
     * that need to be resolved after agent initialization.
     */
    if (llmConfig?.configuration?.defaultHeaders != null) {
      llmConfig.configuration.defaultHeaders = resolveHeaders({
        headers: llmConfig.configuration.defaultHeaders as Record<string, string>,
        user: createSafeUser(user),
        body: requestBody,
      });
    }

    /** Resolves issues with the new OpenAI usage field */
    if (
      customProviders.has(agent.provider) ||
      (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider)
    ) {
      llmConfig.streamUsage = false;
      llmConfig.usage = true;
    }

    const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint);
    const agentInput: AgentInputs = {
      provider,
      reasoningKey,
      agentId: agent.id,
      tools: agent.tools,
      clientOptions: llmConfig,
      instructions: systemContent,
      maxContextTokens: agent.maxContextTokens,
      useLegacyContent: agent.useLegacyContent ?? false,
    };
    agentInputs.push(agentInput);
  };

  for (const agent of agents) {
    buildAgentContext(agent);
  }

  const graphConfig: RunConfig['graphConfig'] = {
    signal,
    agents: agentInputs,
    edges: agents[0].edges,
  };

  if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) {
    (graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent';
  } else {
    (graphConfig as StandardGraphConfig).type = 'standard';
  }

  return Run.create({
    runId,
    graphConfig,
    tokenCounter,
    customHandlers,
    indexTokenCountMap,
  });
}
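
/*
 * Illustrative usage (a minimal sketch based only on the signature above;
 * the `agent` object and handler values are assumptions, not taken from
 * this file):
 *
 *   const controller = new AbortController();
 *   const run = await createRun({
 *     agents: [agent],
 *     signal: controller.signal,
 *     requestBody,
 *     user,
 *     tokenCounter,
 *     customHandlers,
 *     streaming: true,
 *     streamUsage: true,
 *   });
 */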