mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-04-03 06:17:21 +02:00
🔁 fix: Pass recursionLimit to OpenAI-Compatible Agents API Endpoint (#12510)
* fix: pass recursionLimit to processStream in OpenAI-compatible agents API

  The OpenAI-compatible endpoint never passed recursionLimit to LangGraph's
  processStream(), silently capping all API-based agent calls at the default
  25 steps. Mirror the 3-step cascade already used by the UI path (client.js):
  yaml config default → per-agent DB override → max cap.

* refactor: extract resolveRecursionLimit into shared utility

  Extract the 3-step recursion limit cascade into a shared
  resolveRecursionLimit() function in @librechat/api. Both openai.js and
  client.js now call this single source of truth. Also fixes falsy-guard edge
  cases where recursion_limit=0 or maxRecursionLimit=0 would silently
  misbehave, by using explicit typeof + positive checks. Includes unit tests
  covering all cascade branches and edge cases.

* refactor: use resolveRecursionLimit in openai.js and client.js

  Replace duplicated cascade logic in both controllers with the shared
  resolveRecursionLimit() utility from @librechat/api. In openai.js: hoist
  agentsEConfig to avoid double property walk, remove displaced comment, add
  integration test assertions. In client.js: remove inline cascade that was
  overriding config after initial assignment.

* fix: hoist processStream mock for test accessibility

  The processStream mock was created inline inside mockResolvedValue, making
  it inaccessible via createRun.mock.results (which returns the Promise, not
  the resolved value). Hoist it to a module-level variable so tests can
  assert on it directly.

* test: improve test isolation and boundary coverage

  Use mockReturnValueOnce instead of mockReturnValue to prevent mock leaking
  across test boundaries. Add boundary tests for downward agent override and
  exact-match maxRecursionLimit.
This commit is contained in:
parent
aa575b274b
commit
cb41ba14b2
6 changed files with 134 additions and 18 deletions
|
|
@@ -15,6 +15,7 @@ const {
|
|||
createErrorResponse,
|
||||
recordCollectedUsage,
|
||||
getTransactionsConfig,
|
||||
resolveRecursionLimit,
|
||||
createToolExecuteHandler,
|
||||
buildNonStreamingResponse,
|
||||
createOpenAIStreamTracker,
|
||||
|
|
@@ -194,10 +195,8 @@ const OpenAIChatCompletionController = async (req, res) => {
|
|||
const conversationId = request.conversation_id ?? nanoid();
|
||||
const parentMessageId = request.parent_message_id ?? null;
|
||||
|
||||
// Build allowed providers set
|
||||
const allowedProviders = new Set(
|
||||
appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders,
|
||||
);
|
||||
const agentsEConfig = appConfig?.endpoints?.[EModelEndpoint.agents];
|
||||
const allowedProviders = new Set(agentsEConfig?.allowedProviders);
|
||||
|
||||
// Create tool loader
|
||||
const loadTools = createToolLoader(abortController.signal);
|
||||
|
|
@@ -491,7 +490,6 @@ const OpenAIChatCompletionController = async (req, res) => {
|
|||
throw new Error('Failed to create agent run');
|
||||
}
|
||||
|
||||
// Process the stream
|
||||
const config = {
|
||||
runName: 'AgentRun',
|
||||
configurable: {
|
||||
|
|
@@ -504,6 +502,7 @@ const OpenAIChatCompletionController = async (req, res) => {
|
|||
},
|
||||
...(userMCPAuthMap != null && { userMCPAuthMap }),
|
||||
},
|
||||
recursionLimit: resolveRecursionLimit(agentsEConfig, agent),
|
||||
signal: abortController.signal,
|
||||
streamMode: 'values',
|
||||
version: 'v2',
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue