🔁 fix: Pass recursionLimit to OpenAI-Compatible Agents API Endpoint (#12510)

* fix: pass recursionLimit to processStream in OpenAI-compatible agents API

The OpenAI-compatible endpoint never passed recursionLimit to LangGraph's
processStream(), silently capping all API-based agent calls at the default
25 steps. Mirror the 3-step cascade already used by the UI path (client.js):
yaml config default → per-agent DB override → max cap.

* refactor: extract resolveRecursionLimit into shared utility

Extract the 3-step recursion limit cascade into a shared
resolveRecursionLimit() function in @librechat/api. Both openai.js and
client.js now call this single source of truth.

Also fixes falsy-guard edge cases where recursion_limit=0 or
maxRecursionLimit=0 would silently misbehave, by using explicit
typeof + positive checks.

Includes unit tests covering all cascade branches and edge cases.

* refactor: use resolveRecursionLimit in openai.js and client.js

Replace duplicated cascade logic in both controllers with the shared
resolveRecursionLimit() utility from @librechat/api.

In openai.js: hoist agentsEConfig to avoid double property walk,
remove displaced comment, add integration test assertions.

In client.js: remove inline cascade that was overriding config
after initial assignment.

* fix: hoist processStream mock for test accessibility

The processStream mock was created inline inside mockResolvedValue,
making it inaccessible via createRun.mock.results (which returns
the Promise, not the resolved value). Hoist it to a module-level
variable so tests can assert on it directly.

* test: improve test isolation and boundary coverage

Use mockReturnValueOnce instead of mockReturnValue to prevent mock
leaking across test boundaries. Add boundary tests for downward
agent override and exact-match maxRecursionLimit.
This commit is contained in:
Danny Avila 2026-04-01 21:13:07 -04:00 committed by GitHub
parent aa575b274b
commit cb41ba14b2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 134 additions and 18 deletions

View file

@@ -3,6 +3,7 @@
* Tests that recordCollectedUsage is called correctly for token spending
*/
const mockProcessStream = jest.fn().mockResolvedValue(undefined);
const mockSpendTokens = jest.fn().mockResolvedValue({});
const mockSpendStructuredTokens = jest.fn().mockResolvedValue({});
const mockRecordCollectedUsage = jest
@@ -35,7 +36,7 @@ jest.mock('@librechat/agents', () => ({
jest.mock('@librechat/api', () => ({
writeSSE: jest.fn(),
createRun: jest.fn().mockResolvedValue({
processStream: jest.fn().mockResolvedValue(undefined),
processStream: mockProcessStream,
}),
createChunk: jest.fn().mockReturnValue({}),
buildToolSet: jest.fn().mockReturnValue(new Set()),
@@ -68,6 +69,7 @@ jest.mock('@librechat/api', () => ({
toolCalls: new Map(),
usage: { promptTokens: 100, completionTokens: 50, reasoningTokens: 0 },
}),
resolveRecursionLimit: jest.fn().mockReturnValue(50),
createToolExecuteHandler: jest.fn().mockReturnValue({ handle: jest.fn() }),
isChatCompletionValidationFailure: jest.fn().mockReturnValue(false),
}));
@@ -286,4 +288,36 @@ describe('OpenAIChatCompletionController', () => {
);
});
});
// Verifies the controller wires the shared recursion-limit cascade into the
// LangGraph run config (the fix this commit introduces for the OpenAI-compatible path).
describe('recursionLimit resolution', () => {
  it('should pass resolveRecursionLimit result to processStream config', async () => {
    // Stub the cascade once so this value cannot leak into sibling tests.
    const api = require('@librechat/api');
    api.resolveRecursionLimit.mockReturnValueOnce(75);

    await OpenAIChatCompletionController(req, res);

    // processStream(run, config, messages): the resolved limit must land in config.
    expect(mockProcessStream).toHaveBeenCalledWith(
      expect.anything(),
      expect.objectContaining({ recursionLimit: 75 }),
      expect.anything(),
    );
  });

  it('should call resolveRecursionLimit with agentsEConfig and agent', async () => {
    const api = require('@librechat/api');
    const models = require('~/models');

    // Agent carries a per-agent DB override; config carries default + max cap.
    const mockAgent = { id: 'agent-123', name: 'Test', recursion_limit: 200 };
    models.getAgent.mockResolvedValueOnce(mockAgent);
    req.config = {
      endpoints: {
        agents: { recursionLimit: 100, maxRecursionLimit: 150, allowedProviders: [] },
      },
    };

    await OpenAIChatCompletionController(req, res);

    // The cascade must receive both inputs: endpoint config first, agent second.
    expect(api.resolveRecursionLimit).toHaveBeenCalledWith(
      req.config.endpoints.agents,
      mockAgent,
    );
  });
});
});

View file

@@ -21,6 +21,7 @@ const {
recordCollectedUsage,
GenerationJobManager,
getTransactionsConfig,
resolveRecursionLimit,
createMemoryProcessor,
loadAgent: loadAgentFn,
createMultiAgentMapper,
@@ -733,7 +734,7 @@ class AgentClient extends BaseClient {
},
user: createSafeUser(this.options.req.user),
},
recursionLimit: agentsEConfig?.recursionLimit ?? 50,
recursionLimit: resolveRecursionLimit(agentsEConfig, this.options.agent),
signal: abortController.signal,
streamMode: 'values',
version: 'v2',
@@ -781,17 +782,6 @@
agents.push(...this.agentConfigs.values());
}
if (agents[0].recursion_limit && typeof agents[0].recursion_limit === 'number') {
config.recursionLimit = agents[0].recursion_limit;
}
if (
agentsEConfig?.maxRecursionLimit &&
config.recursionLimit > agentsEConfig?.maxRecursionLimit
) {
config.recursionLimit = agentsEConfig?.maxRecursionLimit;
}
// TODO: needs to be added as part of AgentContext initialization
// const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
// const noSystemMessages = noSystemModelRegex.some((regex) =>

View file

@@ -15,6 +15,7 @@ const {
createErrorResponse,
recordCollectedUsage,
getTransactionsConfig,
resolveRecursionLimit,
createToolExecuteHandler,
buildNonStreamingResponse,
createOpenAIStreamTracker,
@@ -194,10 +195,8 @@ const OpenAIChatCompletionController = async (req, res) => {
const conversationId = request.conversation_id ?? nanoid();
const parentMessageId = request.parent_message_id ?? null;
// Build allowed providers set
const allowedProviders = new Set(
appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders,
);
const agentsEConfig = appConfig?.endpoints?.[EModelEndpoint.agents];
const allowedProviders = new Set(agentsEConfig?.allowedProviders);
// Create tool loader
const loadTools = createToolLoader(abortController.signal);
@@ -491,7 +490,6 @@ const OpenAIChatCompletionController = async (req, res) => {
throw new Error('Failed to create agent run');
}
// Process the stream
const config = {
runName: 'AgentRun',
configurable: {
@@ -504,6 +502,7 @@
},
...(userMCPAuthMap != null && { userMCPAuthMap }),
},
recursionLimit: resolveRecursionLimit(agentsEConfig, agent),
signal: abortController.signal,
streamMode: 'values',
version: 'v2',