🔄 refactor: Max tokens handling in Agent Initialization (#10299)

* Refactored the logic for determining max output tokens in the agent initialization process.
* Changed variable names for clarity, updating from `maxTokens` to `maxOutputTokens` to better reflect their purpose.
* Adjusted calculations for `maxContextTokens` to use the new `maxOutputTokens` variable.
This commit is contained in:
Danny Avila 2025-10-29 16:41:27 -04:00 committed by GitHub
parent e6aeec9f25
commit 6adb425780
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -134,10 +134,10 @@ const initializeAgent = async ({
   });
   const tokensModel =
-    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
-  const maxTokens = optionalChainWithEmptyCheck(
-    modelOptions.maxOutputTokens,
-    modelOptions.maxTokens,
+    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : options.llmConfig?.model;
+  const maxOutputTokens = optionalChainWithEmptyCheck(
+    options.llmConfig?.maxOutputTokens,
+    options.llmConfig?.maxTokens,
     0,
   );
   const agentMaxContextTokens = optionalChainWithEmptyCheck(
@@ -203,7 +203,7 @@ const initializeAgent = async ({
     userMCPAuthMap,
     toolContextMap,
     useLegacyContent: !!options.useLegacyContent,
-    maxContextTokens: Math.round((agentMaxContextTokens - maxTokens) * 0.9),
+    maxContextTokens: Math.round((agentMaxContextTokens - maxOutputTokens) * 0.9),
   };
 };