🧠 feat: Enable xhigh reasoning for OpenAI gpt-5.2 (#10924)

Alex Ferrari 2025-12-14 08:42:42 +01:00
parent b288d81f5a
commit 2fd2a62886
6 changed files with 76 additions and 9 deletions
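In short: the `xhigh` reasoning effort level is now forwarded only for models whose name starts with `gpt-5.2`; for any other model the value is dropped before the request config is built. A minimal sketch of the gate (the regex mirrors the one in the diff below; the model names are examples only):

const allowXHigh = (model: string): boolean => /^gpt-5\.2/.test(model);

allowXHigh('gpt-5.2-pro'); // true  -> reasoning_effort 'xhigh' is kept
allowXHigh('gpt-5-pro');   // false -> 'xhigh' is dropped from the config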


@@ -210,6 +210,32 @@ describe('getOpenAILLMConfig', () => {
     });
   });
 
+  it('should allow xhigh reasoning_effort for gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5.2-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+    expect(result.llmConfig).toHaveProperty('reasoning_effort', ReasoningEffort.xhigh);
+  });
+
+  it('should drop xhigh reasoning_effort for non-gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+    expect(result.llmConfig).not.toHaveProperty('reasoning_effort');
+  });
+
   it('should NOT exclude parameters for gpt-5-chat (it supports sampling params)', () => {
     const result = getOpenAILLMConfig({
       apiKey: 'test-api-key',


@@ -220,20 +220,24 @@ export function getOpenAILLMConfig({
     llmConfig.include_reasoning = true;
   }
 
+  const allowXHigh = modelOptions.model ? /^gpt-5\.2/.test(String(modelOptions.model)) : false;
+  const sanitizedReasoningEffort =
+    reasoning_effort === 'xhigh' && !allowXHigh ? undefined : reasoning_effort;
+
   if (
-    hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
+    hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort, reasoning_summary }) &&
     (llmConfig.useResponsesApi === true ||
       (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
   ) {
     llmConfig.reasoning = removeNullishValues(
       {
-        effort: reasoning_effort,
+        effort: sanitizedReasoningEffort,
         summary: reasoning_summary,
       },
       true,
     ) as OpenAI.Reasoning;
-  } else if (hasReasoningParams({ reasoning_effort })) {
-    llmConfig.reasoning_effort = reasoning_effort;
+  } else if (hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort })) {
+    llmConfig.reasoning_effort = sanitizedReasoningEffort;
   }
 
   if (llmConfig.max_tokens != null) {
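Read on its own, the sanitization above amounts to the standalone sketch below. This is illustrative only, not code from the repository: `sanitizeReasoningEffort` is a hypothetical name, and the `Effort` union is a stand-in for the library's `ReasoningEffort` enum (whose exact members may differ).

type Effort = 'low' | 'medium' | 'high' | 'xhigh';

// 'xhigh' survives only for gpt-5.2* models; for anything else it becomes
// undefined, so the hasReasoningParams() guards in the diff skip it and the
// key never reaches the outgoing llmConfig.
function sanitizeReasoningEffort(
  model: string | undefined,
  effort?: Effort,
): Effort | undefined {
  const allowXHigh = model ? /^gpt-5\.2/.test(model) : false;
  return effort === 'xhigh' && !allowXHigh ? undefined : effort;
}

sanitizeReasoningEffort('gpt-5.2-pro', 'xhigh'); // 'xhigh'
sanitizeReasoningEffort('gpt-5-pro', 'xhigh');   // undefined
sanitizeReasoningEffort('gpt-5-pro', 'high');    // 'high' (other levels pass through)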