mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-01-03 17:18:51 +01:00)
🧠 feat: Enable xhigh reasoning for OpenAI gpt-5.2 (#10924)
parent b288d81f5a
commit 2fd2a62886
6 changed files with 76 additions and 9 deletions
@@ -210,6 +210,32 @@ describe('getOpenAILLMConfig', () => {
     });
   });
 
+  it('should allow xhigh reasoning_effort for gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5.2-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('reasoning_effort', ReasoningEffort.xhigh);
+  });
+
+  it('should drop xhigh reasoning_effort for non-gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).not.toHaveProperty('reasoning_effort');
+  });
+
   it('should NOT exclude parameters for gpt-5-chat (it supports sampling params)', () => {
     const result = getOpenAILLMConfig({
       apiKey: 'test-api-key',
@@ -220,20 +220,24 @@ export function getOpenAILLMConfig({
     llmConfig.include_reasoning = true;
   }
 
+  const allowXHigh = modelOptions.model ? /^gpt-5\.2/.test(String(modelOptions.model)) : false;
+  const sanitizedReasoningEffort =
+    reasoning_effort === 'xhigh' && !allowXHigh ? undefined : reasoning_effort;
+
   if (
-    hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
+    hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort, reasoning_summary }) &&
     (llmConfig.useResponsesApi === true ||
       (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
   ) {
     llmConfig.reasoning = removeNullishValues(
       {
-        effort: reasoning_effort,
+        effort: sanitizedReasoningEffort,
         summary: reasoning_summary,
       },
       true,
     ) as OpenAI.Reasoning;
-  } else if (hasReasoningParams({ reasoning_effort })) {
-    llmConfig.reasoning_effort = reasoning_effort;
+  } else if (hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort })) {
+    llmConfig.reasoning_effort = sanitizedReasoningEffort;
   }
 
   if (llmConfig.max_tokens != null) {
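For reference, below is a minimal TypeScript sketch of the gating behavior this hunk introduces, pulled out into a standalone function. `sanitizeReasoningEffort` is a hypothetical helper name used only for illustration; the committed change keeps this logic inline in `getOpenAILLMConfig`.

// A sketch of the xhigh gating, assuming only what the diff above shows.
// `sanitizeReasoningEffort` is a hypothetical name; the real change inlines this.
function sanitizeReasoningEffort(
  model: string | undefined,
  effort: string | undefined,
): string | undefined {
  // 'xhigh' is only kept for gpt-5.2* models; any other model has it dropped.
  const allowXHigh = model ? /^gpt-5\.2/.test(model) : false;
  return effort === 'xhigh' && !allowXHigh ? undefined : effort;
}

sanitizeReasoningEffort('gpt-5.2-pro', 'xhigh'); // 'xhigh'   (kept, as the first new test expects)
sanitizeReasoningEffort('gpt-5-pro', 'xhigh');   // undefined (dropped, as the second new test expects)
sanitizeReasoningEffort('gpt-5-pro', 'high');    // 'high'    (non-xhigh values pass through unchanged)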