diff --git a/client/src/components/SidePanel/Parameters/Panel.tsx b/client/src/components/SidePanel/Parameters/Panel.tsx index 7541f4f0e8..0aeb990224 100644 --- a/client/src/components/SidePanel/Parameters/Panel.tsx +++ b/client/src/components/SidePanel/Parameters/Panel.tsx @@ -50,8 +50,34 @@ export default function Parameters() { .map((param) => (overriddenParamsMap[param.key] as SettingDefinition) ?? param); }, [endpointType, endpointsConfig, model, provider]); + const filteredParameters = useMemo((): SettingDefinition[] => { + const allowXHigh = /^gpt-5\.2/.test(model ?? ''); + + return parameters?.map((param) => { + if (param?.key !== 'reasoning_effort' || !param.options) { + return param; + } + + const filteredOptions = allowXHigh + ? param.options + : param.options.filter((option) => option !== 'xhigh'); + + const filteredEnumMappings = param.enumMappings + ? Object.fromEntries( + Object.entries(param.enumMappings).filter(([key]) => allowXHigh || key !== 'xhigh'), + ) + : undefined; + + return { + ...param, + options: filteredOptions, + enumMappings: filteredEnumMappings, + }; + }); + }, [parameters, model]); + useEffect(() => { - if (!parameters) { + if (!filteredParameters) { return; } @@ -65,7 +91,7 @@ export default function Parameters() { // }), // ); const paramKeys = new Set( - parameters.filter((setting) => setting != null).map((setting) => setting.key), + filteredParameters.filter((setting) => setting != null).map((setting) => setting.key), ); setConversation((prev) => { if (!prev) { @@ -76,6 +102,13 @@ export default function Parameters() { const conversationKeys = Object.keys(updatedConversation); const updatedKeys: string[] = []; + + const allowXHigh = /^gpt-5\.2/.test(model ?? 
''); + if (!allowXHigh && updatedConversation.reasoning_effort === 'xhigh') { + updatedKeys.push('reasoning_effort'); + delete updatedConversation.reasoning_effort; + } + conversationKeys.forEach((key) => { // const defaultValue = defaultValueMap.get(key); // if (paramKeys.has(key) && defaultValue != null && prev[key] != null) { @@ -102,7 +135,7 @@ export default function Parameters() { return updatedConversation; }); - }, [parameters, setConversation]); + }, [filteredParameters, setConversation, model]); const resetParameters = useCallback(() => { setConversation((prev) => { @@ -137,7 +170,7 @@ export default function Parameters() { setIsDialogOpen(true); }, [conversation]); - if (!parameters) { + if (!filteredParameters) { return null; } @@ -147,7 +180,7 @@ export default function Parameters() { {' '} {/* This is the parent element containing all settings */} {/* Below is an example of an applied dynamic setting, each be contained by a div with the column span specified */} - {parameters.map((setting) => { + {filteredParameters.map((setting) => { const Component = componentMapping[setting.component]; if (!Component) { return null; diff --git a/client/src/locales/en/translation.json b/client/src/locales/en/translation.json index a82e931072..7ee49a2a3a 100644 --- a/client/src/locales/en/translation.json +++ b/client/src/locales/en/translation.json @@ -971,6 +971,7 @@ "com_ui_hide_password": "Hide password", "com_ui_hide_qr": "Hide QR Code", "com_ui_high": "High", + "com_ui_extra_high": "Extra High", "com_ui_host": "Host", "com_ui_icon": "Icon", "com_ui_idea": "Ideas", diff --git a/packages/api/src/endpoints/openai/llm.spec.ts b/packages/api/src/endpoints/openai/llm.spec.ts index a4eb7c78e3..09047ba93b 100644 --- a/packages/api/src/endpoints/openai/llm.spec.ts +++ b/packages/api/src/endpoints/openai/llm.spec.ts @@ -210,6 +210,32 @@ describe('getOpenAILLMConfig', () => { }); }); + it('should allow xhigh reasoning_effort for gpt-5.2 models', () => { + const result = 
getOpenAILLMConfig({ + apiKey: 'test-api-key', + streaming: true, + modelOptions: { + model: 'gpt-5.2-pro', + reasoning_effort: ReasoningEffort.xhigh, + }, + }); + + expect(result.llmConfig).toHaveProperty('reasoning_effort', ReasoningEffort.xhigh); + }); + + it('should drop xhigh reasoning_effort for non-gpt-5.2 models', () => { + const result = getOpenAILLMConfig({ + apiKey: 'test-api-key', + streaming: true, + modelOptions: { + model: 'gpt-5-pro', + reasoning_effort: ReasoningEffort.xhigh, + }, + }); + + expect(result.llmConfig).not.toHaveProperty('reasoning_effort'); + }); + it('should NOT exclude parameters for gpt-5-chat (it supports sampling params)', () => { const result = getOpenAILLMConfig({ apiKey: 'test-api-key', diff --git a/packages/api/src/endpoints/openai/llm.ts b/packages/api/src/endpoints/openai/llm.ts index c88122310a..e50d29ed3e 100644 --- a/packages/api/src/endpoints/openai/llm.ts +++ b/packages/api/src/endpoints/openai/llm.ts @@ -220,20 +220,24 @@ export function getOpenAILLMConfig({ llmConfig.include_reasoning = true; } + const allowXHigh = modelOptions.model ? /^gpt-5\.2/.test(String(modelOptions.model)) : false; + const sanitizedReasoningEffort = + reasoning_effort === 'xhigh' && !allowXHigh ? 
undefined : reasoning_effort; + if ( - hasReasoningParams({ reasoning_effort, reasoning_summary }) && + hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort, reasoning_summary }) && (llmConfig.useResponsesApi === true || (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI)) ) { llmConfig.reasoning = removeNullishValues( { - effort: reasoning_effort, + effort: sanitizedReasoningEffort, summary: reasoning_summary, }, true, ) as OpenAI.Reasoning; - } else if (hasReasoningParams({ reasoning_effort })) { - llmConfig.reasoning_effort = reasoning_effort; + } else if (hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort })) { + llmConfig.reasoning_effort = sanitizedReasoningEffort; } if (llmConfig.max_tokens != null) { diff --git a/packages/data-provider/src/parameterSettings.ts b/packages/data-provider/src/parameterSettings.ts index b3ed86b0f3..ec92e43837 100644 --- a/packages/data-provider/src/parameterSettings.ts +++ b/packages/data-provider/src/parameterSettings.ts @@ -239,6 +239,7 @@ const openAIParams: Record = { ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high, + ReasoningEffort.xhigh, ], enumMappings: { [ReasoningEffort.unset]: 'com_ui_auto', @@ -247,6 +248,7 @@ const openAIParams: Record = { [ReasoningEffort.low]: 'com_ui_low', [ReasoningEffort.medium]: 'com_ui_medium', [ReasoningEffort.high]: 'com_ui_high', + [ReasoningEffort.xhigh]: 'com_ui_extra_high', }, optionType: 'model', columnSpan: 4, diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index ecf0a925fc..53410af871 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -169,6 +169,7 @@ export enum ReasoningEffort { low = 'low', medium = 'medium', high = 'high', + xhigh = 'xhigh', } export enum ReasoningSummary {