diff --git a/api/server/controllers/agents/client.test.js b/api/server/controllers/agents/client.test.js
index ac47dff66c..0ce59c5fbc 100644
--- a/api/server/controllers/agents/client.test.js
+++ b/api/server/controllers/agents/client.test.js
@@ -989,7 +989,7 @@ describe('AgentClient - titleConvo', () => {
       };
 
       // Simulate the getOptions logic that handles GPT-5+ models
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;
@@ -1009,7 +1009,7 @@ describe('AgentClient - titleConvo', () => {
         useResponsesApi: true,
       };
 
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         const paramName =
           clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
@@ -1034,7 +1034,7 @@ describe('AgentClient - titleConvo', () => {
       };
 
       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;
@@ -1055,7 +1055,7 @@ describe('AgentClient - titleConvo', () => {
       };
 
       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;
@@ -1068,6 +1068,9 @@ describe('AgentClient - titleConvo', () => {
 
     it('should handle various GPT-5+ model formats', () => {
       const testCases = [
+        { model: 'gpt-5.1', shouldTransform: true },
+        { model: 'gpt-5.1-chat-latest', shouldTransform: true },
+        { model: 'gpt-5.1-codex', shouldTransform: true },
         { model: 'gpt-5', shouldTransform: true },
         { model: 'gpt-5-turbo', shouldTransform: true },
         { model: 'gpt-6', shouldTransform: true },
@@ -1087,7 +1090,10 @@ describe('AgentClient - titleConvo', () => {
         };
 
         // Simulate the getOptions logic
-        if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+        if (
+          /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+          clientOptions.maxTokens != null
+        ) {
           clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
           clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
           delete clientOptions.maxTokens;
@@ -1105,6 +1111,9 @@ describe('AgentClient - titleConvo', () => {
 
     it('should not swap max token param for older models when using useResponsesApi', () => {
       const testCases = [
+        { model: 'gpt-5.1', shouldTransform: true },
+        { model: 'gpt-5.1-chat-latest', shouldTransform: true },
+        { model: 'gpt-5.1-codex', shouldTransform: true },
         { model: 'gpt-5', shouldTransform: true },
         { model: 'gpt-5-turbo', shouldTransform: true },
         { model: 'gpt-6', shouldTransform: true },
@@ -1124,7 +1133,10 @@ describe('AgentClient - titleConvo', () => {
           useResponsesApi: true,
         };
 
-        if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+        if (
+          /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+          clientOptions.maxTokens != null
+        ) {
           clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
           const paramName =
             clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
@@ -1157,7 +1169,10 @@ describe('AgentClient - titleConvo', () => {
         };
 
         // Simulate the getOptions logic
-        if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+        if (
+          /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+          clientOptions.maxTokens != null
+        ) {
           clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
           clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
           delete clientOptions.maxTokens;
diff --git a/client/src/components/Endpoints/MessageEndpointIcon.tsx b/client/src/components/Endpoints/MessageEndpointIcon.tsx
index 0a9782ce99..9b71396d12 100644
--- a/client/src/components/Endpoints/MessageEndpointIcon.tsx
+++ b/client/src/components/Endpoints/MessageEndpointIcon.tsx
@@ -25,7 +25,7 @@ type EndpointIcon = {
 
 function getOpenAIColor(_model: string | null | undefined) {
   const model = _model?.toLowerCase() ?? '';
-  if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9]\b/i.test(model))) {
+  if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9](?:\.\d+)?\b/i.test(model))) {
     return '#000000';
   }
   return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';
diff --git a/packages/api/src/agents/memory.ts b/packages/api/src/agents/memory.ts
index 8ea9de14b8..865aaea7b8 100644
--- a/packages/api/src/agents/memory.ts
+++ b/packages/api/src/agents/memory.ts
@@ -345,7 +345,7 @@ ${memory ?? 'No existing memories'}`;
     };
 
     // Handle GPT-5+ models
-    if ('model' in finalLLMConfig && /\bgpt-[5-9]\b/i.test(finalLLMConfig.model ?? '')) {
+    if ('model' in finalLLMConfig && /\bgpt-[5-9](?:\.\d+)?\b/i.test(finalLLMConfig.model ?? '')) {
       // Remove temperature for GPT-5+ models
       delete finalLLMConfig.temperature;
 
diff --git a/packages/api/src/endpoints/openai/config.spec.ts b/packages/api/src/endpoints/openai/config.spec.ts
index 14516ee102..8d85014e79 100644
--- a/packages/api/src/endpoints/openai/config.spec.ts
+++ b/packages/api/src/endpoints/openai/config.spec.ts
@@ -940,6 +940,16 @@ describe('getOpenAIConfig', () => {
       { reasoning_effort: null, reasoning_summary: null, shouldHaveReasoning: false },
       { reasoning_effort: undefined, reasoning_summary: undefined, shouldHaveReasoning: false },
       { reasoning_effort: '', reasoning_summary: '', shouldHaveReasoning: false },
+      {
+        reasoning_effort: ReasoningEffort.unset,
+        reasoning_summary: '',
+        shouldHaveReasoning: false,
+      },
+      {
+        reasoning_effort: ReasoningEffort.none,
+        reasoning_summary: null,
+        shouldHaveReasoning: true,
+      },
       {
         reasoning_effort: null,
         reasoning_summary: ReasoningSummary.concise,
diff --git a/packages/api/src/endpoints/openai/llm.ts b/packages/api/src/endpoints/openai/llm.ts
index a48d7d680c..e10bf1d556 100644
--- a/packages/api/src/endpoints/openai/llm.ts
+++ b/packages/api/src/endpoints/openai/llm.ts
@@ -300,7 +300,11 @@ export function getOpenAILLMConfig({
     delete modelKwargs.verbosity;
   }
 
-  if (llmConfig.model && /\bgpt-[5-9]\b/i.test(llmConfig.model) && llmConfig.maxTokens != null) {
+  if (
+    llmConfig.model &&
+    /\bgpt-[5-9](?:\.\d+)?\b/i.test(llmConfig.model) &&
+    llmConfig.maxTokens != null
+  ) {
     const paramName =
       llmConfig.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
     modelKwargs[paramName] = llmConfig.maxTokens;
diff --git a/packages/data-provider/src/config.ts b/packages/data-provider/src/config.ts
index 602880d573..5639e51d4b 100644
--- a/packages/data-provider/src/config.ts
+++ b/packages/data-provider/src/config.ts
@@ -927,7 +927,7 @@ export enum KnownEndpoints {
 
 export enum FetchTokenConfig {
   openrouter = KnownEndpoints.openrouter,
-  helicone = KnownEndpoints.helicone
+  helicone = KnownEndpoints.helicone,
 }
 
 export const defaultEndpoints: EModelEndpoint[] = [
@@ -964,6 +964,10 @@ export const alternateName = {
 };
 
 const sharedOpenAIModels = [
+  'gpt-5.1',
+  'gpt-5.1-chat-latest',
+  'gpt-5.1-codex',
+  'gpt-5.1-codex-mini',
   'gpt-5',
   'gpt-5-mini',
   'gpt-5-nano',
diff --git a/packages/data-provider/src/parameterSettings.ts b/packages/data-provider/src/parameterSettings.ts
index 806a5f4bbd..b3ed86b0f3 100644
--- a/packages/data-provider/src/parameterSettings.ts
+++ b/packages/data-provider/src/parameterSettings.ts
@@ -230,9 +230,10 @@ const openAIParams: Record<string, SettingDefinition> = {
     description: 'com_endpoint_openai_reasoning_effort',
     descriptionCode: true,
     type: 'enum',
-    default: ReasoningEffort.none,
+    default: ReasoningEffort.unset,
     component: 'slider',
     options: [
+      ReasoningEffort.unset,
      ReasoningEffort.none,
      ReasoningEffort.minimal,
      ReasoningEffort.low,
@@ -240,6 +241,7 @@
       ReasoningEffort.high,
     ],
     enumMappings: {
+      [ReasoningEffort.unset]: 'com_ui_auto',
       [ReasoningEffort.none]: 'com_ui_none',
       [ReasoningEffort.minimal]: 'com_ui_minimal',
       [ReasoningEffort.low]: 'com_ui_low',
@@ -291,7 +293,7 @@
       ReasoningSummary.detailed,
     ],
     enumMappings: {
-      [ReasoningSummary.none]: 'com_ui_none',
+      [ReasoningSummary.none]: 'com_ui_unset',
       [ReasoningSummary.auto]: 'com_ui_auto',
       [ReasoningSummary.concise]: 'com_ui_concise',
       [ReasoningSummary.detailed]: 'com_ui_detailed',
diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts
index cf0412e579..9d1761d468 100644
--- a/packages/data-provider/src/schemas.ts
+++ b/packages/data-provider/src/schemas.ts
@@ -166,7 +166,8 @@ export enum ImageDetail {
 }
 
 export enum ReasoningEffort {
-  none = '',
+  unset = '',
+  none = 'none',
   minimal = 'minimal',
   low = 'low',
   medium = 'medium',
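
For reference, a quick sanity check of the widened model pattern. This is an illustrative Node.js snippet, not part of the diff: the positive cases mirror the test data added above, while the gpt-4 negatives are assumed for contrast.

// Illustrative check: the optional (?:\.\d+)? group explicitly covers
// point releases such as gpt-5.1 alongside plain gpt-5..gpt-9 model IDs.
const GPT5_PLUS = /\bgpt-[5-9](?:\.\d+)?\b/i;

const cases = [
  ['gpt-5.1', true],
  ['gpt-5.1-chat-latest', true],
  ['gpt-5.1-codex', true],
  ['gpt-5', true],
  ['gpt-5-turbo', true],
  ['gpt-6', true],
  ['gpt-4.1', false], // assumed negative, not taken from the test data
  ['gpt-4o', false], // assumed negative, not taken from the test data
];

for (const [model, expected] of cases) {
  console.assert(GPT5_PLUS.test(model) === expected, `unexpected match result for ${model}`);
}

Note also the ReasoningEffort split in schemas.ts: unset keeps the empty-string value (no reasoning parameter is emitted), while none becomes an explicit 'none' value, which is why the new config.spec.ts case with ReasoningEffort.none expects shouldHaveReasoning: true.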