🤖 feat: GPT-5.1 (#10491)

Danny Avila 2025-11-14 12:28:20 -05:00 committed by GitHub
parent e71c48ec3d
commit 6522789f5b
8 changed files with 50 additions and 14 deletions


@@ -989,7 +989,7 @@ describe('AgentClient - titleConvo', () => {
 };
 // Simulate the getOptions logic that handles GPT-5+ models
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
 delete clientOptions.maxTokens;
@@ -1009,7 +1009,7 @@ describe('AgentClient - titleConvo', () => {
 useResponsesApi: true,
 };
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 const paramName =
 clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
@@ -1034,7 +1034,7 @@ describe('AgentClient - titleConvo', () => {
 };
 // Simulate the getOptions logic
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
 delete clientOptions.maxTokens;
@@ -1055,7 +1055,7 @@ describe('AgentClient - titleConvo', () => {
 };
 // Simulate the getOptions logic
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
 delete clientOptions.maxTokens;
@@ -1068,6 +1068,9 @@ describe('AgentClient - titleConvo', () => {
 it('should handle various GPT-5+ model formats', () => {
 const testCases = [
+{ model: 'gpt-5.1', shouldTransform: true },
+{ model: 'gpt-5.1-chat-latest', shouldTransform: true },
+{ model: 'gpt-5.1-codex', shouldTransform: true },
 { model: 'gpt-5', shouldTransform: true },
 { model: 'gpt-5-turbo', shouldTransform: true },
 { model: 'gpt-6', shouldTransform: true },
@@ -1087,7 +1090,10 @@ describe('AgentClient - titleConvo', () => {
 };
 // Simulate the getOptions logic
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (
+/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+clientOptions.maxTokens != null
+) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
 delete clientOptions.maxTokens;
@@ -1105,6 +1111,9 @@ describe('AgentClient - titleConvo', () => {
 it('should not swap max token param for older models when using useResponsesApi', () => {
 const testCases = [
+{ model: 'gpt-5.1', shouldTransform: true },
+{ model: 'gpt-5.1-chat-latest', shouldTransform: true },
+{ model: 'gpt-5.1-codex', shouldTransform: true },
 { model: 'gpt-5', shouldTransform: true },
 { model: 'gpt-5-turbo', shouldTransform: true },
 { model: 'gpt-6', shouldTransform: true },
@@ -1124,7 +1133,10 @@ describe('AgentClient - titleConvo', () => {
 useResponsesApi: true,
 };
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (
+/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+clientOptions.maxTokens != null
+) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 const paramName =
 clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
@@ -1157,7 +1169,10 @@ describe('AgentClient - titleConvo', () => {
 };
 // Simulate the getOptions logic
-if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+if (
+/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+clientOptions.maxTokens != null
+) {
 clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
 clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
 delete clientOptions.maxTokens;
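
For reference, a minimal sketch of what the widened pattern accepts; the helper name is illustrative and not part of this diff. Worth noting: .test() matched 'gpt-5.1' even under the old pattern, since the dot already sits on a word boundary, so the explicit (?:\.\d+)? mainly keeps the full '5.1' suffix inside the match and documents the intent.

// Illustrative only; isGpt5Plus is a made-up name, not a repo export.
const isGpt5Plus = (model: string): boolean =>
  /\bgpt-[5-9](?:\.\d+)?\b/i.test(model);

isGpt5Plus('gpt-5.1');             // true
isGpt5Plus('gpt-5.1-chat-latest'); // true
isGpt5Plus('gpt-5.1-codex');       // true
isGpt5Plus('gpt-6');               // true
isGpt5Plus('gpt-4o');              // false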


@@ -25,7 +25,7 @@ type EndpointIcon = {
 function getOpenAIColor(_model: string | null | undefined) {
 const model = _model?.toLowerCase() ?? '';
-if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9]\b/i.test(model))) {
+if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9](?:\.\d+)?\b/i.test(model))) {
 return '#000000';
 }
 return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';
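
A quick usage sketch for the updated color helper; expected values are read off the branches above and are not part of the diff.

// Illustrative expectations, assuming the function above:
getOpenAIColor('o3-mini');       // '#000000' (o-series match)
getOpenAIColor('gpt-5.1');       // '#000000' (GPT-5+ match)
getOpenAIColor('gpt-4o');        // '#AB68FF' (GPT-4 family)
getOpenAIColor('gpt-3.5-turbo'); // '#19C37D' (default)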


@@ -345,7 +345,7 @@ ${memory ?? 'No existing memories'}`;
 };
 // Handle GPT-5+ models
-if ('model' in finalLLMConfig && /\bgpt-[5-9]\b/i.test(finalLLMConfig.model ?? '')) {
+if ('model' in finalLLMConfig && /\bgpt-[5-9](?:\.\d+)?\b/i.test(finalLLMConfig.model ?? '')) {
 // Remove temperature for GPT-5+ models
 delete finalLLMConfig.temperature;


@@ -940,6 +940,16 @@ describe('getOpenAIConfig', () => {
 { reasoning_effort: null, reasoning_summary: null, shouldHaveReasoning: false },
 { reasoning_effort: undefined, reasoning_summary: undefined, shouldHaveReasoning: false },
 { reasoning_effort: '', reasoning_summary: '', shouldHaveReasoning: false },
+{
+reasoning_effort: ReasoningEffort.unset,
+reasoning_summary: '',
+shouldHaveReasoning: false,
+},
+{
+reasoning_effort: ReasoningEffort.none,
+reasoning_summary: null,
+shouldHaveReasoning: true,
+},
 {
 reasoning_effort: null,
 reasoning_summary: ReasoningSummary.concise,
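
Reading the expectations above, reasoning options appear to be attached only when the effort or summary carries a real value. A hedged restatement of that predicate (the function name is ours, not the repo's):

// Sketch of the guard the test matrix above exercises: empty string
// (ReasoningEffort.unset), null, and undefined all mean "omit reasoning",
// while the literal 'none' (ReasoningEffort.none) is a real value and is kept.
function shouldHaveReasoning(
  effort?: string | null,
  summary?: string | null,
): boolean {
  const hasEffort = effort != null && effort !== '';
  const hasSummary = summary != null && summary !== '';
  return hasEffort || hasSummary;
}

shouldHaveReasoning('', '');          // false (unset)
shouldHaveReasoning('none', null);    // true (explicit 'none')
shouldHaveReasoning(null, 'concise'); // true (summary alone suffices)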


@@ -300,7 +300,11 @@ export function getOpenAILLMConfig({
 delete modelKwargs.verbosity;
 }
-if (llmConfig.model && /\bgpt-[5-9]\b/i.test(llmConfig.model) && llmConfig.maxTokens != null) {
+if (
+llmConfig.model &&
+/\bgpt-[5-9](?:\.\d+)?\b/i.test(llmConfig.model) &&
+llmConfig.maxTokens != null
+) {
 const paramName =
 llmConfig.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
 modelKwargs[paramName] = llmConfig.maxTokens;
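
To make the transformation concrete, a simplified before/after of the token-param swap; object shapes are abbreviated, but the field names follow the diff.

// Illustrative sketch, not the repo's actual types.
const llmConfig: { model: string; maxTokens?: number; useResponsesApi?: boolean } = {
  model: 'gpt-5.1',
  maxTokens: 1024,
  useResponsesApi: true,
};
const modelKwargs: Record<string, unknown> = {};

if (
  llmConfig.model &&
  /\bgpt-[5-9](?:\.\d+)?\b/i.test(llmConfig.model) &&
  llmConfig.maxTokens != null
) {
  // The Responses API takes max_output_tokens, Chat Completions takes
  // max_completion_tokens; in both cases the generic maxTokens is removed.
  const paramName =
    llmConfig.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
  modelKwargs[paramName] = llmConfig.maxTokens;
  delete llmConfig.maxTokens;
}
// modelKwargs is now { max_output_tokens: 1024 } and llmConfig.maxTokens is gone.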


@@ -927,7 +927,7 @@ export enum KnownEndpoints {
 export enum FetchTokenConfig {
 openrouter = KnownEndpoints.openrouter,
-helicone = KnownEndpoints.helicone
+helicone = KnownEndpoints.helicone,
 }
 export const defaultEndpoints: EModelEndpoint[] = [
@@ -964,6 +964,10 @@ export const alternateName = {
 };
 const sharedOpenAIModels = [
+'gpt-5.1',
+'gpt-5.1-chat-latest',
+'gpt-5.1-codex',
+'gpt-5.1-codex-mini',
 'gpt-5',
 'gpt-5-mini',
 'gpt-5-nano',


@@ -230,9 +230,10 @@ const openAIParams: Record<string, SettingDefinition> = {
 description: 'com_endpoint_openai_reasoning_effort',
 descriptionCode: true,
 type: 'enum',
-default: ReasoningEffort.none,
+default: ReasoningEffort.unset,
 component: 'slider',
 options: [
+ReasoningEffort.unset,
 ReasoningEffort.none,
 ReasoningEffort.minimal,
 ReasoningEffort.low,
@@ -240,6 +241,7 @@ const openAIParams: Record<string, SettingDefinition> = {
 ReasoningEffort.high,
 ],
 enumMappings: {
+[ReasoningEffort.unset]: 'com_ui_auto',
 [ReasoningEffort.none]: 'com_ui_none',
 [ReasoningEffort.minimal]: 'com_ui_minimal',
 [ReasoningEffort.low]: 'com_ui_low',
@@ -291,7 +293,7 @@ const openAIParams: Record<string, SettingDefinition> = {
 ReasoningSummary.detailed,
 ],
 enumMappings: {
-[ReasoningSummary.none]: 'com_ui_none',
+[ReasoningSummary.none]: 'com_ui_unset',
 [ReasoningSummary.auto]: 'com_ui_auto',
 [ReasoningSummary.concise]: 'com_ui_concise',
 [ReasoningSummary.detailed]: 'com_ui_detailed',


@@ -166,7 +166,8 @@ export enum ImageDetail {
 }
 export enum ReasoningEffort {
-none = '',
+unset = '',
+none = 'none',
 minimal = 'minimal',
 low = 'low',
 medium = 'medium',
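
This enum split is the pivot for the UI changes above: previously none = '' conflated "no effort selected" with an explicit none, and the empty string was dropped before reaching the API. With unset = '' and none = 'none', the slider can default to Auto (unset, so the parameter is omitted) while still letting users send an explicit 'none' effort, which newer models such as GPT-5.1 accept. In short (comments are ours):

ReasoningEffort.unset; // ''     (falsy: reasoning_effort is omitted from the request)
ReasoningEffort.none;  // 'none' (truthy: sent to the API as an explicit effort level)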