Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 08:50:15 +01:00

🤖 feat: GPT-5.1 (#10491)

Commit 6522789f5b (parent e71c48ec3d)
8 changed files with 50 additions and 14 deletions
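
Every hunk in this commit widens the same model-detection regex, from \bgpt-[5-9]\b to \bgpt-[5-9](?:\.\d+)?\b, so that dotted point releases such as gpt-5.1 are matched as one token. A minimal TypeScript sketch of the new pattern's behavior (the sample list is illustrative, not taken from the test files):

// The optional group (?:\.\d+)? consumes a dotted minor version such as ".1",
// so the full "gpt-5.1" segment is matched rather than only the "gpt-5" prefix.
const GPT5_PLUS = /\bgpt-[5-9](?:\.\d+)?\b/i;

const samples = ['gpt-5.1', 'gpt-5.1-chat-latest', 'gpt-5.1-codex', 'gpt-5', 'gpt-6', 'gpt-4o'];
for (const model of samples) {
  console.log(model, GPT5_PLUS.test(model)); // true for everything except 'gpt-4o'
}

The commit also adds the gpt-5.1 family to the shared model list, introduces a distinct ReasoningEffort.unset value, and adds a missing trailing comma in FetchTokenConfig.
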
@@ -989,7 +989,7 @@ describe('AgentClient - titleConvo', () => {
       };

       // Simulate the getOptions logic that handles GPT-5+ models
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;

@@ -1009,7 +1009,7 @@ describe('AgentClient - titleConvo', () => {
         useResponsesApi: true,
       };

-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         const paramName =
           clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';

@@ -1034,7 +1034,7 @@ describe('AgentClient - titleConvo', () => {
       };

       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;

@@ -1055,7 +1055,7 @@ describe('AgentClient - titleConvo', () => {
       };

       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;

@@ -1068,6 +1068,9 @@ describe('AgentClient - titleConvo', () => {

     it('should handle various GPT-5+ model formats', () => {
       const testCases = [
+        { model: 'gpt-5.1', shouldTransform: true },
+        { model: 'gpt-5.1-chat-latest', shouldTransform: true },
+        { model: 'gpt-5.1-codex', shouldTransform: true },
         { model: 'gpt-5', shouldTransform: true },
         { model: 'gpt-5-turbo', shouldTransform: true },
         { model: 'gpt-6', shouldTransform: true },

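
A self-contained sketch of what these table-driven cases exercise; the loop body is reconstructed from the hunks above rather than copied from the test file, and the negative case is hypothetical:

const testCases = [
  { model: 'gpt-5.1', shouldTransform: true },
  { model: 'gpt-5.1-codex', shouldTransform: true },
  { model: 'gpt-5', shouldTransform: true },
  { model: 'gpt-4o', shouldTransform: false }, // hypothetical negative case
];

for (const { model, shouldTransform } of testCases) {
  const clientOptions: {
    model: string;
    maxTokens?: number;
    modelKwargs?: Record<string, number>;
  } = { model, maxTokens: 512 };

  if (/\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
    clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
    clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
    delete clientOptions.maxTokens;
  }

  // The transform moves maxTokens into modelKwargs only for GPT-5+ models.
  console.assert(
    ('max_completion_tokens' in (clientOptions.modelKwargs ?? {})) === shouldTransform,
    model,
  );
}
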
@@ -1087,7 +1090,10 @@ describe('AgentClient - titleConvo', () => {
       };

       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (
+        /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+        clientOptions.maxTokens != null
+      ) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;

@@ -1105,6 +1111,9 @@ describe('AgentClient - titleConvo', () => {

     it('should not swap max token param for older models when using useResponsesApi', () => {
       const testCases = [
+        { model: 'gpt-5.1', shouldTransform: true },
+        { model: 'gpt-5.1-chat-latest', shouldTransform: true },
+        { model: 'gpt-5.1-codex', shouldTransform: true },
         { model: 'gpt-5', shouldTransform: true },
         { model: 'gpt-5-turbo', shouldTransform: true },
         { model: 'gpt-6', shouldTransform: true },

@@ -1124,7 +1133,10 @@ describe('AgentClient - titleConvo', () => {
         useResponsesApi: true,
       };

-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (
+        /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+        clientOptions.maxTokens != null
+      ) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         const paramName =
           clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';

@@ -1157,7 +1169,10 @@ describe('AgentClient - titleConvo', () => {
       };

       // Simulate the getOptions logic
-      if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+      if (
+        /\bgpt-[5-9](?:\.\d+)?\b/i.test(clientOptions.model) &&
+        clientOptions.maxTokens != null
+      ) {
         clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
         clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
         delete clientOptions.maxTokens;

@@ -25,7 +25,7 @@ type EndpointIcon = {
 function getOpenAIColor(_model: string | null | undefined) {
   const model = _model?.toLowerCase() ?? '';
-  if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9]\b/i.test(model))) {
+  if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9](?:\.\d+)?\b/i.test(model))) {
     return '#000000';
   }
   return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';

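
Assuming the function above, the widened pattern keeps the dotted models in the same black bucket as the o-series and GPT-5+ reasoning models:

getOpenAIColor('gpt-5.1');       // '#000000'
getOpenAIColor('gpt-5.1-codex'); // '#000000'
getOpenAIColor('gpt-4o');        // '#AB68FF'
getOpenAIColor('gpt-3.5-turbo'); // '#19C37D'
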
@@ -345,7 +345,7 @@ ${memory ?? 'No existing memories'}`;
   };

   // Handle GPT-5+ models
-  if ('model' in finalLLMConfig && /\bgpt-[5-9]\b/i.test(finalLLMConfig.model ?? '')) {
+  if ('model' in finalLLMConfig && /\bgpt-[5-9](?:\.\d+)?\b/i.test(finalLLMConfig.model ?? '')) {
     // Remove temperature for GPT-5+ models
     delete finalLLMConfig.temperature;

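
A hedged sketch of the guard above. The config shape is simplified, and the rationale (GPT-5+ endpoints accepting only their default temperature) is inferred from the comment in the diff rather than stated by the commit:

const finalLLMConfig: { model?: string; temperature?: number } = {
  model: 'gpt-5.1',
  temperature: 0.7,
};

// GPT-5+ models are matched with the dotted-version pattern, and any
// user-set temperature is dropped rather than forwarded to the API.
if ('model' in finalLLMConfig && /\bgpt-[5-9](?:\.\d+)?\b/i.test(finalLLMConfig.model ?? '')) {
  delete finalLLMConfig.temperature;
}
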
@@ -940,6 +940,16 @@ describe('getOpenAIConfig', () => {
       { reasoning_effort: null, reasoning_summary: null, shouldHaveReasoning: false },
       { reasoning_effort: undefined, reasoning_summary: undefined, shouldHaveReasoning: false },
       { reasoning_effort: '', reasoning_summary: '', shouldHaveReasoning: false },
+      {
+        reasoning_effort: ReasoningEffort.unset,
+        reasoning_summary: '',
+        shouldHaveReasoning: false,
+      },
+      {
+        reasoning_effort: ReasoningEffort.none,
+        reasoning_summary: null,
+        shouldHaveReasoning: true,
+      },
       {
         reasoning_effort: null,
         reasoning_summary: ReasoningSummary.concise,

@@ -300,7 +300,11 @@ export function getOpenAILLMConfig({
     delete modelKwargs.verbosity;
   }

-  if (llmConfig.model && /\bgpt-[5-9]\b/i.test(llmConfig.model) && llmConfig.maxTokens != null) {
+  if (
+    llmConfig.model &&
+    /\bgpt-[5-9](?:\.\d+)?\b/i.test(llmConfig.model) &&
+    llmConfig.maxTokens != null
+  ) {
     const paramName =
       llmConfig.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
     modelKwargs[paramName] = llmConfig.maxTokens;

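
This is the production counterpart of the logic the tests simulate. A runnable sketch under simplified types; the function name swapMaxTokensParam is hypothetical, and the final delete mirrors the test hunks since the production hunk is cut off here:

type LLMConfig = {
  model?: string;
  maxTokens?: number;
  useResponsesApi?: boolean;
};

function swapMaxTokensParam(llmConfig: LLMConfig, modelKwargs: Record<string, unknown>) {
  if (
    llmConfig.model &&
    /\bgpt-[5-9](?:\.\d+)?\b/i.test(llmConfig.model) &&
    llmConfig.maxTokens != null
  ) {
    // The Responses API names the cap max_output_tokens; Chat Completions
    // uses max_completion_tokens. Either way the legacy maxTokens value
    // moves into modelKwargs so it is not sent under its old name.
    const paramName =
      llmConfig.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
    modelKwargs[paramName] = llmConfig.maxTokens;
    delete llmConfig.maxTokens; // assumed, following the test hunks above
  }
}
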
@@ -927,7 +927,7 @@ export enum KnownEndpoints {

 export enum FetchTokenConfig {
   openrouter = KnownEndpoints.openrouter,
-  helicone = KnownEndpoints.helicone
+  helicone = KnownEndpoints.helicone,
 }

 export const defaultEndpoints: EModelEndpoint[] = [

@@ -964,6 +964,10 @@ export const alternateName = {
 };

 const sharedOpenAIModels = [
+  'gpt-5.1',
+  'gpt-5.1-chat-latest',
+  'gpt-5.1-codex',
+  'gpt-5.1-codex-mini',
   'gpt-5',
   'gpt-5-mini',
   'gpt-5-nano',

@@ -230,9 +230,10 @@ const openAIParams: Record<string, SettingDefinition> = {
     description: 'com_endpoint_openai_reasoning_effort',
     descriptionCode: true,
     type: 'enum',
-    default: ReasoningEffort.none,
+    default: ReasoningEffort.unset,
     component: 'slider',
     options: [
+      ReasoningEffort.unset,
       ReasoningEffort.none,
       ReasoningEffort.minimal,
       ReasoningEffort.low,

@@ -240,6 +241,7 @@ const openAIParams: Record<string, SettingDefinition> = {
       ReasoningEffort.high,
     ],
     enumMappings: {
+      [ReasoningEffort.unset]: 'com_ui_auto',
       [ReasoningEffort.none]: 'com_ui_none',
       [ReasoningEffort.minimal]: 'com_ui_minimal',
       [ReasoningEffort.low]: 'com_ui_low',

@@ -291,7 +293,7 @@ const openAIParams: Record<string, SettingDefinition> = {
       ReasoningSummary.detailed,
     ],
     enumMappings: {
-      [ReasoningSummary.none]: 'com_ui_none',
+      [ReasoningSummary.none]: 'com_ui_unset',
       [ReasoningSummary.auto]: 'com_ui_auto',
       [ReasoningSummary.concise]: 'com_ui_concise',
       [ReasoningSummary.detailed]: 'com_ui_detailed',

@@ -166,7 +166,8 @@ export enum ImageDetail {
 }

 export enum ReasoningEffort {
-  none = '',
+  unset = '',
+  none = 'none',
   minimal = 'minimal',
   low = 'low',
   medium = 'medium',

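
The enum split is what the new getOpenAIConfig test cases assert: the empty string now means "unset" (omit the parameter entirely), while none becomes a real 'none' value that is forwarded. A small sketch of the distinction; the reasoningPayload helper is hypothetical, not part of the commit:

enum ReasoningEffort {
  unset = '',
  none = 'none',
  minimal = 'minimal',
  low = 'low',
  medium = 'medium',
  high = 'high',
}

function reasoningPayload(effort: ReasoningEffort): { reasoning_effort?: string } {
  // An empty-string `unset` is treated as "send nothing", while `none`
  // is forwarded verbatim as a real API value.
  return effort === ReasoningEffort.unset ? {} : { reasoning_effort: effort };
}

console.log(reasoningPayload(ReasoningEffort.unset)); // {}
console.log(reasoningPayload(ReasoningEffort.none)); // { reasoning_effort: 'none' }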