feat: Add OpenAI Verbosity Parameter (#8929)

* WIP: OpenAI Verbosity Parameter

* 🔧 chore: remove unused import of extractEnvVariable from parsers.ts

* feat: add comprehensive tests for getOpenAIConfig and enhance verbosity handling

* fix: Handle maxTokens in GPT-5+ models and add corresponding tests

* feat: Implement GPT-5+ model handling in processMemory function
Authored by Danny Avila on 2025-08-07 20:49:40 -04:00, committed by GitHub
parent 486fe34a2b, commit 7147bce3c3
14 changed files with 989 additions and 6 deletions
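
For reviewers skimming the diff: GPT-5-generation models accept a new verbosity hint and, like the o-series reasoning models, reject the legacy max_tokens field in favor of max_completion_tokens. The sketch below illustrates the request shaping this PR implements; the helper name and model check are illustrative, not the actual code.

// TypeScript sketch (hypothetical helper, simplified from the PR's handling)
type OpenAIRequestParams = {
  model: string;
  max_tokens?: number;
  max_completion_tokens?: number;
  verbosity?: 'low' | 'medium' | 'high';
};

const isGpt5Plus = (model: string): boolean => {
  const match = /^gpt-(\d+)/.exec(model);
  return match != null && Number(match[1]) >= 5;
};

function shapeParams(model: string, maxTokens?: number, verbosity?: string): OpenAIRequestParams {
  const params: OpenAIRequestParams = { model };
  if (maxTokens != null) {
    // GPT-5+ rejects `max_tokens`; send `max_completion_tokens` instead
    if (isGpt5Plus(model)) {
      params.max_completion_tokens = maxTokens;
    } else {
      params.max_tokens = maxTokens;
    }
  }
  // Verbosity.none is the empty string, so anything falsy means "omit the field"
  if (verbosity === 'low' || verbosity === 'medium' || verbosity === 'high') {
    params.verbosity = verbosity;
  }
  return params;
}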

@@ -1,4 +1,5 @@
 import {
+  Verbosity,
   ImageDetail,
   EModelEndpoint,
   openAISettings,
@@ -286,6 +287,25 @@ const openAIParams: Record<string, SettingDefinition> = {
     optionType: 'model',
     columnSpan: 4,
   },
+  verbosity: {
+    key: 'verbosity',
+    label: 'com_endpoint_verbosity',
+    labelCode: true,
+    description: 'com_endpoint_openai_verbosity',
+    descriptionCode: true,
+    type: 'enum',
+    default: Verbosity.none,
+    component: 'slider',
+    options: [Verbosity.none, Verbosity.low, Verbosity.medium, Verbosity.high],
+    enumMappings: {
+      [Verbosity.none]: 'com_ui_none',
+      [Verbosity.low]: 'com_ui_low',
+      [Verbosity.medium]: 'com_ui_medium',
+      [Verbosity.high]: 'com_ui_high',
+    },
+    optionType: 'model',
+    columnSpan: 4,
+  },
   disableStreaming: {
     key: 'disableStreaming',
     label: 'com_endpoint_disable_streaming_label',
@@ -641,6 +661,7 @@ const openAI: SettingsConfiguration = [
   openAIParams.reasoning_effort,
   openAIParams.useResponsesApi,
   openAIParams.reasoning_summary,
+  openAIParams.verbosity,
   openAIParams.disableStreaming,
 ];
@@ -662,6 +683,7 @@ const openAICol2: SettingsConfiguration = [
   baseDefinitions.imageDetail,
   openAIParams.reasoning_effort,
   openAIParams.reasoning_summary,
+  openAIParams.verbosity,
   openAIParams.useResponsesApi,
   openAIParams.web_search,
   openAIParams.disableStreaming,
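
The component: 'slider' plus enumMappings pairing renders a four-stop slider whose stop labels come from the localization keys above. A rough sketch of how a definition like this could be resolved into UI options; the helper and localize function are hypothetical, not part of this PR:

// TypeScript sketch (hypothetical helper)
type EnumOption = { value: string; label: string };

function toSliderOptions(
  options: string[],
  enumMappings: Record<string, string>,
  localize: (key: string) => string,
): EnumOption[] {
  return options.map((value) => ({
    value,
    label: localize(enumMappings[value] ?? value),
  }));
}

// toSliderOptions(verbosity.options, verbosity.enumMappings, localize)
// → [{ value: '', label: 'None' }, { value: 'low', label: 'Low' }, ...]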

@@ -18,7 +18,6 @@ import {
   compactAssistantSchema,
 } from './schemas';
 import { bedrockInputSchema } from './bedrock';
-import { extractEnvVariable } from './utils';
 import { alternateName } from './config';

 type EndpointSchema =

@@ -126,6 +126,13 @@ export enum ReasoningSummary {
   detailed = 'detailed',
 }

+export enum Verbosity {
+  none = '',
+  low = 'low',
+  medium = 'medium',
+  high = 'high',
+}
+
 export const imageDetailNumeric = {
   [ImageDetail.low]: 0,
   [ImageDetail.auto]: 1,
@@ -141,6 +148,7 @@ export const imageDetailValue = {
 export const eImageDetailSchema = z.nativeEnum(ImageDetail);
 export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
 export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
+export const eVerbositySchema = z.nativeEnum(Verbosity);

 export const defaultAssistantFormValues = {
   assistant: '',
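
Worth noting: because Verbosity.none is the empty string, z.nativeEnum(Verbosity) treats '' as a valid member rather than a missing value. A quick illustration of the resulting validation behavior, assuming standard zod semantics:

// TypeScript sketch of eVerbositySchema's behavior
import { z } from 'zod';

enum Verbosity {
  none = '',
  low = 'low',
  medium = 'medium',
  high = 'high',
}

const eVerbositySchema = z.nativeEnum(Verbosity);

eVerbositySchema.parse('low');       // ok → Verbosity.low
eVerbositySchema.parse('');          // ok → Verbosity.none ('' is a member)
eVerbositySchema.safeParse('loud');  // { success: false } — rejected
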
@@ -636,6 +644,8 @@ export const tConversationSchema = z.object({
   /* OpenAI: Reasoning models only */
   reasoning_effort: eReasoningEffortSchema.optional().nullable(),
   reasoning_summary: eReasoningSummarySchema.optional().nullable(),
+  /* OpenAI: Verbosity control */
+  verbosity: eVerbositySchema.optional().nullable(),
   /* OpenAI: use Responses API */
   useResponsesApi: z.boolean().optional(),
   /* OpenAI Responses API / Anthropic API / Google API */
@@ -743,6 +753,8 @@ export const tQueryParamsSchema = tConversationSchema
     /** @endpoints openAI, custom, azureOpenAI */
     reasoning_summary: true,
+    /** @endpoints openAI, custom, azureOpenAI */
+    verbosity: true,
     /** @endpoints openAI, custom, azureOpenAI */
     useResponsesApi: true,
     /** @endpoints openAI, anthropic, google */
     web_search: true,
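
Adding verbosity to tQueryParamsSchema means it can be seeded from the URL like the other OpenAI params (e.g. ?verbosity=low on a new-chat link). A minimal sketch of how such a value could be validated on read; the route shape is illustrative:

// TypeScript sketch (illustrative URL; reuses eVerbositySchema from above)
// e.g. /c/new?verbosity=low
const search = new URLSearchParams(window.location.search);
const parsed = eVerbositySchema.safeParse(search.get('verbosity') ?? '');
const verbosity = parsed.success ? parsed.data : undefined;
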
@@ -1078,6 +1090,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
   max_tokens: true,
   reasoning_effort: true,
   reasoning_summary: true,
+  verbosity: true,
   useResponsesApi: true,
   web_search: true,
   disableStreaming: true,

@@ -40,6 +40,7 @@ export type TEndpointOption = Pick<
   | 'resendFiles'
   | 'imageDetail'
   | 'reasoning_effort'
+  | 'verbosity'
   | 'instructions'
   | 'additional_instructions'
   | 'append_current_datetime'
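
With verbosity on TEndpointOption, the saved conversation value can flow through to the request config. A rough sketch of the kind of handling getOpenAIConfig gains; this is simplified and the real signature differs, but per OpenAI's docs the Responses API nests the field under text.verbosity while Chat Completions takes it top-level:

// TypeScript sketch (hypothetical helper, not the PR's actual implementation)
type LLMConfig = { modelKwargs?: Record<string, unknown> };

function applyVerbosity(config: LLMConfig, verbosity?: string, useResponsesApi = false): LLMConfig {
  if (!verbosity) {
    return config; // Verbosity.none ('') and undefined both mean "omit the field"
  }
  const modelKwargs: Record<string, unknown> = { ...(config.modelKwargs ?? {}) };
  if (useResponsesApi) {
    modelKwargs.text = { verbosity }; // Responses API: text.verbosity
  } else {
    modelKwargs.verbosity = verbosity; // Chat Completions: top-level verbosity
  }
  return { ...config, modelKwargs };
}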