diff --git a/api/package.json b/api/package.json index 571db53aa6..fd002d58ce 100644 --- a/api/package.json +++ b/api/package.json @@ -48,7 +48,7 @@ "@langchain/google-genai": "^0.2.13", "@langchain/google-vertexai": "^0.2.13", "@langchain/textsplitters": "^0.1.0", - "@librechat/agents": "^2.4.46", + "@librechat/agents": "^2.4.49", "@librechat/api": "*", "@librechat/data-schemas": "*", "@node-saml/passport-saml": "^5.0.0", diff --git a/client/src/components/SidePanel/Parameters/DynamicSlider.tsx b/client/src/components/SidePanel/Parameters/DynamicSlider.tsx index a9142468e9..d1b9bd9678 100644 --- a/client/src/components/SidePanel/Parameters/DynamicSlider.tsx +++ b/client/src/components/SidePanel/Parameters/DynamicSlider.tsx @@ -18,6 +18,7 @@ function DynamicSlider({ setOption, optionType, options, + enumMappings, readonly = false, showDefault = false, includeInput = true, @@ -60,24 +61,68 @@ function DynamicSlider({ const enumToNumeric = useMemo(() => { if (isEnum && options) { - return options.reduce((acc, mapping, index) => { - acc[mapping] = index; - return acc; - }, {} as Record); + return options.reduce( + (acc, mapping, index) => { + acc[mapping] = index; + return acc; + }, + {} as Record, + ); } return {}; }, [isEnum, options]); const valueToEnumOption = useMemo(() => { if (isEnum && options) { - return options.reduce((acc, option, index) => { - acc[index] = option; - return acc; - }, {} as Record); + return options.reduce( + (acc, option, index) => { + acc[index] = option; + return acc; + }, + {} as Record, + ); } return {}; }, [isEnum, options]); + const getDisplayValue = useCallback( + (value: string | number | undefined | null): string => { + if (isEnum && enumMappings && value != null) { + const stringValue = String(value); + // Check if the value exists in enumMappings + if (stringValue in enumMappings) { + const mappedValue = String(enumMappings[stringValue]); + // Check if the mapped value is a localization key + if (mappedValue.startsWith('com_')) { 
+ return localize(mappedValue as TranslationKeys) ?? mappedValue; + } + return mappedValue; + } + } + // Always return a string for Input component compatibility + if (value != null) { + return String(value); + } + return String(defaultValue ?? ''); + }, + [isEnum, enumMappings, defaultValue, localize], + ); + + const getDefaultDisplayValue = useCallback((): string => { + if (defaultValue != null && enumMappings) { + const stringDefault = String(defaultValue); + if (stringDefault in enumMappings) { + const mappedValue = String(enumMappings[stringDefault]); + // Check if the mapped value is a localization key + if (mappedValue.startsWith('com_')) { + return localize(mappedValue as TranslationKeys) ?? mappedValue; + } + return mappedValue; + } + } + return String(defaultValue ?? ''); + }, [defaultValue, enumMappings, localize]); + const handleValueChange = useCallback( (value: number) => { if (isEnum) { @@ -115,12 +160,12 @@ function DynamicSlider({
@@ -132,13 +177,13 @@ function DynamicSlider({ onChange={(value) => setInputValue(Number(value))} max={range ? range.max : (options?.length ?? 0) - 1} min={range ? range.min : 0} - step={range ? range.step ?? 1 : 1} + step={range ? (range.step ?? 1) : 1} controls={false} className={cn( defaultTextProps, cn( optionText, - 'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200', + 'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200', ), )} /> @@ -146,13 +191,13 @@ function DynamicSlider({ ({})} className={cn( defaultTextProps, cn( optionText, - 'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200', + 'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200', ), )} /> @@ -164,19 +209,23 @@ function DynamicSlider({ value={[ isEnum ? enumToNumeric[(selectedValue as number) ?? ''] - : (inputValue as number) ?? (defaultValue as number), + : ((inputValue as number) ?? (defaultValue as number)), ]} onValueChange={(value) => handleValueChange(value[0])} onDoubleClick={() => setInputValue(defaultValue as string | number)} max={max} min={range ? range.min : 0} - step={range ? range.step ?? 1 : 1} + step={range ? (range.step ?? 1) : 1} className="flex h-4 w-full" /> {description && ( )} diff --git a/client/src/locales/en/translation.json b/client/src/locales/en/translation.json index aeb7695b8b..ad133e7a35 100644 --- a/client/src/locales/en/translation.json +++ b/client/src/locales/en/translation.json @@ -225,12 +225,14 @@ "com_endpoint_openai_max_tokens": "Optional 'max_tokens' field, representing the maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the models context length. 
You may experience errors if this number exceeds the max context tokens.", "com_endpoint_openai_pres": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", "com_endpoint_openai_prompt_prefix_placeholder": "Set custom instructions to include in System Message. Default: none", - "com_endpoint_openai_reasoning_effort": "o1 and o3 models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.", + "com_endpoint_openai_reasoning_effort": "Reasoning models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.", + "com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none, auto, concise, or detailed.", "com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.", "com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.", "com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.", "com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.", "com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We recommend altering this or temperature but not both.", + "com_endpoint_openai_use_responses_api": "Use the Responses API instead of Chat Completions, which includes extended features from OpenAI. Required for o1-pro, o3-pro, and to enable reasoning summaries.", "com_endpoint_output": "Output", "com_endpoint_plug_image_detail": "Image Detail", "com_endpoint_plug_resend_files": "Resend Files", @@ -261,6 +263,7 @@ "com_endpoint_prompt_prefix_assistants_placeholder": "Set additional instructions or context on top of the Assistant's main instructions. Ignored if empty.", "com_endpoint_prompt_prefix_placeholder": "Set custom instructions or context. Ignored if empty.", "com_endpoint_reasoning_effort": "Reasoning Effort", + "com_endpoint_reasoning_summary": "Reasoning Summary", "com_endpoint_save_as_preset": "Save As Preset", "com_endpoint_search": "Search endpoint by name", "com_endpoint_search_endpoint_models": "Search {{0}} models...", @@ -276,6 +279,7 @@ "com_endpoint_top_k": "Top K", "com_endpoint_top_p": "Top P", "com_endpoint_use_active_assistant": "Use Active Assistant", + "com_endpoint_use_responses_api": "Use Responses API", "com_error_expired_user_key": "Provided key for {{0}} expired at {{1}}. Please provide a new key and try again.", "com_error_files_dupe": "Duplicate file detected.", "com_error_files_empty": "Empty files are not allowed.", @@ -820,6 +824,11 @@ "com_ui_loading": "Loading...", "com_ui_locked": "Locked", "com_ui_logo": "{{0}} Logo", + "com_ui_low": "Low", + "com_ui_concise": "Concise", + "com_ui_detailed": "Detailed", + "com_ui_high": "High", + "com_ui_medium": "Medium", "com_ui_manage": "Manage", "com_ui_max_tags": "Maximum number allowed is {{0}}, using latest values.", "com_ui_mcp_dialog_desc": "Please enter the necessary information below.", @@ -1060,4 +1069,4 @@ "com_ui_zoom": "Zoom", "com_user_message": "You", "com_warning_resubmit_unsupported": "Resubmitting the AI message is not supported for this endpoint." 
-} \ No newline at end of file +} diff --git a/package-lock.json b/package-lock.json index 10210b9c25..8d04047ef4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -64,7 +64,7 @@ "@langchain/google-genai": "^0.2.13", "@langchain/google-vertexai": "^0.2.13", "@langchain/textsplitters": "^0.1.0", - "@librechat/agents": "^2.4.46", + "@librechat/agents": "^2.4.49", "@librechat/api": "*", "@librechat/data-schemas": "*", "@node-saml/passport-saml": "^5.0.0", @@ -19436,9 +19436,9 @@ } }, "node_modules/@librechat/agents": { - "version": "2.4.46", - "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.46.tgz", - "integrity": "sha512-zR27U19/WGF3HN64oBbiaFgjjWHaF7BjYzRFWzQKEkk+iEzCe59IpuEZUizQ54YcY02nhhh6S3MNUjhAJwMYVA==", + "version": "2.4.49", + "resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.49.tgz", + "integrity": "sha512-Bnp/PZsg1VgnmGS80tW4ssKpcqUZ7xysKesV/8gGaUBF1VDBiYBh0gC6ugfJhltNOv93rEVSucjPlTAuHimNCg==", "license": "MIT", "dependencies": { "@langchain/anthropic": "^0.3.23", @@ -46624,7 +46624,7 @@ "typescript": "^5.0.4" }, "peerDependencies": { - "@librechat/agents": "^2.4.46", + "@librechat/agents": "^2.4.49", "@librechat/data-schemas": "*", "@modelcontextprotocol/sdk": "^1.12.3", "axios": "^1.8.2", diff --git a/packages/api/package.json b/packages/api/package.json index ed2b70965d..a4c41ec537 100644 --- a/packages/api/package.json +++ b/packages/api/package.json @@ -69,7 +69,7 @@ "registry": "https://registry.npmjs.org/" }, "peerDependencies": { - "@librechat/agents": "^2.4.46", + "@librechat/agents": "^2.4.49", "@librechat/data-schemas": "*", "@modelcontextprotocol/sdk": "^1.12.3", "axios": "^1.8.2", diff --git a/packages/api/src/agents/run.ts b/packages/api/src/agents/run.ts index e12d2cf2b6..9f07a1fb9c 100644 --- a/packages/api/src/agents/run.ts +++ b/packages/api/src/agents/run.ts @@ -1,6 +1,7 @@ import { Run, Providers } from '@librechat/agents'; import { providerEndpointMap, KnownEndpoints } from 
'librechat-data-provider'; import type { + OpenAIClientOptions, StandardGraphConfig, EventHandler, GenericTool, @@ -76,6 +77,11 @@ export async function createRun({ (agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter)) ) { reasoningKey = 'reasoning'; + } else if ( + (llmConfig as OpenAIClientOptions).useResponsesApi === true && + (provider === Providers.OPENAI || provider === Providers.AZURE) + ) { + reasoningKey = 'reasoning'; } const graphConfig: StandardGraphConfig = { diff --git a/packages/api/src/endpoints/openai/llm.ts b/packages/api/src/endpoints/openai/llm.ts index ddf61016e8..0c3135c554 100644 --- a/packages/api/src/endpoints/openai/llm.ts +++ b/packages/api/src/endpoints/openai/llm.ts @@ -1,9 +1,23 @@ import { ProxyAgent } from 'undici'; -import { KnownEndpoints } from 'librechat-data-provider'; +import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider'; +import type { OpenAI } from 'openai'; import type * as t from '~/types'; import { sanitizeModelName, constructAzureURL } from '~/utils/azure'; import { isEnabled } from '~/utils/common'; +function hasReasoningParams({ + reasoning_effort, + reasoning_summary, +}: { + reasoning_effort?: string | null; + reasoning_summary?: string | null; +}): boolean { + return ( + (reasoning_effort != null && reasoning_effort !== '') || + (reasoning_summary != null && reasoning_summary !== '') + ); +} + /** * Generates configuration options for creating a language model (LLM) instance. * @param apiKey - The API key for authentication. 
@@ -17,7 +31,7 @@ export function getOpenAIConfig( endpoint?: string | null, ): t.LLMConfigResult { const { - modelOptions = {}, + modelOptions: _modelOptions = {}, reverseProxyUrl, defaultQuery, headers, @@ -27,7 +41,7 @@ export function getOpenAIConfig( addParams, dropParams, } = options; - + const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions; const llmConfig: Partial & Partial = Object.assign( { streaming, @@ -40,39 +54,6 @@ export function getOpenAIConfig( Object.assign(llmConfig, addParams); } - // Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` - if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) { - const searchExcludeParams = [ - 'frequency_penalty', - 'presence_penalty', - 'temperature', - 'top_p', - 'top_k', - 'stop', - 'logit_bias', - 'seed', - 'response_format', - 'n', - 'logprobs', - 'user', - ]; - - const updatedDropParams = dropParams || []; - const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])]; - - combinedDropParams.forEach((param) => { - if (param in llmConfig) { - delete llmConfig[param as keyof t.ClientOptions]; - } - }); - } else if (dropParams && Array.isArray(dropParams)) { - dropParams.forEach((param) => { - if (param in llmConfig) { - delete llmConfig[param as keyof t.ClientOptions]; - } - }); - } - let useOpenRouter = false; const configOptions: t.OpenAIConfiguration = {}; @@ -139,11 +120,19 @@ export function getOpenAIConfig( configOptions.organization = process.env.OPENAI_ORGANIZATION; } - if (useOpenRouter && llmConfig.reasoning_effort != null) { - llmConfig.reasoning = { - effort: llmConfig.reasoning_effort, - }; - delete llmConfig.reasoning_effort; + if ( + hasReasoningParams({ reasoning_effort, reasoning_summary }) && + (llmConfig.useResponsesApi === true || useOpenRouter) + ) { + llmConfig.reasoning = removeNullishValues( + { + effort: reasoning_effort, + summary: reasoning_summary, + }, + true, + ) as 
OpenAI.Reasoning; + } else if (hasReasoningParams({ reasoning_effort })) { + llmConfig.reasoning_effort = reasoning_effort; } if (llmConfig.max_tokens != null) { @@ -151,6 +140,43 @@ export function getOpenAIConfig( delete llmConfig.max_tokens; } + /** + * Note: OpenAI Web Search models do not support any known parameters besides `max_tokens` + */ + if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) { + const searchExcludeParams = [ + 'frequency_penalty', + 'presence_penalty', + 'reasoning', + 'reasoning_effort', + 'temperature', + 'top_p', + 'top_k', + 'stop', + 'logit_bias', + 'seed', + 'response_format', + 'n', + 'logprobs', + 'user', + ]; + + const updatedDropParams = dropParams || []; + const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])]; + + combinedDropParams.forEach((param) => { + if (param in llmConfig) { + delete llmConfig[param as keyof t.ClientOptions]; + } + }); + } else if (dropParams && Array.isArray(dropParams)) { + dropParams.forEach((param) => { + if (param in llmConfig) { + delete llmConfig[param as keyof t.ClientOptions]; + } + }); + } + return { llmConfig, configOptions, diff --git a/packages/api/src/types/run.ts b/packages/api/src/types/run.ts index 72c61a587f..81dce780d4 100644 --- a/packages/api/src/types/run.ts +++ b/packages/api/src/types/run.ts @@ -1,4 +1,4 @@ -import type { Providers } from '@librechat/agents'; +import type { Providers, ClientOptions } from '@librechat/agents'; import type { AgentModelParameters } from 'librechat-data-provider'; import type { OpenAIConfiguration } from './openai'; @@ -8,4 +8,5 @@ export type RunLLMConfig = { streamUsage: boolean; usage?: boolean; configuration?: OpenAIConfiguration; -} & AgentModelParameters; +} & AgentModelParameters & + ClientOptions; diff --git a/packages/data-provider/specs/generate.spec.ts b/packages/data-provider/specs/generate.spec.ts index 64ca86a036..2c3cda0f17 100644 --- a/packages/data-provider/specs/generate.spec.ts +++ 
b/packages/data-provider/specs/generate.spec.ts @@ -1,4 +1,3 @@ -/* eslint-disable jest/no-conditional-expect */ import { ZodError, z } from 'zod'; import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate'; import type { SettingsConfiguration } from '../src/generate'; @@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => { expect(result['data']).toEqual({ testEnum: 'option2' }); }); + it('should generate a schema for enum settings with empty string option', () => { + const settings: SettingsConfiguration = [ + { + key: 'testEnumWithEmpty', + description: 'A test enum setting with empty string', + type: 'enum', + default: '', + options: ['', 'option1', 'option2'], + enumMappings: { + '': 'None', + option1: 'First Option', + option2: 'Second Option', + }, + component: 'slider', + columnSpan: 2, + label: 'Test Enum with Empty String', + }, + ]; + + const schema = generateDynamicSchema(settings); + const result = schema.safeParse({ testEnumWithEmpty: '' }); + + expect(result.success).toBeTruthy(); + expect(result['data']).toEqual({ testEnumWithEmpty: '' }); + + // Test with non-empty option + const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' }); + expect(result2.success).toBeTruthy(); + expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' }); + }); + it('should fail for incorrect enum value', () => { const settings: SettingsConfiguration = [ { @@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => { expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError); }); + + // Test for incomplete enumMappings + test('should throw error for incomplete enumMappings', () => { + const settingsWithIncompleteEnumMappings: SettingsConfiguration = [ + { + key: 'displayMode', + type: 'enum', + component: 'dropdown', + options: ['light', 'dark', 'auto'], + enumMappings: { + light: 'Light Mode', + dark: 'Dark Mode', + // Missing mapping for 'auto' + }, + optionType: 
OptionTypes.Custom, + }, + ]; + + expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError); + }); + + // Test for complete enumMappings including empty string + test('should not throw error for complete enumMappings including empty string', () => { + const settingsWithCompleteEnumMappings: SettingsConfiguration = [ + { + key: 'selectionMode', + type: 'enum', + component: 'slider', + options: ['', 'single', 'multiple'], + enumMappings: { + '': 'None', + single: 'Single Selection', + multiple: 'Multiple Selection', + }, + default: '', + optionType: OptionTypes.Custom, + }, + ]; + + expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow(); + }); }); const settingsConfiguration: SettingsConfiguration = [ @@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [ { key: 'presence_penalty', description: - 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.', + "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", type: 'number', default: 0, range: { @@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [ { key: 'frequency_penalty', description: - 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.', + "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", type: 'number', default: 0, range: { diff --git a/packages/data-provider/src/generate.ts b/packages/data-provider/src/generate.ts index bf0b2c1acd..21f63a34d9 100644 --- a/packages/data-provider/src/generate.ts +++ b/packages/data-provider/src/generate.ts @@ -467,7 +467,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi } /* Default value checks */ - if (setting.type === SettingTypes.Number && isNaN(setting.default as number) && setting.default != null) { + if ( + setting.type === SettingTypes.Number && + isNaN(setting.default as number) && + setting.default != null + ) { errors.push({ code: ZodIssueCode.custom, message: `Invalid default value for setting ${setting.key}. Must be a number.`, @@ -475,7 +479,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi }); } - if (setting.type === SettingTypes.Boolean && typeof setting.default !== 'boolean' && setting.default != null) { + if ( + setting.type === SettingTypes.Boolean && + typeof setting.default !== 'boolean' && + setting.default != null + ) { errors.push({ code: ZodIssueCode.custom, message: `Invalid default value for setting ${setting.key}. 
Must be a boolean.`, @@ -485,7 +493,8 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi if ( (setting.type === SettingTypes.String || setting.type === SettingTypes.Enum) && - typeof setting.default !== 'string' && setting.default != null + typeof setting.default !== 'string' && + setting.default != null ) { errors.push({ code: ZodIssueCode.custom, @@ -520,6 +529,19 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi path: ['default'], }); } + + // Validate enumMappings + if (setting.enumMappings && setting.type === SettingTypes.Enum && setting.options) { + for (const option of setting.options) { + if (!(option in setting.enumMappings)) { + errors.push({ + code: ZodIssueCode.custom, + message: `Missing enumMapping for option "${option}" in setting ${setting.key}.`, + path: ['enumMappings'], + }); + } + } + } } if (errors.length > 0) { diff --git a/packages/data-provider/src/parameterSettings.ts b/packages/data-provider/src/parameterSettings.ts index 8b1dd222a4..91de6a83af 100644 --- a/packages/data-provider/src/parameterSettings.ts +++ b/packages/data-provider/src/parameterSettings.ts @@ -4,6 +4,7 @@ import { openAISettings, googleSettings, ReasoningEffort, + ReasoningSummary, BedrockProviders, anthropicSettings, } from './types'; @@ -71,6 +72,11 @@ const baseDefinitions: Record = { default: ImageDetail.auto, component: 'slider', options: [ImageDetail.low, ImageDetail.auto, ImageDetail.high], + enumMappings: { + [ImageDetail.low]: 'com_ui_low', + [ImageDetail.auto]: 'com_ui_auto', + [ImageDetail.high]: 'com_ui_high', + }, optionType: 'conversation', columnSpan: 2, }, @@ -211,9 +217,57 @@ const openAIParams: Record = { description: 'com_endpoint_openai_reasoning_effort', descriptionCode: true, type: 'enum', - default: ReasoningEffort.medium, + default: ReasoningEffort.none, component: 'slider', - options: [ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high], + options: [ + 
ReasoningEffort.none, + ReasoningEffort.low, + ReasoningEffort.medium, + ReasoningEffort.high, + ], + enumMappings: { + [ReasoningEffort.none]: 'com_ui_none', + [ReasoningEffort.low]: 'com_ui_low', + [ReasoningEffort.medium]: 'com_ui_medium', + [ReasoningEffort.high]: 'com_ui_high', + }, + optionType: 'model', + columnSpan: 4, + }, + useResponsesApi: { + key: 'useResponsesApi', + label: 'com_endpoint_use_responses_api', + labelCode: true, + description: 'com_endpoint_openai_use_responses_api', + descriptionCode: true, + type: 'boolean', + default: false, + component: 'switch', + optionType: 'model', + showDefault: false, + columnSpan: 2, + }, + reasoning_summary: { + key: 'reasoning_summary', + label: 'com_endpoint_reasoning_summary', + labelCode: true, + description: 'com_endpoint_openai_reasoning_summary', + descriptionCode: true, + type: 'enum', + default: ReasoningSummary.none, + component: 'slider', + options: [ + ReasoningSummary.none, + ReasoningSummary.auto, + ReasoningSummary.concise, + ReasoningSummary.detailed, + ], + enumMappings: { + [ReasoningSummary.none]: 'com_ui_none', + [ReasoningSummary.auto]: 'com_ui_auto', + [ReasoningSummary.concise]: 'com_ui_concise', + [ReasoningSummary.detailed]: 'com_ui_detailed', + }, optionType: 'model', columnSpan: 4, }, @@ -526,6 +580,8 @@ const openAI: SettingsConfiguration = [ librechat.resendFiles, baseDefinitions.imageDetail, openAIParams.reasoning_effort, + openAIParams.useResponsesApi, + openAIParams.reasoning_summary, ]; const openAICol1: SettingsConfiguration = [ @@ -542,9 +598,11 @@ const openAICol2: SettingsConfiguration = [ openAIParams.frequency_penalty, openAIParams.presence_penalty, baseDefinitions.stop, - openAIParams.reasoning_effort, librechat.resendFiles, baseDefinitions.imageDetail, + openAIParams.reasoning_effort, + openAIParams.useResponsesApi, + openAIParams.reasoning_summary, ]; const anthropicConfig: SettingsConfiguration = [ diff --git a/packages/data-provider/src/schemas.ts 
b/packages/data-provider/src/schemas.ts index 463150d36f..340e60d34a 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -112,11 +112,19 @@ export enum ImageDetail { } export enum ReasoningEffort { + none = '', low = 'low', medium = 'medium', high = 'high', } +export enum ReasoningSummary { + none = '', + auto = 'auto', + concise = 'concise', + detailed = 'detailed', +} + export const imageDetailNumeric = { [ImageDetail.low]: 0, [ImageDetail.auto]: 1, @@ -131,6 +139,7 @@ export const imageDetailValue = { export const eImageDetailSchema = z.nativeEnum(ImageDetail); export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort); +export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary); export const defaultAssistantFormValues = { assistant: '', @@ -619,8 +628,11 @@ export const tConversationSchema = z.object({ file_ids: z.array(z.string()).optional(), /* vision */ imageDetail: eImageDetailSchema.optional(), - /* OpenAI: o1 only */ - reasoning_effort: eReasoningEffortSchema.optional(), + /* OpenAI: Reasoning models only */ + reasoning_effort: eReasoningEffortSchema.optional().nullable(), + reasoning_summary: eReasoningSummarySchema.optional().nullable(), + /* OpenAI: use Responses API */ + useResponsesApi: z.boolean().optional(), /* assistant */ assistant_id: z.string().optional(), /* agents */ @@ -717,6 +729,12 @@ export const tQueryParamsSchema = tConversationSchema top_p: true, /** @endpoints openAI, custom, azureOpenAI */ max_tokens: true, + /** @endpoints openAI, custom, azureOpenAI */ + reasoning_effort: true, + /** @endpoints openAI, custom, azureOpenAI */ + reasoning_summary: true, + /** @endpoints openAI, custom, azureOpenAI */ + useResponsesApi: true, /** @endpoints google, anthropic, bedrock */ topP: true, /** @endpoints google, anthropic */ @@ -1044,10 +1062,12 @@ export const openAIBaseSchema = tConversationSchema.pick({ maxContextTokens: true, max_tokens: true, reasoning_effort: true, + 
reasoning_summary: true, + useResponsesApi: true, }); export const openAISchema = openAIBaseSchema - .transform((obj: Partial) => removeNullishValues(obj)) + .transform((obj: Partial) => removeNullishValues(obj, true)) .catch(() => ({})); export const compactGoogleSchema = googleBaseSchema diff --git a/packages/data-schemas/src/schema/defaults.ts b/packages/data-schemas/src/schema/defaults.ts index d6e8ed851c..d42771d09c 100644 --- a/packages/data-schemas/src/schema/defaults.ts +++ b/packages/data-schemas/src/schema/defaults.ts @@ -131,8 +131,14 @@ export const conversationPreset = { max_tokens: { type: Number, }, - /** omni models only */ + useResponsesApi: { + type: Boolean, + }, + /** Reasoning models only */ reasoning_effort: { type: String, }, + reasoning_summary: { + type: String, + }, }; diff --git a/packages/data-schemas/src/schema/preset.ts b/packages/data-schemas/src/schema/preset.ts index 95f1e276d9..1b128413f3 100644 --- a/packages/data-schemas/src/schema/preset.ts +++ b/packages/data-schemas/src/schema/preset.ts @@ -46,6 +46,8 @@ export interface IPreset extends Document { maxContextTokens?: number; max_tokens?: number; reasoning_effort?: string; + reasoning_summary?: string; + useResponsesApi?: boolean; // end of additional fields agentOptions?: unknown; } diff --git a/packages/data-schemas/src/types/convo.ts b/packages/data-schemas/src/types/convo.ts index f088db8c92..b97f179b53 100644 --- a/packages/data-schemas/src/types/convo.ts +++ b/packages/data-schemas/src/types/convo.ts @@ -45,6 +45,8 @@ export interface IConversation extends Document { maxContextTokens?: number; max_tokens?: number; reasoning_effort?: string; + reasoning_summary?: string; + useResponsesApi?: boolean; // Additional fields files?: string[]; expiredAt?: Date;