🪐 feat: Initial OpenAI Responses API Support (#8149)

* chore: update @librechat/agents to v2.4.47

* WIP: temporarily auto-toggle Responses API for o1/o3-pro

* feat: Enable Responses API for OpenAI models

- Updated the OpenAI client initialization to check for the useResponsesApi parameter in model options.
- Added translations for enabling the Responses API in the UI.
- Introduced useResponsesApi parameter in data provider settings and schemas.
- Updated relevant schemas to include useResponsesApi for conversation and preset configurations.
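
For reference, the new flag is just an optional boolean threaded from the conversation/preset schemas into the client's model options. A minimal sketch of that shape, assuming a zod-based schema like the ones touched in the diffs below (names are illustrative, not the exact LibreChat wiring):

import { z } from 'zod';

// Conversation-level schema fragment; `useResponsesApi` is optional and defaults off.
const conversationSchema = z.object({
  model: z.string(),
  useResponsesApi: z.boolean().optional(),
});
type Conversation = z.infer<typeof conversationSchema>;

// When building the LLM config, the flag is passed through only when explicitly set.
function toModelOptions(convo: Conversation) {
  return {
    model: convo.model,
    ...(convo.useResponsesApi === true ? { useResponsesApi: true } : {}),
  };
}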

* refactor: Remove useResponsesApi check from OpenAI client initialization and update translation for Responses API

- Removed the check for useResponsesApi in the OpenAI client initialization.
- Updated the translation for enabling the Responses API to clarify its functionality.

* chore: update @librechat/agents dependency to version 2.4.48

* chore: update @librechat/agents dependency to version 2.4.49

* chore: linting

* chore: linting

* feat: Enhance DynamicSlider and validation for enumMappings

- Added support for enumMappings in DynamicSlider to display values correctly based on enum settings.
- Implemented validation for enumMappings in the generate function to ensure all options have corresponding mappings.
- Added tests for handling empty string options and incomplete enumMappings in the generate.spec.ts file.
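
A hedged sketch of the enumMappings contract the new validation enforces: every enum option, including the empty string, must have a corresponding display mapping. Types are simplified for illustration; the real definitions live in the generate module changed below.

interface EnumSetting {
  key: string;
  type: 'enum';
  options: string[];
  enumMappings?: Record<string, string>;
}

// Collect an error for every option that lacks a mapping.
function findMissingEnumMappings(setting: EnumSetting): string[] {
  const errors: string[] = [];
  if (setting.enumMappings && setting.options) {
    for (const option of setting.options) {
      if (!(option in setting.enumMappings)) {
        errors.push(`Missing enumMapping for option "${option}" in setting ${setting.key}.`);
      }
    }
  }
  return errors;
}

// '' is a legal option key, so "none"-style defaults still need an explicit mapping:
findMissingEnumMappings({
  key: 'reasoning_summary',
  type: 'enum',
  options: ['', 'auto', 'concise', 'detailed'],
  enumMappings: { '': 'None', auto: 'Auto', concise: 'Concise', detailed: 'Detailed' },
}); // => []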

* feat: Enhance DynamicSlider localization support

- Added localization handling for mapped values in DynamicSlider when using enumMappings.
- Updated the logic to check if the mapped value is a localization key and return the localized string if applicable.
- Adjusted dependencies in useCallback hooks to include localize for proper functionality.
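
The lookup-then-localize step boils down to a prefix check: mapped values that look like translation keys (the 'com_' prefix) go through the i18n function, and anything else renders verbatim. A minimal standalone sketch, with `localize` standing in for the hook used in the component (see the DynamicSlider hunk below for the real version):

function getDisplayValue(
  value: string | number | null | undefined,
  enumMappings: Record<string, string>,
  localize: (key: string) => string | undefined,
): string {
  if (value == null) {
    return '';
  }
  const mapped = enumMappings[String(value)];
  if (mapped == null) {
    return String(value); // no mapping: show the raw value
  }
  // Values prefixed with 'com_' are treated as localization keys.
  return mapped.startsWith('com_') ? (localize(mapped) ?? mapped) : mapped;
}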

* feat: Add reasoning summary and effort options to OpenAI configuration and UI
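
Mechanically (see the getOpenAIConfig hunk further down), the two params collapse into a single `reasoning` object when the Responses API or OpenRouter is in play, and fall back to the flat `reasoning_effort` param otherwise. A rough sketch of that branching, with simplified types:

type ReasoningParams = {
  reasoning_effort?: string | null;
  reasoning_summary?: string | null;
};

const hasValue = (v?: string | null): v is string => v != null && v !== '';

function applyReasoning(
  llmConfig: Record<string, unknown>,
  { reasoning_effort, reasoning_summary }: ReasoningParams,
  useResponsesApi: boolean,
): void {
  if ((hasValue(reasoning_effort) || hasValue(reasoning_summary)) && useResponsesApi) {
    // Responses API shape: nested `reasoning` object, nullish fields dropped.
    llmConfig.reasoning = {
      ...(hasValue(reasoning_effort) ? { effort: reasoning_effort } : {}),
      ...(hasValue(reasoning_summary) ? { summary: reasoning_summary } : {}),
    };
  } else if (hasValue(reasoning_effort)) {
    // Chat Completions shape: flat `reasoning_effort` param.
    llmConfig.reasoning_effort = reasoning_effort;
  }
}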

* feat: Add enumMappings for ImageDetail options in parameter settings

* style: Improve styling for DynamicSlider component labels and inputs

* chore: Update reasoning effort description and parameter order for OpenAI params

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Danny Avila, 2025-06-30 18:34:47 -04:00 (committed by GitHub)
parent 20100e120b
commit f869d772f7
15 changed files with 355 additions and 83 deletions


@@ -48,7 +48,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.49",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",


@@ -18,6 +18,7 @@ function DynamicSlider({
setOption,
optionType,
options,
enumMappings,
readonly = false,
showDefault = false,
includeInput = true,
@@ -60,24 +61,68 @@
const enumToNumeric = useMemo(() => {
if (isEnum && options) {
return options.reduce((acc, mapping, index) => {
return options.reduce(
(acc, mapping, index) => {
acc[mapping] = index;
return acc;
}, {} as Record<string, number>);
},
{} as Record<string, number>,
);
}
return {};
}, [isEnum, options]);
const valueToEnumOption = useMemo(() => {
if (isEnum && options) {
return options.reduce((acc, option, index) => {
return options.reduce(
(acc, option, index) => {
acc[index] = option;
return acc;
}, {} as Record<number, string>);
},
{} as Record<number, string>,
);
}
return {};
}, [isEnum, options]);
const getDisplayValue = useCallback(
(value: string | number | undefined | null): string => {
if (isEnum && enumMappings && value != null) {
const stringValue = String(value);
// Check if the value exists in enumMappings
if (stringValue in enumMappings) {
const mappedValue = String(enumMappings[stringValue]);
// Check if the mapped value is a localization key
if (mappedValue.startsWith('com_')) {
return localize(mappedValue as TranslationKeys) ?? mappedValue;
}
return mappedValue;
}
}
// Always return a string for Input component compatibility
if (value != null) {
return String(value);
}
return String(defaultValue ?? '');
},
[isEnum, enumMappings, defaultValue, localize],
);
const getDefaultDisplayValue = useCallback((): string => {
if (defaultValue != null && enumMappings) {
const stringDefault = String(defaultValue);
if (stringDefault in enumMappings) {
const mappedValue = String(enumMappings[stringDefault]);
// Check if the mapped value is a localization key
if (mappedValue.startsWith('com_')) {
return localize(mappedValue as TranslationKeys) ?? mappedValue;
}
return mappedValue;
}
}
return String(defaultValue ?? '');
}, [defaultValue, enumMappings, localize]);
const handleValueChange = useCallback(
(value: number) => {
if (isEnum) {
@@ -115,12 +160,12 @@ function DynamicSlider({
<div className="flex w-full items-center justify-between">
<Label
htmlFor={`${settingKey}-dynamic-setting`}
className="text-left text-sm font-medium"
className="break-words text-left text-sm font-medium"
>
{labelCode ? localize(label as TranslationKeys) ?? label : label || settingKey}{' '}
{labelCode ? (localize(label as TranslationKeys) ?? label) : label || settingKey}{' '}
{showDefault && (
<small className="opacity-40">
({localize('com_endpoint_default')}: {defaultValue})
({localize('com_endpoint_default')}: {getDefaultDisplayValue()})
</small>
)}
</Label>
@@ -132,13 +177,13 @@
onChange={(value) => setInputValue(Number(value))}
max={range ? range.max : (options?.length ?? 0) - 1}
min={range ? range.min : 0}
step={range ? range.step ?? 1 : 1}
step={range ? (range.step ?? 1) : 1}
controls={false}
className={cn(
defaultTextProps,
cn(
optionText,
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200',
),
)}
/>
@@ -146,13 +191,13 @@
<Input
id={`${settingKey}-dynamic-setting-input`}
disabled={readonly}
value={selectedValue ?? defaultValue}
value={getDisplayValue(selectedValue)}
onChange={() => ({})}
className={cn(
defaultTextProps,
cn(
optionText,
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200',
),
)}
/>
@@ -164,19 +209,23 @@
value={[
isEnum
? enumToNumeric[(selectedValue as number) ?? '']
: (inputValue as number) ?? (defaultValue as number),
: ((inputValue as number) ?? (defaultValue as number)),
]}
onValueChange={(value) => handleValueChange(value[0])}
onDoubleClick={() => setInputValue(defaultValue as string | number)}
max={max}
min={range ? range.min : 0}
step={range ? range.step ?? 1 : 1}
step={range ? (range.step ?? 1) : 1}
className="flex h-4 w-full"
/>
</HoverCardTrigger>
{description && (
<OptionHover
description={descriptionCode ? localize(description as TranslationKeys) ?? description : description}
description={
descriptionCode
? (localize(description as TranslationKeys) ?? description)
: description
}
side={ESide.Left}
/>
)}


@@ -225,12 +225,14 @@
"com_endpoint_openai_max_tokens": "Optional 'max_tokens' field, representing the maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the models context length. You may experience errors if this number exceeds the max context tokens.",
"com_endpoint_openai_pres": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
"com_endpoint_openai_prompt_prefix_placeholder": "Set custom instructions to include in System Message. Default: none",
"com_endpoint_openai_reasoning_effort": "o1 and o3 models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.",
"com_endpoint_openai_reasoning_effort": "Reasoning models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.",
"com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none,auto, concise, or detailed.",
"com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.",
"com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.",
"com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.",
"com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
"com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
"com_endpoint_openai_use_responses_api": "Use the Responses API instead of Chat Completions, which includes extended features from OpenAI. Required for o1-pro, o3-pro, and to enable reasoning summaries.",
"com_endpoint_output": "Output",
"com_endpoint_plug_image_detail": "Image Detail",
"com_endpoint_plug_resend_files": "Resend Files",
@@ -261,6 +263,7 @@
"com_endpoint_prompt_prefix_assistants_placeholder": "Set additional instructions or context on top of the Assistant's main instructions. Ignored if empty.",
"com_endpoint_prompt_prefix_placeholder": "Set custom instructions or context. Ignored if empty.",
"com_endpoint_reasoning_effort": "Reasoning Effort",
"com_endpoint_reasoning_summary": "Reasoning Summary",
"com_endpoint_save_as_preset": "Save As Preset",
"com_endpoint_search": "Search endpoint by name",
"com_endpoint_search_endpoint_models": "Search {{0}} models...",
@@ -276,6 +279,7 @@
"com_endpoint_top_k": "Top K",
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "Use Active Assistant",
"com_endpoint_use_responses_api": "Use Responses API",
"com_error_expired_user_key": "Provided key for {{0}} expired at {{1}}. Please provide a new key and try again.",
"com_error_files_dupe": "Duplicate file detected.",
"com_error_files_empty": "Empty files are not allowed.",
@@ -820,6 +824,11 @@
"com_ui_loading": "Loading...",
"com_ui_locked": "Locked",
"com_ui_logo": "{{0}} Logo",
"com_ui_low": "Low",
"com_ui_concise": "Concise",
"com_ui_detailed": "Detailed",
"com_ui_high": "High",
"com_ui_medium": "Medium",
"com_ui_manage": "Manage",
"com_ui_max_tags": "Maximum number allowed is {{0}}, using latest values.",
"com_ui_mcp_dialog_desc": "Please enter the necessary information below.",

package-lock.json

@@ -64,7 +64,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.49",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
@@ -19436,9 +19436,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "2.4.46",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.46.tgz",
"integrity": "sha512-zR27U19/WGF3HN64oBbiaFgjjWHaF7BjYzRFWzQKEkk+iEzCe59IpuEZUizQ54YcY02nhhh6S3MNUjhAJwMYVA==",
"version": "2.4.49",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.49.tgz",
"integrity": "sha512-Bnp/PZsg1VgnmGS80tW4ssKpcqUZ7xysKesV/8gGaUBF1VDBiYBh0gC6ugfJhltNOv93rEVSucjPlTAuHimNCg==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.23",
@@ -46624,7 +46624,7 @@
"typescript": "^5.0.4"
},
"peerDependencies": {
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.49",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"axios": "^1.8.2",


@@ -69,7 +69,7 @@
"registry": "https://registry.npmjs.org/"
},
"peerDependencies": {
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.49",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"axios": "^1.8.2",


@@ -1,6 +1,7 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
OpenAIClientOptions,
StandardGraphConfig,
EventHandler,
GenericTool,
@@ -76,6 +77,11 @@ export async function createRun({
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
} else if (
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
(provider === Providers.OPENAI || provider === Providers.AZURE)
) {
reasoningKey = 'reasoning';
}
const graphConfig: StandardGraphConfig = {


@@ -1,9 +1,23 @@
import { ProxyAgent } from 'undici';
import { KnownEndpoints } from 'librechat-data-provider';
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
import type { OpenAI } from 'openai';
import type * as t from '~/types';
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
import { isEnabled } from '~/utils/common';
function hasReasoningParams({
reasoning_effort,
reasoning_summary,
}: {
reasoning_effort?: string | null;
reasoning_summary?: string | null;
}): boolean {
return (
(reasoning_effort != null && reasoning_effort !== '') ||
(reasoning_summary != null && reasoning_summary !== '')
);
}
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param apiKey - The API key for authentication.
@@ -17,7 +31,7 @@ export function getOpenAIConfig(
endpoint?: string | null,
): t.LLMConfigResult {
const {
modelOptions = {},
modelOptions: _modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
@@ -27,7 +41,7 @@
addParams,
dropParams,
} = options;
const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
{
streaming,
@@ -40,39 +54,6 @@
Object.assign(llmConfig, addParams);
}
// Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
let useOpenRouter = false;
const configOptions: t.OpenAIConfiguration = {};
@@ -139,11 +120,19 @@ export function getOpenAIConfig(
configOptions.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
if (
hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
(llmConfig.useResponsesApi === true || useOpenRouter)
) {
llmConfig.reasoning = removeNullishValues(
{
effort: reasoning_effort,
summary: reasoning_summary,
},
true,
) as OpenAI.Reasoning;
} else if (hasReasoningParams({ reasoning_effort })) {
llmConfig.reasoning_effort = reasoning_effort;
}
if (llmConfig.max_tokens != null) {
@@ -151,6 +140,43 @@
delete llmConfig.max_tokens;
}
/**
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
*/
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'reasoning',
'reasoning_effort',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
return {
llmConfig,
configOptions,


@@ -1,4 +1,4 @@
import type { Providers } from '@librechat/agents';
import type { Providers, ClientOptions } from '@librechat/agents';
import type { AgentModelParameters } from 'librechat-data-provider';
import type { OpenAIConfiguration } from './openai';
@@ -8,4 +8,5 @@ export type RunLLMConfig = {
streamUsage: boolean;
usage?: boolean;
configuration?: OpenAIConfiguration;
} & AgentModelParameters;
} & AgentModelParameters &
ClientOptions;


@@ -1,4 +1,3 @@
/* eslint-disable jest/no-conditional-expect */
import { ZodError, z } from 'zod';
import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate';
import type { SettingsConfiguration } from '../src/generate';
@@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => {
expect(result['data']).toEqual({ testEnum: 'option2' });
});
it('should generate a schema for enum settings with empty string option', () => {
const settings: SettingsConfiguration = [
{
key: 'testEnumWithEmpty',
description: 'A test enum setting with empty string',
type: 'enum',
default: '',
options: ['', 'option1', 'option2'],
enumMappings: {
'': 'None',
option1: 'First Option',
option2: 'Second Option',
},
component: 'slider',
columnSpan: 2,
label: 'Test Enum with Empty String',
},
];
const schema = generateDynamicSchema(settings);
const result = schema.safeParse({ testEnumWithEmpty: '' });
expect(result.success).toBeTruthy();
expect(result['data']).toEqual({ testEnumWithEmpty: '' });
// Test with non-empty option
const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' });
expect(result2.success).toBeTruthy();
expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' });
});
it('should fail for incorrect enum value', () => {
const settings: SettingsConfiguration = [
{
@@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => {
expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError);
});
// Test for incomplete enumMappings
test('should throw error for incomplete enumMappings', () => {
const settingsWithIncompleteEnumMappings: SettingsConfiguration = [
{
key: 'displayMode',
type: 'enum',
component: 'dropdown',
options: ['light', 'dark', 'auto'],
enumMappings: {
light: 'Light Mode',
dark: 'Dark Mode',
// Missing mapping for 'auto'
},
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError);
});
// Test for complete enumMappings including empty string
test('should not throw error for complete enumMappings including empty string', () => {
const settingsWithCompleteEnumMappings: SettingsConfiguration = [
{
key: 'selectionMode',
type: 'enum',
component: 'slider',
options: ['', 'single', 'multiple'],
enumMappings: {
'': 'None',
single: 'Single Selection',
multiple: 'Multiple Selection',
},
default: '',
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow();
});
});
const settingsConfiguration: SettingsConfiguration = [
@@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'presence_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
type: 'number',
default: 0,
range: {
@@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'frequency_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
type: 'number',
default: 0,
range: {


@@ -467,7 +467,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): void {
}
/* Default value checks */
if (setting.type === SettingTypes.Number && isNaN(setting.default as number) && setting.default != null) {
if (
setting.type === SettingTypes.Number &&
isNaN(setting.default as number) &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a number.`,
@@ -475,7 +479,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): void {
});
}
if (setting.type === SettingTypes.Boolean && typeof setting.default !== 'boolean' && setting.default != null) {
if (
setting.type === SettingTypes.Boolean &&
typeof setting.default !== 'boolean' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a boolean.`,
@@ -485,7 +493,8 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): void {
if (
(setting.type === SettingTypes.String || setting.type === SettingTypes.Enum) &&
typeof setting.default !== 'string' && setting.default != null
typeof setting.default !== 'string' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
@@ -520,6 +529,19 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): void {
path: ['default'],
});
}
// Validate enumMappings
if (setting.enumMappings && setting.type === SettingTypes.Enum && setting.options) {
for (const option of setting.options) {
if (!(option in setting.enumMappings)) {
errors.push({
code: ZodIssueCode.custom,
message: `Missing enumMapping for option "${option}" in setting ${setting.key}.`,
path: ['enumMappings'],
});
}
}
}
}
if (errors.length > 0) {


@@ -4,6 +4,7 @@ import {
openAISettings,
googleSettings,
ReasoningEffort,
ReasoningSummary,
BedrockProviders,
anthropicSettings,
} from './types';
@@ -71,6 +72,11 @@ const baseDefinitions: Record<string, SettingDefinition> = {
default: ImageDetail.auto,
component: 'slider',
options: [ImageDetail.low, ImageDetail.auto, ImageDetail.high],
enumMappings: {
[ImageDetail.low]: 'com_ui_low',
[ImageDetail.auto]: 'com_ui_auto',
[ImageDetail.high]: 'com_ui_high',
},
optionType: 'conversation',
columnSpan: 2,
},
@@ -211,9 +217,57 @@ const openAIParams: Record<string, SettingDefinition> = {
description: 'com_endpoint_openai_reasoning_effort',
descriptionCode: true,
type: 'enum',
default: ReasoningEffort.medium,
default: ReasoningEffort.none,
component: 'slider',
options: [ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high],
options: [
ReasoningEffort.none,
ReasoningEffort.low,
ReasoningEffort.medium,
ReasoningEffort.high,
],
enumMappings: {
[ReasoningEffort.none]: 'com_ui_none',
[ReasoningEffort.low]: 'com_ui_low',
[ReasoningEffort.medium]: 'com_ui_medium',
[ReasoningEffort.high]: 'com_ui_high',
},
optionType: 'model',
columnSpan: 4,
},
useResponsesApi: {
key: 'useResponsesApi',
label: 'com_endpoint_use_responses_api',
labelCode: true,
description: 'com_endpoint_openai_use_responses_api',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
reasoning_summary: {
key: 'reasoning_summary',
label: 'com_endpoint_reasoning_summary',
labelCode: true,
description: 'com_endpoint_openai_reasoning_summary',
descriptionCode: true,
type: 'enum',
default: ReasoningSummary.none,
component: 'slider',
options: [
ReasoningSummary.none,
ReasoningSummary.auto,
ReasoningSummary.concise,
ReasoningSummary.detailed,
],
enumMappings: {
[ReasoningSummary.none]: 'com_ui_none',
[ReasoningSummary.auto]: 'com_ui_auto',
[ReasoningSummary.concise]: 'com_ui_concise',
[ReasoningSummary.detailed]: 'com_ui_detailed',
},
optionType: 'model',
columnSpan: 4,
},
@@ -526,6 +580,8 @@ const openAI: SettingsConfiguration = [
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
];
const openAICol1: SettingsConfiguration = [
@@ -542,9 +598,11 @@ const openAICol2: SettingsConfiguration = [
openAIParams.frequency_penalty,
openAIParams.presence_penalty,
baseDefinitions.stop,
openAIParams.reasoning_effort,
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
];
const anthropicConfig: SettingsConfiguration = [


@@ -112,11 +112,19 @@ export enum ImageDetail {
}
export enum ReasoningEffort {
none = '',
low = 'low',
medium = 'medium',
high = 'high',
}
export enum ReasoningSummary {
none = '',
auto = 'auto',
concise = 'concise',
detailed = 'detailed',
}
export const imageDetailNumeric = {
[ImageDetail.low]: 0,
[ImageDetail.auto]: 1,
@@ -131,6 +139,7 @@ export const imageDetailValue = {
export const eImageDetailSchema = z.nativeEnum(ImageDetail);
export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
export const defaultAssistantFormValues = {
assistant: '',
@@ -619,8 +628,11 @@ export const tConversationSchema = z.object({
file_ids: z.array(z.string()).optional(),
/* vision */
imageDetail: eImageDetailSchema.optional(),
/* OpenAI: o1 only */
reasoning_effort: eReasoningEffortSchema.optional(),
/* OpenAI: Reasoning models only */
reasoning_effort: eReasoningEffortSchema.optional().nullable(),
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
/* OpenAI: use Responses API */
useResponsesApi: z.boolean().optional(),
/* assistant */
assistant_id: z.string().optional(),
/* agents */
@@ -717,6 +729,12 @@ export const tQueryParamsSchema = tConversationSchema
top_p: true,
/** @endpoints openAI, custom, azureOpenAI */
max_tokens: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_effort: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_summary: true,
/** @endpoints openAI, custom, azureOpenAI */
useResponsesApi: true,
/** @endpoints google, anthropic, bedrock */
topP: true,
/** @endpoints google, anthropic */
@@ -1044,10 +1062,12 @@ export const openAIBaseSchema = tConversationSchema.pick({
maxContextTokens: true,
max_tokens: true,
reasoning_effort: true,
reasoning_summary: true,
useResponsesApi: true,
});
export const openAISchema = openAIBaseSchema
.transform((obj: Partial<TConversation>) => removeNullishValues(obj))
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
.catch(() => ({}));
export const compactGoogleSchema = googleBaseSchema


@@ -131,8 +131,14 @@ export const conversationPreset = {
max_tokens: {
type: Number,
},
/** omni models only */
useResponsesApi: {
type: Boolean,
},
/** Reasoning models only */
reasoning_effort: {
type: String,
},
reasoning_summary: {
type: String,
},
};


@@ -46,6 +46,8 @@ export interface IPreset extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
// end of additional fields
agentOptions?: unknown;
}


@@ -45,6 +45,8 @@ export interface IConversation extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
// Additional fields
files?: string[];
expiredAt?: Date;