🪐 feat: Initial OpenAI Responses API Support (#8149)

* chore: update @librechat/agents to v2.4.47

* WIP: temporary auto-toggle responses api for o1/o3-pro

* feat: Enable Responses API for OpenAI models

- Updated the OpenAI client initialization to check for the useResponsesApi parameter in model options.
- Added translations for enabling the Responses API in the UI.
- Introduced useResponsesApi parameter in data provider settings and schemas.
- Updated relevant schemas to include useResponsesApi for conversation and preset configurations.

* refactor: Remove useResponsesApi check from OpenAI client initialization and update translation for Responses API

- Removed the check for useResponsesApi in the OpenAI client initialization.
- Updated the translation for enabling the Responses API to clarify its functionality.

* chore: update @librechat/agents dependency to version 2.4.48

* chore: update @librechat/agents dependency to version 2.4.49

* chore: linting

* chore: linting

* feat: Enhance DynamicSlider and validation for enumMappings

- Added support for enumMappings in DynamicSlider to display values correctly based on enum settings.
- Implemented validation for enumMappings in the generate function to ensure all options have corresponding mappings.
- Added tests for handling empty string options and incomplete enumMappings in the generate.spec.ts file.

* feat: Enhance DynamicSlider localization support

- Added localization handling for mapped values in DynamicSlider when using enumMappings.
- Updated the logic to check if the mapped value is a localization key and return the localized string if applicable.
- Adjusted dependencies in useCallback hooks to include localize for proper functionality.

* feat: Add reasoning summary and effort options to OpenAI configuration and UI

* feat: Add enumMappings for ImageDetail options in parameter settings

* style: Improve styling for DynamicSlider component labels and inputs

* chore: Update reasoning effort description and parameter order for OpenAI params

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
This commit is contained in:
Danny Avila 2025-06-30 18:34:47 -04:00 committed by GitHub
parent 20100e120b
commit f869d772f7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 355 additions and 83 deletions

View file

@@ -69,7 +69,7 @@
"registry": "https://registry.npmjs.org/"
},
"peerDependencies": {
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.49",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"axios": "^1.8.2",

View file

@@ -1,6 +1,7 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
OpenAIClientOptions,
StandardGraphConfig,
EventHandler,
GenericTool,
@ -76,6 +77,11 @@ export async function createRun({
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
} else if (
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
(provider === Providers.OPENAI || provider === Providers.AZURE)
) {
reasoningKey = 'reasoning';
}
const graphConfig: StandardGraphConfig = {

View file

@@ -1,9 +1,23 @@
import { ProxyAgent } from 'undici';
import { KnownEndpoints } from 'librechat-data-provider';
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
import type { OpenAI } from 'openai';
import type * as t from '~/types';
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
import { isEnabled } from '~/utils/common';
/**
 * Reports whether at least one reasoning-related parameter is meaningfully set.
 * A parameter counts as "set" only when it is neither nullish nor an empty
 * string — the empty string is the "none" sentinel used by the settings enums.
 */
function hasReasoningParams({
  reasoning_effort,
  reasoning_summary,
}: {
  reasoning_effort?: string | null;
  reasoning_summary?: string | null;
}): boolean {
  const isSet = (value?: string | null): boolean => value != null && value !== '';
  return isSet(reasoning_effort) || isSet(reasoning_summary);
}
/**
* Generates configuration options for creating a language model (LLM) instance.
* @param apiKey - The API key for authentication.
@ -17,7 +31,7 @@ export function getOpenAIConfig(
endpoint?: string | null,
): t.LLMConfigResult {
const {
modelOptions = {},
modelOptions: _modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
@ -27,7 +41,7 @@ export function getOpenAIConfig(
addParams,
dropParams,
} = options;
const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
{
streaming,
@ -40,39 +54,6 @@ export function getOpenAIConfig(
Object.assign(llmConfig, addParams);
}
// Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
let useOpenRouter = false;
const configOptions: t.OpenAIConfiguration = {};
@ -139,11 +120,19 @@ export function getOpenAIConfig(
configOptions.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
if (
hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
(llmConfig.useResponsesApi === true || useOpenRouter)
) {
llmConfig.reasoning = removeNullishValues(
{
effort: reasoning_effort,
summary: reasoning_summary,
},
true,
) as OpenAI.Reasoning;
} else if (hasReasoningParams({ reasoning_effort })) {
llmConfig.reasoning_effort = reasoning_effort;
}
if (llmConfig.max_tokens != null) {
@ -151,6 +140,43 @@ export function getOpenAIConfig(
delete llmConfig.max_tokens;
}
/**
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
*/
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'reasoning',
'reasoning_effort',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];
const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}
return {
llmConfig,
configOptions,

View file

@ -1,4 +1,4 @@
import type { Providers } from '@librechat/agents';
import type { Providers, ClientOptions } from '@librechat/agents';
import type { AgentModelParameters } from 'librechat-data-provider';
import type { OpenAIConfiguration } from './openai';
@ -8,4 +8,5 @@ export type RunLLMConfig = {
streamUsage: boolean;
usage?: boolean;
configuration?: OpenAIConfiguration;
} & AgentModelParameters;
} & AgentModelParameters &
ClientOptions;

View file

@ -1,4 +1,3 @@
/* eslint-disable jest/no-conditional-expect */
import { ZodError, z } from 'zod';
import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate';
import type { SettingsConfiguration } from '../src/generate';
@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => {
expect(result['data']).toEqual({ testEnum: 'option2' });
});
// Verifies that an empty string ('') is a legal enum option: it may appear in
// `options`, have its own entry in `enumMappings`, serve as the default, and
// still round-trip through the generated zod schema alongside regular options.
it('should generate a schema for enum settings with empty string option', () => {
const settings: SettingsConfiguration = [
{
key: 'testEnumWithEmpty',
description: 'A test enum setting with empty string',
type: 'enum',
default: '',
options: ['', 'option1', 'option2'],
enumMappings: {
'': 'None',
option1: 'First Option',
option2: 'Second Option',
},
component: 'slider',
columnSpan: 2,
label: 'Test Enum with Empty String',
},
];
const schema = generateDynamicSchema(settings);
// The empty-string option must parse successfully and be preserved verbatim.
const result = schema.safeParse({ testEnumWithEmpty: '' });
expect(result.success).toBeTruthy();
expect(result['data']).toEqual({ testEnumWithEmpty: '' });
// Test with non-empty option
const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' });
expect(result2.success).toBeTruthy();
expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' });
});
it('should fail for incorrect enum value', () => {
const settings: SettingsConfiguration = [
{
@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => {
expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError);
});
// Validation must reject a setting whose enumMappings does not cover every
// entry in `options` — here 'auto' is deliberately left unmapped, so
// validateSettingDefinitions is expected to raise a ZodError.
test('should throw error for incomplete enumMappings', () => {
const settingsWithIncompleteEnumMappings: SettingsConfiguration = [
{
key: 'displayMode',
type: 'enum',
component: 'dropdown',
options: ['light', 'dark', 'auto'],
enumMappings: {
light: 'Light Mode',
dark: 'Dark Mode',
// Missing mapping for 'auto' — this is the defect under test.
},
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError);
});
// Counterpart to the incomplete-mappings test: when every option — including
// the empty string — has a corresponding enumMappings entry, validation must
// pass without throwing.
test('should not throw error for complete enumMappings including empty string', () => {
const settingsWithCompleteEnumMappings: SettingsConfiguration = [
{
key: 'selectionMode',
type: 'enum',
component: 'slider',
options: ['', 'single', 'multiple'],
enumMappings: {
'': 'None',
single: 'Single Selection',
multiple: 'Multiple Selection',
},
default: '',
optionType: OptionTypes.Custom,
},
];
expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow();
});
});
const settingsConfiguration: SettingsConfiguration = [
@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'presence_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
type: 'number',
default: 0,
range: {
@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'frequency_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
type: 'number',
default: 0,
range: {

View file

@ -467,7 +467,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
}
/* Default value checks */
if (setting.type === SettingTypes.Number && isNaN(setting.default as number) && setting.default != null) {
if (
setting.type === SettingTypes.Number &&
isNaN(setting.default as number) &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a number.`,
@ -475,7 +479,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
});
}
if (setting.type === SettingTypes.Boolean && typeof setting.default !== 'boolean' && setting.default != null) {
if (
setting.type === SettingTypes.Boolean &&
typeof setting.default !== 'boolean' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
message: `Invalid default value for setting ${setting.key}. Must be a boolean.`,
@ -485,7 +493,8 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
if (
(setting.type === SettingTypes.String || setting.type === SettingTypes.Enum) &&
typeof setting.default !== 'string' && setting.default != null
typeof setting.default !== 'string' &&
setting.default != null
) {
errors.push({
code: ZodIssueCode.custom,
@ -520,6 +529,19 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
path: ['default'],
});
}
// Validate enumMappings
if (setting.enumMappings && setting.type === SettingTypes.Enum && setting.options) {
for (const option of setting.options) {
if (!(option in setting.enumMappings)) {
errors.push({
code: ZodIssueCode.custom,
message: `Missing enumMapping for option "${option}" in setting ${setting.key}.`,
path: ['enumMappings'],
});
}
}
}
}
if (errors.length > 0) {

View file

@ -4,6 +4,7 @@ import {
openAISettings,
googleSettings,
ReasoningEffort,
ReasoningSummary,
BedrockProviders,
anthropicSettings,
} from './types';
@ -71,6 +72,11 @@ const baseDefinitions: Record<string, SettingDefinition> = {
default: ImageDetail.auto,
component: 'slider',
options: [ImageDetail.low, ImageDetail.auto, ImageDetail.high],
enumMappings: {
[ImageDetail.low]: 'com_ui_low',
[ImageDetail.auto]: 'com_ui_auto',
[ImageDetail.high]: 'com_ui_high',
},
optionType: 'conversation',
columnSpan: 2,
},
@ -211,9 +217,57 @@ const openAIParams: Record<string, SettingDefinition> = {
description: 'com_endpoint_openai_reasoning_effort',
descriptionCode: true,
type: 'enum',
default: ReasoningEffort.medium,
default: ReasoningEffort.none,
component: 'slider',
options: [ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high],
options: [
ReasoningEffort.none,
ReasoningEffort.low,
ReasoningEffort.medium,
ReasoningEffort.high,
],
enumMappings: {
[ReasoningEffort.none]: 'com_ui_none',
[ReasoningEffort.low]: 'com_ui_low',
[ReasoningEffort.medium]: 'com_ui_medium',
[ReasoningEffort.high]: 'com_ui_high',
},
optionType: 'model',
columnSpan: 4,
},
useResponsesApi: {
key: 'useResponsesApi',
label: 'com_endpoint_use_responses_api',
labelCode: true,
description: 'com_endpoint_openai_use_responses_api',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
reasoning_summary: {
key: 'reasoning_summary',
label: 'com_endpoint_reasoning_summary',
labelCode: true,
description: 'com_endpoint_openai_reasoning_summary',
descriptionCode: true,
type: 'enum',
default: ReasoningSummary.none,
component: 'slider',
options: [
ReasoningSummary.none,
ReasoningSummary.auto,
ReasoningSummary.concise,
ReasoningSummary.detailed,
],
enumMappings: {
[ReasoningSummary.none]: 'com_ui_none',
[ReasoningSummary.auto]: 'com_ui_auto',
[ReasoningSummary.concise]: 'com_ui_concise',
[ReasoningSummary.detailed]: 'com_ui_detailed',
},
optionType: 'model',
columnSpan: 4,
},
@ -526,6 +580,8 @@ const openAI: SettingsConfiguration = [
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
];
const openAICol1: SettingsConfiguration = [
@ -542,9 +598,11 @@ const openAICol2: SettingsConfiguration = [
openAIParams.frequency_penalty,
openAIParams.presence_penalty,
baseDefinitions.stop,
openAIParams.reasoning_effort,
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
];
const anthropicConfig: SettingsConfiguration = [

View file

@ -112,11 +112,19 @@ export enum ImageDetail {
}
/**
 * Effort levels for OpenAI reasoning models' `reasoning_effort` parameter.
 * `none` is the empty string, which acts as the "unset" sentinel: empty values
 * are treated as absent when building the LLM config (and mapped to the
 * 'com_ui_none' label in the parameter UI).
 */
export enum ReasoningEffort {
none = '',
low = 'low',
medium = 'medium',
high = 'high',
}
/**
 * Summary modes for the OpenAI Responses API `reasoning.summary` parameter.
 * As with ReasoningEffort, `none` is the empty string so it reads as "unset"
 * and is stripped from the outgoing request config.
 */
export enum ReasoningSummary {
none = '',
auto = 'auto',
concise = 'concise',
detailed = 'detailed',
}
export const imageDetailNumeric = {
[ImageDetail.low]: 0,
[ImageDetail.auto]: 1,
@ -131,6 +139,7 @@ export const imageDetailValue = {
export const eImageDetailSchema = z.nativeEnum(ImageDetail);
export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
export const defaultAssistantFormValues = {
assistant: '',
@ -619,8 +628,11 @@ export const tConversationSchema = z.object({
file_ids: z.array(z.string()).optional(),
/* vision */
imageDetail: eImageDetailSchema.optional(),
/* OpenAI: o1 only */
reasoning_effort: eReasoningEffortSchema.optional(),
/* OpenAI: Reasoning models only */
reasoning_effort: eReasoningEffortSchema.optional().nullable(),
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
/* OpenAI: use Responses API */
useResponsesApi: z.boolean().optional(),
/* assistant */
assistant_id: z.string().optional(),
/* agents */
@ -717,6 +729,12 @@ export const tQueryParamsSchema = tConversationSchema
top_p: true,
/** @endpoints openAI, custom, azureOpenAI */
max_tokens: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_effort: true,
/** @endpoints openAI, custom, azureOpenAI */
reasoning_summary: true,
/** @endpoints openAI, custom, azureOpenAI */
useResponsesApi: true,
/** @endpoints google, anthropic, bedrock */
topP: true,
/** @endpoints google, anthropic */
@ -1044,10 +1062,12 @@ export const openAIBaseSchema = tConversationSchema.pick({
maxContextTokens: true,
max_tokens: true,
reasoning_effort: true,
reasoning_summary: true,
useResponsesApi: true,
});
export const openAISchema = openAIBaseSchema
.transform((obj: Partial<TConversation>) => removeNullishValues(obj))
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
.catch(() => ({}));
export const compactGoogleSchema = googleBaseSchema

View file

@ -131,8 +131,14 @@ export const conversationPreset = {
max_tokens: {
type: Number,
},
/** omni models only */
useResponsesApi: {
type: Boolean,
},
/** Reasoning models only */
reasoning_effort: {
type: String,
},
reasoning_summary: {
type: String,
},
};

View file

@ -46,6 +46,8 @@ export interface IPreset extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
// end of additional fields
agentOptions?: unknown;
}

View file

@ -45,6 +45,8 @@ export interface IConversation extends Document {
maxContextTokens?: number;
max_tokens?: number;
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
// Additional fields
files?: string[];
expiredAt?: Date;