🪐 feat: Initial OpenAI Responses API Support (#8149)

* chore: update @librechat/agents to v2.4.47

* WIP: temporary auto-toggle of the Responses API for o1/o3-pro

* feat: Enable Responses API for OpenAI models

- Updated the OpenAI client initialization to check for the useResponsesApi parameter in model options.
- Added translations for enabling the Responses API in the UI.
- Introduced useResponsesApi parameter in data provider settings and schemas.
- Updated relevant schemas to include useResponsesApi for conversation and preset configurations.
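
To illustrate the data-provider changes above: a minimal sketch of how the useResponsesApi flag could be declared, assuming zod-based conversation/preset schemas and object-literal setting definitions like those used elsewhere in librechat-data-provider. Only the `useResponsesApi` name comes from this PR; the surrounding shapes and keys are assumptions.

// A minimal sketch, assuming zod-based schemas; only `useResponsesApi` is from this PR.
import { z } from 'zod';

// Conversation and preset schemas gain an optional boolean flag.
export const useResponsesApiField = {
  useResponsesApi: z.boolean().optional(),
};

// Illustrative toggle definition for the parameter panel (shape and keys are assumptions).
export const useResponsesApiSetting = {
  key: 'useResponsesApi',
  label: 'com_ui_use_responses_api', // assumed localization key
  type: 'boolean' as const,
  default: false,
  component: 'switch' as const,
};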

* refactor: Remove useResponsesApi check from OpenAI client initialization and update translation for Responses API

- Removed the check for useResponsesApi in the OpenAI client initialization.
- Updated the translation for enabling the Responses API to clarify its functionality.

* chore: update @librechat/agents dependency to version 2.4.48

* chore: update @librechat/agents dependency to version 2.4.49

* chore: linting

* chore: linting

* feat: Enhance DynamicSlider and validation for enumMappings

- Added support for enumMappings in DynamicSlider to display values correctly based on enum settings.
- Implemented validation for enumMappings in the generate function to ensure all options have corresponding mappings.
- Added tests for handling empty string options and incomplete enumMappings in the generate.spec.ts file.
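
A minimal sketch of the enumMappings validation described above, assuming a setting definition with `options` and `enumMappings` fields; the real generate() helper may structure this differently.

// Illustrative validation: every enum option (including '') needs a mapped display value.
interface EnumSettingDefinition {
  key: string;
  options?: string[];
  enumMappings?: Record<string, string | number>;
}

function validateEnumMappings(setting: EnumSettingDefinition): void {
  const { options, enumMappings } = setting;
  if (!options || !enumMappings) {
    return;
  }
  for (const option of options) {
    // Empty string options are valid and must still have a corresponding mapping.
    if (!(option in enumMappings)) {
      throw new Error(
        `Setting "${setting.key}": missing enumMappings entry for option "${option}"`,
      );
    }
  }
}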

* feat: Enhance DynamicSlider localization support

- Added localization handling for mapped values in DynamicSlider when using enumMappings.
- Updated the logic to check if the mapped value is a localization key and return the localized string if applicable.
- Adjusted dependencies in useCallback hooks to include localize for proper functionality.
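
A rough sketch of the mapped-value localization described above, assuming LibreChat-style translation keys prefixed with `com_` and a `localize(key)` function obtained from useLocalize(); names other than `enumMappings` are illustrative.

// Sketch: resolve a slider value to its display label via enumMappings,
// localizing the mapped value when it looks like a translation key.
function getMappedLabel(
  value: string | number,
  enumMappings: Record<string, string> | undefined,
  localize: (key: string) => string,
): string {
  const mapped = enumMappings?.[String(value)];
  if (mapped == null) {
    return String(value);
  }
  // Treat values that look like localization keys as translatable.
  if (mapped.startsWith('com_')) {
    const localized = localize(mapped);
    return localized || mapped;
  }
  return mapped;
}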

* feat: Add reasoning summary and effort options to OpenAI configuration and UI
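
The new options follow OpenAI's reasoning parameters; a sketch of plausible option sets (the concrete setting definitions in this PR may differ, e.g. they may also include an empty-string option to leave the parameter unset):

// Option values follow OpenAI's reasoning parameters; shapes here are illustrative.
export const reasoningEffortOptions = ['low', 'medium', 'high'] as const;
export const reasoningSummaryOptions = ['auto', 'concise', 'detailed'] as const;

export type ReasoningEffort = (typeof reasoningEffortOptions)[number];
export type ReasoningSummary = (typeof reasoningSummaryOptions)[number];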

* feat: Add enumMappings for ImageDetail options in parameter settings
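
A hypothetical enumMappings entry for the ImageDetail setting, mapping each option to a display label or localization key; the actual keys and labels in the parameter settings may differ.

// Hypothetical ImageDetail definition; only the enumMappings idea is from this PR.
const imageDetailSetting = {
  key: 'imageDetail',
  type: 'enum' as const,
  options: ['low', 'auto', 'high'],
  // Maps each option to the label (or localization key) shown on the slider.
  enumMappings: {
    low: 'com_ui_low',
    auto: 'com_ui_auto',
    high: 'com_ui_high',
  },
  default: 'auto',
  component: 'slider' as const,
};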

* style: Improve styling for DynamicSlider component labels and inputs

* chore: Update reasoning effort description and parameter order for OpenAI params

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Commit f869d772f7 (parent 20100e120b)
Authored by Danny Avila, committed via GitHub on 2025-06-30 18:34:47 -04:00
15 changed files with 355 additions and 83 deletions


@@ -1,6 +1,7 @@
 import { Run, Providers } from '@librechat/agents';
 import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
 import type {
+  OpenAIClientOptions,
   StandardGraphConfig,
   EventHandler,
   GenericTool,
@@ -76,6 +77,11 @@ export async function createRun({
     (agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
   ) {
     reasoningKey = 'reasoning';
+  } else if (
+    (llmConfig as OpenAIClientOptions).useResponsesApi === true &&
+    (provider === Providers.OPENAI || provider === Providers.AZURE)
+  ) {
+    reasoningKey = 'reasoning';
   }
 
   const graphConfig: StandardGraphConfig = {


@@ -1,9 +1,23 @@
 import { ProxyAgent } from 'undici';
-import { KnownEndpoints } from 'librechat-data-provider';
+import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
+import type { OpenAI } from 'openai';
 import type * as t from '~/types';
 import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
 import { isEnabled } from '~/utils/common';
 
+function hasReasoningParams({
+  reasoning_effort,
+  reasoning_summary,
+}: {
+  reasoning_effort?: string | null;
+  reasoning_summary?: string | null;
+}): boolean {
+  return (
+    (reasoning_effort != null && reasoning_effort !== '') ||
+    (reasoning_summary != null && reasoning_summary !== '')
+  );
+}
+
 /**
  * Generates configuration options for creating a language model (LLM) instance.
  * @param apiKey - The API key for authentication.
@@ -17,7 +31,7 @@ export function getOpenAIConfig(
   endpoint?: string | null,
 ): t.LLMConfigResult {
   const {
-    modelOptions = {},
+    modelOptions: _modelOptions = {},
     reverseProxyUrl,
     defaultQuery,
     headers,
@@ -27,7 +41,7 @@
     addParams,
     dropParams,
   } = options;
-
+  const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
   const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
     {
       streaming,
@@ -40,39 +54,6 @@
     Object.assign(llmConfig, addParams);
   }
 
-  // Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
-  if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
-    const searchExcludeParams = [
-      'frequency_penalty',
-      'presence_penalty',
-      'temperature',
-      'top_p',
-      'top_k',
-      'stop',
-      'logit_bias',
-      'seed',
-      'response_format',
-      'n',
-      'logprobs',
-      'user',
-    ];
-
-    const updatedDropParams = dropParams || [];
-    const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
-
-    combinedDropParams.forEach((param) => {
-      if (param in llmConfig) {
-        delete llmConfig[param as keyof t.ClientOptions];
-      }
-    });
-  } else if (dropParams && Array.isArray(dropParams)) {
-    dropParams.forEach((param) => {
-      if (param in llmConfig) {
-        delete llmConfig[param as keyof t.ClientOptions];
-      }
-    });
-  }
-
   let useOpenRouter = false;
   const configOptions: t.OpenAIConfiguration = {};
@@ -139,11 +120,19 @@ export function getOpenAIConfig(
     configOptions.organization = process.env.OPENAI_ORGANIZATION;
   }
 
-  if (useOpenRouter && llmConfig.reasoning_effort != null) {
-    llmConfig.reasoning = {
-      effort: llmConfig.reasoning_effort,
-    };
-    delete llmConfig.reasoning_effort;
+  if (
+    hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
+    (llmConfig.useResponsesApi === true || useOpenRouter)
+  ) {
+    llmConfig.reasoning = removeNullishValues(
+      {
+        effort: reasoning_effort,
+        summary: reasoning_summary,
+      },
+      true,
+    ) as OpenAI.Reasoning;
+  } else if (hasReasoningParams({ reasoning_effort })) {
+    llmConfig.reasoning_effort = reasoning_effort;
   }
 
   if (llmConfig.max_tokens != null) {
@@ -151,6 +140,43 @@
     delete llmConfig.max_tokens;
   }
 
+  /**
+   * Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
+   */
+  if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
+    const searchExcludeParams = [
+      'frequency_penalty',
+      'presence_penalty',
+      'reasoning',
+      'reasoning_effort',
+      'temperature',
+      'top_p',
+      'top_k',
+      'stop',
+      'logit_bias',
+      'seed',
+      'response_format',
+      'n',
+      'logprobs',
+      'user',
+    ];
+
+    const updatedDropParams = dropParams || [];
+    const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
+
+    combinedDropParams.forEach((param) => {
+      if (param in llmConfig) {
+        delete llmConfig[param as keyof t.ClientOptions];
+      }
+    });
+  } else if (dropParams && Array.isArray(dropParams)) {
+    dropParams.forEach((param) => {
+      if (param in llmConfig) {
+        delete llmConfig[param as keyof t.ClientOptions];
+      }
+    });
+  }
+
   return {
     llmConfig,
     configOptions,
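
A rough usage sketch of the reasoning handling above (not part of the diff), assuming `useResponsesApi` flows from modelOptions into llmConfig as in the rest of this change; the import path is an assumption.

// Illustrative only: with the Responses API enabled, reasoning params are nested
// under `reasoning`; otherwise reasoning_effort stays a top-level parameter.
import { getOpenAIConfig } from '~/endpoints/openai/llm'; // assumed path

const { llmConfig } = getOpenAIConfig(process.env.OPENAI_API_KEY ?? '', {
  modelOptions: {
    model: 'o3-pro',
    useResponsesApi: true,
    reasoning_effort: 'high',
    reasoning_summary: 'detailed',
  },
});

// Expected per the branch above: llmConfig.reasoning equals
// { effort: 'high', summary: 'detailed' } and no top-level reasoning_effort is set.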


@@ -1,4 +1,4 @@
-import type { Providers } from '@librechat/agents';
+import type { Providers, ClientOptions } from '@librechat/agents';
 import type { AgentModelParameters } from 'librechat-data-provider';
 import type { OpenAIConfiguration } from './openai';
@@ -8,4 +8,5 @@ export type RunLLMConfig = {
   streamUsage: boolean;
   usage?: boolean;
   configuration?: OpenAIConfiguration;
-} & AgentModelParameters;
+} & AgentModelParameters &
+  ClientOptions;