🔍 feat: Web Search via OpenAI Responses API (#8186)

* 🔍 feat: Introduce Web Search Functionality for OpenAI API

- Added a new web_search parameter to enable web search capabilities in the OpenAI configuration.
- Updated the DynamicSlider component for improved styling.
- Enhanced the useSetIndexOptions hook to auto-enable the Responses API when web search is activated.
- Modified relevant schemas, types, and translation files to support the new web search feature.

* chore: remove comments

* refactor: tool handling in initializeAgent for better clarity and functionality, reflecting OpenAI features

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
This commit is contained in:
Dustin Healy 2025-07-02 07:03:14 -07:00 committed by GitHub
parent 56ad92fb1c
commit 8ba61a86f4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 53 additions and 6 deletions

View file

@@ -85,7 +85,7 @@ const initializeAgent = async ({
});
const provider = agent.provider;
const { tools, toolContextMap } =
const { tools: structuredTools, toolContextMap } =
(await loadTools?.({
req,
res,
@@ -140,12 +140,20 @@ const initializeAgent = async ({
agent.provider = options.provider;
}
/** @type {import('@librechat/agents').GenericTool[]} */
let tools = options.tools ?? structuredTools;
if (
(agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
options?.tools?.length &&
tools?.length
options.tools?.length &&
structuredTools?.length
) {
throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
} else if (
(agent.provider === Providers.OPENAI || agent.provider === Providers.AZURE) &&
options.tools?.length &&
structuredTools?.length
) {
tools = structuredTools.concat(options.tools);
}
/** @type {import('@librechat/agents').ClientOptions} */
@@ -173,7 +181,7 @@ const initializeAgent = async ({
attachments,
resendFiles,
toolContextMap,
tools: options.tools ?? tools,
tools,
maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
};
};

View file

@@ -197,7 +197,7 @@ function DynamicSlider({
defaultTextProps,
cn(
optionText,
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200',
'reset-rc-number-input h-auto w-14 border-0 py-1 pl-1 text-center text-xs group-hover/temp:border-gray-200',
),
)}
/>

View file

@@ -30,6 +30,14 @@ const useSetIndexOptions: TUseSetOptions = (preset = false) => {
};
}
// Auto-enable Responses API when web search is enabled
if (param === 'web_search' && newValue === true) {
const currentUseResponsesApi = conversation?.useResponsesApi ?? false;
if (!currentUseResponsesApi) {
update['useResponsesApi'] = true;
}
}
setConversation(
(prevState) =>
tConvoUpdateSchema.parse({

View file

@@ -234,6 +234,7 @@
"com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
"com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
"com_endpoint_openai_use_responses_api": "Use the Responses API instead of Chat Completions, which includes extended features from OpenAI. Required for o1-pro, o3-pro, and to enable reasoning summaries.",
"com_endpoint_openai_use_web_search": "Enable web search functionality using OpenAI's built-in search capabilities. This allows the model to search the web for up-to-date information and provide more accurate, current responses.",
"com_endpoint_output": "Output",
"com_endpoint_plug_image_detail": "Image Detail",
"com_endpoint_plug_resend_files": "Resend Files",

View file

@@ -1,5 +1,6 @@
import { ProxyAgent } from 'undici';
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { AzureOpenAIInput } from '@langchain/openai';
import type { OpenAI } from 'openai';
import type * as t from '~/types';
@@ -176,6 +177,13 @@ export function getOpenAIConfig(
delete llmConfig.max_tokens;
}
const tools: BindToolsInput[] = [];
if (modelOptions.web_search) {
llmConfig.useResponsesApi = true;
tools.push({ type: 'web_search_preview' });
}
/**
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
*/
@@ -216,5 +224,6 @@ export function getOpenAIConfig(
return {
llmConfig,
configOptions,
tools,
};
}

View file

@@ -1,6 +1,7 @@
import { z } from 'zod';
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
import type { TEndpointOption, TAzureConfig, TEndpoint } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { OpenAIClientOptions } from '@librechat/agents';
import type { AzureOptions } from './azure';
@@ -33,6 +34,7 @@ export type ClientOptions = OpenAIClientOptions & {
export interface LLMConfigResult {
llmConfig: ClientOptions;
configOptions: OpenAIConfiguration;
tools?: BindToolsInput[];
}
/**

View file

@@ -247,6 +247,19 @@ const openAIParams: Record<string, SettingDefinition> = {
showDefault: false,
columnSpan: 2,
},
web_search: {
key: 'web_search',
label: 'com_ui_web_search',
labelCode: true,
description: 'com_endpoint_openai_use_web_search',
descriptionCode: true,
type: 'boolean',
default: false,
component: 'switch',
optionType: 'model',
showDefault: false,
columnSpan: 2,
},
reasoning_summary: {
key: 'reasoning_summary',
label: 'com_endpoint_reasoning_summary',
@@ -596,6 +609,7 @@ const openAI: SettingsConfiguration = [
baseDefinitions.stop,
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.web_search,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
@@ -618,8 +632,9 @@ const openAICol2: SettingsConfiguration = [
librechat.resendFiles,
baseDefinitions.imageDetail,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.reasoning_summary,
openAIParams.useResponsesApi,
openAIParams.web_search,
];
const anthropicConfig: SettingsConfiguration = [

View file

@@ -634,6 +634,8 @@ export const tConversationSchema = z.object({
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
/* OpenAI: use Responses API */
useResponsesApi: z.boolean().optional(),
/* OpenAI: use Responses API with Web Search */
web_search: z.boolean().optional(),
/* Google: use Search Grounding */
grounding: z.boolean().optional(),
/* assistant */
@@ -1071,6 +1073,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
reasoning_effort: true,
reasoning_summary: true,
useResponsesApi: true,
web_search: true,
});
export const openAISchema = openAIBaseSchema

View file

@@ -47,6 +47,7 @@ export interface IConversation extends Document {
reasoning_effort?: string;
reasoning_summary?: string;
useResponsesApi?: boolean;
web_search?: boolean;
grounding?: boolean;
// Additional fields
files?: string[];