This commit is contained in:
Rashmith Tula 2026-04-05 00:27:13 +00:00 committed by GitHub
commit f99525c39a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 114 additions and 0 deletions

View file

@ -352,6 +352,9 @@
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "Use Active Assistant",
"com_endpoint_use_responses_api": "Use Responses API",
"com_endpoint_stored_prompt_id": "Stored Prompt ID",
"com_endpoint_stored_prompt_id_placeholder": "pmpt_...",
"com_endpoint_openai_stored_prompt_id": "OpenAI Playground stored prompt ID (pmpt_...). Routes the conversation through a pre-configured prompt that carries its own system instructions and file-search vector store. Automatically enables the Responses API.",
"com_endpoint_use_search_grounding": "Grounding with Google Search",
"com_endpoint_verbosity": "Verbosity",
"com_error_endpoint_models_not_loaded": "Models for {{0}} could not be loaded. Please refresh the page and try again.",

View file

@ -737,6 +737,68 @@ describe('extractDefaultParams', () => {
});
});
describe('Stored Playground Prompt (stored_prompt_id)', () => {
  /**
   * Calls getOpenAILLMConfig with the shared test defaults (api key, streaming,
   * gpt-4o model) merged with per-test model options, so each case only states
   * what it varies.
   */
  const buildConfig = (options: Record<string, unknown>) =>
    getOpenAILLMConfig({
      apiKey: 'test-api-key',
      streaming: true,
      modelOptions: { model: 'gpt-4o', ...options } as Partial<t.OpenAIParameters>,
    });

  it('should inject prompt.id into modelKwargs and force useResponsesApi when stored_prompt_id is set', () => {
    const { llmConfig } = buildConfig({ stored_prompt_id: 'pmpt_abc123' });

    expect(llmConfig).toHaveProperty('useResponsesApi', true);
    expect(llmConfig.modelKwargs).toEqual(
      expect.objectContaining({ prompt: { id: 'pmpt_abc123' } }),
    );
  });

  it('should not set modelKwargs.prompt when stored_prompt_id is absent', () => {
    const { llmConfig } = buildConfig({ useResponsesApi: true });

    expect(llmConfig.modelKwargs?.prompt).toBeUndefined();
  });

  it('should preserve a manually-set useResponsesApi=true alongside stored_prompt_id', () => {
    const { llmConfig } = buildConfig({
      useResponsesApi: true,
      stored_prompt_id: 'pmpt_xyz789',
    });

    expect(llmConfig).toHaveProperty('useResponsesApi', true);
    expect(llmConfig.modelKwargs).toEqual(
      expect.objectContaining({ prompt: { id: 'pmpt_xyz789' } }),
    );
  });

  it('should not add stored_prompt_id itself to llmConfig or modelKwargs as a raw field', () => {
    const { llmConfig } = buildConfig({ stored_prompt_id: 'pmpt_abc123' });

    expect(llmConfig).not.toHaveProperty('stored_prompt_id');
    expect(llmConfig.modelKwargs?.stored_prompt_id).toBeUndefined();
  });
});
describe('applyDefaultParams', () => {
it('should apply defaults only when field is undefined', () => {
const target: Record<string, unknown> = {

View file

@ -53,6 +53,7 @@ export const knownOpenAIParams = new Set([
'truncation',
'include',
'previous_response_id',
'stored_prompt_id',
// LangChain specific
'__includeRawResponse',
'maxConcurrency',
@ -150,6 +151,7 @@ export function getOpenAILLMConfig({
reasoning_summary,
verbosity,
web_search,
stored_prompt_id,
frequency_penalty,
presence_penalty,
...modelOptions
@ -274,6 +276,20 @@ export function getOpenAILLMConfig({
tools.push({ type: 'web_search' });
}
if (stored_prompt_id) {
/**
* Forward a stored OpenAI Playground prompt by ID.
* The Responses API accepts `prompt: { id, variables? }` which causes OpenAI
* to hydrate the stored system instructions, file-search vector store, and
* any tool definitions configured in the Playground.
* We force `useResponsesApi` here because Chat Completions does not support
* this parameter.
*/
llmConfig.useResponsesApi = true;
modelKwargs.prompt = { id: stored_prompt_id };
hasModelKwargs = true;
}
/**
* Note: OpenAI reasoning models (o1/o3/gpt-5) do not support temperature and other sampling parameters
* Exception: gpt-5-chat and versioned models like gpt-5.1 DO support these parameters

View file

@ -268,6 +268,20 @@ const openAIParams: Record<string, SettingDefinition> = {
showDefault: false,
columnSpan: 2,
},
stored_prompt_id: {
key: 'stored_prompt_id',
label: 'com_endpoint_stored_prompt_id',
labelCode: true,
type: 'string',
default: '',
component: 'input',
placeholder: 'com_endpoint_stored_prompt_id_placeholder',
placeholderCode: true,
description: 'com_endpoint_openai_stored_prompt_id',
descriptionCode: true,
optionType: 'model',
columnSpan: 4,
},
web_search: {
key: 'web_search',
label: 'com_ui_web_search',
@ -765,6 +779,7 @@ const openAI: SettingsConfiguration = [
openAIParams.web_search,
openAIParams.reasoning_effort,
openAIParams.useResponsesApi,
openAIParams.stored_prompt_id,
openAIParams.reasoning_summary,
openAIParams.verbosity,
openAIParams.disableStreaming,
@ -791,6 +806,7 @@ const openAICol2: SettingsConfiguration = [
openAIParams.reasoning_summary,
openAIParams.verbosity,
openAIParams.useResponsesApi,
openAIParams.stored_prompt_id,
openAIParams.web_search,
openAIParams.disableStreaming,
librechat.fileTokenLimit,

View file

@ -770,6 +770,14 @@ export const tConversationSchema = z.object({
verbosity: eVerbositySchema.optional().nullable(),
/* OpenAI: use Responses API */
useResponsesApi: z.boolean().optional(),
/**
* OpenAI Playground stored prompt ID (e.g. "pmpt_...").
* When set, LibreChat forwards `prompt: { id }` in the Responses API request,
* routing the call through a pre-configured Playground prompt that can carry
* its own system instructions, file-search vector store, and tool definitions.
* Implicitly enables `useResponsesApi`.
*/
stored_prompt_id: z.string().optional(),
/* Anthropic: Effort control */
effort: eAnthropicEffortSchema.optional().nullable(),
/* OpenAI Responses API / Anthropic API / Google API */
@ -880,6 +888,8 @@ export const tQueryParamsSchema = tConversationSchema
verbosity: true,
/** @endpoints openAI, custom, azureOpenAI */
useResponsesApi: true,
/** @endpoints openAI, custom, azureOpenAI */
stored_prompt_id: true,
/** @endpoints openAI, anthropic, google */
web_search: true,
/** @endpoints openAI, custom, azureOpenAI */
@ -1178,6 +1188,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
reasoning_summary: true,
verbosity: true,
useResponsesApi: true,
stored_prompt_id: true,
web_search: true,
disableStreaming: true,
fileTokenLimit: true,

View file

@ -139,6 +139,10 @@ export const conversationPreset = {
useResponsesApi: {
type: Boolean,
},
/** OpenAI Playground stored prompt ID (pmpt_...) */
stored_prompt_id: {
type: String,
},
/** OpenAI Responses API / Anthropic API / Google API */
web_search: {
type: Boolean,

View file

@ -50,6 +50,7 @@ export interface IPreset extends Document {
reasoning_summary?: string;
verbosity?: string;
useResponsesApi?: boolean;
stored_prompt_id?: string;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;

View file

@ -48,6 +48,7 @@ export interface IConversation extends Document {
reasoning_summary?: string;
verbosity?: string;
useResponsesApi?: boolean;
stored_prompt_id?: string;
web_search?: boolean;
disableStreaming?: boolean;
fileTokenLimit?: number;