Mirror of https://github.com/danny-avila/LibreChat.git
🌊 feat: Add Disable Streaming Toggle (#8177)
* 🌊 feat: Add Disable Streaming Option in Configuration

  - Introduced a new setting to disable streaming responses in the OpenAI, Azure, and custom endpoint parameter panels.
  - Updated translation files to include labels and descriptions for the disable streaming feature.
  - Modified relevant schemas and parameter settings to support the new disable streaming functionality.

* 🔧 fix: disableStreaming state not persisting when returning to a conversation

  - Added the disableStreaming field to the IPreset interface and conversationPreset.
  - Moved toggles and sliders around for a nicer left-right UI split in the parameters panel.
  - Removed the old reference to 'grounding' in conversationPreset (now web_search) and added web_search to IPreset.
Parent: 52bbac3a37
Commit: 7f8c327509

6 changed files with 27 additions and 3 deletions
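For context, here is a minimal sketch of the behavior the new toggle targets, assuming the standard OpenAI-compatible request shape. The actual client wiring in LibreChat is not part of this diff; `sendMessage` and `ConversationOptions` below are hypothetical names used only for illustration.

```ts
import OpenAI from 'openai';

type ConversationOptions = {
  model: string;
  disableStreaming?: boolean;
};

async function sendMessage(client: OpenAI, opts: ConversationOptions, prompt: string) {
  if (opts.disableStreaming) {
    // Single, complete response: useful for models (e.g. o3) that require
    // organization verification before streaming is allowed.
    const res = await client.chat.completions.create({
      model: opts.model,
      messages: [{ role: 'user', content: prompt }],
      stream: false,
    });
    return res.choices[0].message.content;
  }

  // Default behavior: stream tokens as they arrive.
  const stream = await client.chat.completions.create({
    model: opts.model,
    messages: [{ role: 'user', content: prompt }],
    stream: true,
  });
  let text = '';
  for await (const chunk of stream) {
    text += chunk.choices[0]?.delta?.content ?? '';
  }
  return text;
}
```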
@@ -231,6 +231,7 @@
   "com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none,auto, concise, or detailed.",
   "com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.",
   "com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.",
+  "com_endpoint_disable_streaming": "Disable streaming responses and receive the complete response at once. Useful for models like o3 that require organization verification for streaming",
   "com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.",
   "com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
   "com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
@@ -239,6 +240,7 @@
   "com_endpoint_output": "Output",
   "com_endpoint_plug_image_detail": "Image Detail",
   "com_endpoint_plug_resend_files": "Resend Files",
+  "com_endpoint_disable_streaming_label": "Disable Streaming",
   "com_endpoint_plug_set_custom_instructions_for_gpt_placeholder": "Set custom instructions to include in System Message. Default: none",
   "com_endpoint_plug_skip_completion": "Skip Completion",
   "com_endpoint_plug_use_functions": "Use Functions",
@@ -284,6 +284,19 @@ const openAIParams: Record<string, SettingDefinition> = {
     optionType: 'model',
     columnSpan: 4,
   },
+  disableStreaming: {
+    key: 'disableStreaming',
+    label: 'com_endpoint_disable_streaming_label',
+    labelCode: true,
+    description: 'com_endpoint_disable_streaming',
+    descriptionCode: true,
+    type: 'boolean',
+    default: false,
+    component: 'switch',
+    optionType: 'model',
+    showDefault: false,
+    columnSpan: 2,
+  } as const,
 };
 
 const anthropic: Record<string, SettingDefinition> = {
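The fields used above suggest roughly the following shape for a setting definition. This is an assumption inferred only from the entries visible in this diff, not the full `SettingDefinition` type exported by the package.

```ts
// Rough slice of the setting-definition shape, inferred from this diff only.
interface SettingDefinitionSlice {
  key: string;
  label?: string;          // translation key when labelCode is true
  labelCode?: boolean;
  description?: string;    // translation key when descriptionCode is true
  descriptionCode?: boolean;
  type: string;            // 'boolean' for this toggle
  default?: unknown;
  component?: string;      // 'switch' renders as an on/off toggle
  optionType?: string;     // 'model' here
  showDefault?: boolean;
  columnSpan?: number;     // width in the parameters-panel grid
}

// The new toggle is a half-width switch (columnSpan: 2) whose label and
// description come from the translation keys added in the earlier hunks.
const disableStreamingExample: SettingDefinitionSlice = {
  key: 'disableStreaming',
  label: 'com_endpoint_disable_streaming_label',
  labelCode: true,
  description: 'com_endpoint_disable_streaming',
  descriptionCode: true,
  type: 'boolean',
  default: false,
  component: 'switch',
  optionType: 'model',
  showDefault: false,
  columnSpan: 2,
};
```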
@@ -626,6 +639,7 @@ const openAI: SettingsConfiguration = [
   openAIParams.reasoning_effort,
   openAIParams.useResponsesApi,
   openAIParams.reasoning_summary,
+  openAIParams.disableStreaming,
 ];
 
 const openAICol1: SettingsConfiguration = [
@@ -648,6 +662,7 @@ const openAICol2: SettingsConfiguration = [
   openAIParams.reasoning_summary,
   openAIParams.useResponsesApi,
   openAIParams.web_search,
+  openAIParams.disableStreaming,
 ];
 
 const anthropicConfig: SettingsConfiguration = [
@@ -639,6 +639,8 @@ export const tConversationSchema = z.object({
   useResponsesApi: z.boolean().optional(),
   /* OpenAI Responses API / Anthropic API / Google API */
   web_search: z.boolean().optional(),
+  /* disable streaming */
+  disableStreaming: z.boolean().optional(),
   /* assistant */
   assistant_id: z.string().optional(),
   /* agents */
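Because the new field is declared with `z.boolean().optional()`, conversations saved before this commit still validate. A small self-contained sketch of that behavior, using a stand-in slice of the schema rather than the real `tConversationSchema`:

```ts
import { z } from 'zod';

// Stand-in slice of tConversationSchema (assumption: only the fields shown
// in the hunk above; the real schema contains many more).
const conversationSlice = z.object({
  useResponsesApi: z.boolean().optional(),
  web_search: z.boolean().optional(),
  /* disable streaming */
  disableStreaming: z.boolean().optional(),
});

// Both parse: the field is optional, so older conversation records that
// lack it remain valid.
conversationSlice.parse({ disableStreaming: true });
conversationSlice.parse({});
```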
@@ -743,6 +745,8 @@ export const tQueryParamsSchema = tConversationSchema
   useResponsesApi: true,
   /** @endpoints openAI, anthropic, google */
   web_search: true,
+  /** @endpoints openAI, custom, azureOpenAI */
+  disableStreaming: true,
   /** @endpoints google, anthropic, bedrock */
   topP: true,
   /** @endpoints google, anthropic */
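`tQueryParamsSchema` is built from the conversation schema with zod's `.pick()`, so listing `disableStreaming: true` here is what exposes the toggle to conversation query parameters. A stand-in sketch of that mechanism, reduced to the fields shown in this diff:

```ts
import { z } from 'zod';

// Stand-in for tConversationSchema, reduced to fields visible in this diff.
const conversationSlice = z.object({
  web_search: z.boolean().optional(),
  disableStreaming: z.boolean().optional(),
  topP: z.number().optional(),
});

// .pick({ field: true }) keeps only the listed keys, mirroring how the real
// query-params schema whitelists which settings a URL may set.
const queryParamsSlice = conversationSlice.pick({
  web_search: true,
  disableStreaming: true,
  topP: true,
});

console.log(queryParamsSlice.parse({ disableStreaming: true }));
// => { disableStreaming: true }
```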
@@ -1075,6 +1079,7 @@ export const openAIBaseSchema = tConversationSchema.pick({
   reasoning_summary: true,
   useResponsesApi: true,
   web_search: true,
+  disableStreaming: true,
 });
 
 export const openAISchema = openAIBaseSchema
@@ -134,12 +134,11 @@ export const conversationPreset = {
   useResponsesApi: {
     type: Boolean,
   },
-  /** OpenAI Responses API / Anthropic API */
+  /** OpenAI Responses API / Anthropic API / Google API */
   web_search: {
     type: Boolean,
   },
-  /** Google */
-  grounding: {
+  disableStreaming: {
     type: Boolean,
   },
   /** Reasoning models only */
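The `conversationPreset` entries are Mongoose field definitions, so the new `disableStreaming: { type: Boolean }` becomes an optional Boolean path on whichever schema spreads the object in. A reduced sketch, assuming that usage pattern rather than quoting the real preset schema:

```ts
import { Schema } from 'mongoose';

// Stand-in for the slice of conversationPreset shown above.
const conversationPresetSlice = {
  useResponsesApi: { type: Boolean },
  web_search: { type: Boolean },
  disableStreaming: { type: Boolean },
};

// Spreading the preset fields into a schema gives each one an optional
// Boolean path; documents saved before this commit simply lack the field.
const presetSchema = new Schema({
  ...conversationPresetSlice,
});
```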
@@ -48,6 +48,8 @@ export interface IPreset extends Document {
   reasoning_effort?: string;
   reasoning_summary?: string;
   useResponsesApi?: boolean;
+  web_search?: boolean;
+  disableStreaming?: boolean;
   // end of additional fields
   agentOptions?: unknown;
 }
@@ -48,6 +48,7 @@ export interface IConversation extends Document {
   reasoning_summary?: string;
   useResponsesApi?: boolean;
   web_search?: boolean;
+  disableStreaming?: boolean;
   // Additional fields
   files?: string[];
   expiredAt?: Date;