⚠️ chore: Remove Deprecated forcePrompt setting (#11622)

- Removed the `forcePrompt` parameter from various configuration files, including `librechat.example.yaml`, `initialize.js`, `values.yaml`, and `initialize.ts`.
    - This change simplifies the configuration by eliminating unused options, enhancing clarity and maintainability across the codebase.
This commit is contained in:
Danny Avila 2026-02-04 11:02:27 +01:00 committed by GitHub
parent f34052c6bb
commit 5eb0a3ad90
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 0 additions and 12 deletions

View file

@@ -128,7 +128,6 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
const groupName = modelGroupMap[modelName].group;
clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
clientOptions.headers = opts.defaultHeaders;

View file

@@ -97,7 +97,6 @@ librechat:
# titleModel: "gpt-3.5-turbo"
# summarize: false
# summaryModel: "gpt-3.5-turbo"
# forcePrompt: false
# modelDisplayLabel: "OpenRouter"
# name of existing Yaml configmap, key must be librechat.yaml

View file

@@ -374,9 +374,6 @@ endpoints:
# Summary Model: Specify the model to use if summarization is enabled.
# summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
# forcePrompt: false
# The label displayed for the AI model in messages.
modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
@@ -436,7 +433,6 @@ endpoints:
titleModel: 'current_model'
summarize: false
summaryModel: 'current_model'
forcePrompt: false
modelDisplayLabel: 'Portkey'
iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf

View file

@@ -31,7 +31,6 @@ function buildCustomOptions(
customParams: endpointConfig.customParams,
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,
summaryModel: endpointConfig.summaryModel,
modelDisplayLabel: endpointConfig.modelDisplayLabel,
titleMethod: endpointConfig.titleMethod ?? 'completion',

View file

@@ -1300,7 +1300,6 @@ describe('getOpenAIConfig', () => {
max_completion_tokens: 4000,
},
dropParams: ['frequency_penalty'],
forcePrompt: false,
modelOptions: {
model: modelName,
user: 'azure-user-123',
@@ -1395,7 +1394,6 @@ describe('getOpenAIConfig', () => {
dropParams: ['presence_penalty'],
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
forcePrompt: false,
summaryModel: 'gpt-3.5-turbo',
modelDisplayLabel: 'Custom GPT-4',
titleMethod: 'completion',
@@ -1414,7 +1412,6 @@ describe('getOpenAIConfig', () => {
customParams: {},
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,
summaryModel: endpointConfig.summaryModel,
modelDisplayLabel: endpointConfig.modelDisplayLabel,
titleMethod: endpointConfig.titleMethod,

View file

@@ -122,7 +122,6 @@ export const azureBaseSchema = z.object({
assistants: z.boolean().optional(),
addParams: z.record(z.any()).optional(),
dropParams: z.array(z.string()).optional(),
forcePrompt: z.boolean().optional(),
version: z.string().optional(),
baseURL: z.string().optional(),
additionalHeaders: z.record(z.any()).optional(),
@@ -321,7 +320,6 @@ export const endpointSchema = baseEndpointSchema.merge(
summarize: z.boolean().optional(),
summaryModel: z.string().optional(),
iconURL: z.string().optional(),
forcePrompt: z.boolean().optional(),
modelDisplayLabel: z.string().optional(),
headers: z.record(z.any()).optional(),
addParams: z.record(z.any()).optional(),