Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-16 16:30:15 +01:00)
Merge 2fd2a62886 into b288d81f5a
This commit is contained in: commit a145c204fd
6 changed files with 76 additions and 9 deletions
@@ -50,8 +50,34 @@ export default function Parameters() {
       .map((param) => (overriddenParamsMap[param.key] as SettingDefinition) ?? param);
   }, [endpointType, endpointsConfig, model, provider]);
 
+  const filteredParameters = useMemo((): SettingDefinition[] => {
+    const allowXHigh = /^gpt-5\.2/.test(model ?? '');
+
+    return parameters?.map((param) => {
+      if (param?.key !== 'reasoning_effort' || !param.options) {
+        return param;
+      }
+
+      const filteredOptions = allowXHigh
+        ? param.options
+        : param.options.filter((option) => option !== 'xhigh');
+
+      const filteredEnumMappings = param.enumMappings
+        ? Object.fromEntries(
+            Object.entries(param.enumMappings).filter(([key]) => allowXHigh || key !== 'xhigh'),
+          )
+        : undefined;
+
+      return {
+        ...param,
+        options: filteredOptions,
+        enumMappings: filteredEnumMappings,
+      };
+    });
+  }, [parameters, model]);
+
   useEffect(() => {
-    if (!parameters) {
+    if (!filteredParameters) {
       return;
     }
 
@@ -65,7 +91,7 @@ export default function Parameters() {
     //   }),
     // );
     const paramKeys = new Set(
-      parameters.filter((setting) => setting != null).map((setting) => setting.key),
+      filteredParameters.filter((setting) => setting != null).map((setting) => setting.key),
     );
     setConversation((prev) => {
       if (!prev) {
@@ -76,6 +102,13 @@ export default function Parameters() {
 
       const conversationKeys = Object.keys(updatedConversation);
       const updatedKeys: string[] = [];
+
+      const allowXHigh = /^gpt-5\.2/.test(model ?? '');
+      if (!allowXHigh && updatedConversation.reasoning_effort === 'xhigh') {
+        updatedKeys.push('reasoning_effort');
+        delete updatedConversation.reasoning_effort;
+      }
+
       conversationKeys.forEach((key) => {
         // const defaultValue = defaultValueMap.get(key);
         // if (paramKeys.has(key) && defaultValue != null && prev[key] != null) {
@@ -102,7 +135,7 @@ export default function Parameters() {
 
       return updatedConversation;
     });
-  }, [parameters, setConversation]);
+  }, [filteredParameters, setConversation, model]);
 
   const resetParameters = useCallback(() => {
     setConversation((prev) => {
@@ -137,7 +170,7 @@ export default function Parameters() {
     setIsDialogOpen(true);
   }, [conversation]);
 
-  if (!parameters) {
+  if (!filteredParameters) {
     return null;
   }
 
@@ -147,7 +180,7 @@ export default function Parameters() {
       {' '}
       {/* This is the parent element containing all settings */}
       {/* Below is an example of an applied dynamic setting, each be contained by a div with the column span specified */}
-      {parameters.map((setting) => {
+      {filteredParameters.map((setting) => {
         const Component = componentMapping[setting.component];
         if (!Component) {
           return null;
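
Taken together, the client-side hunks above gate the 'xhigh' choice behind a gpt-5.2 model check: the option and its enum mapping are filtered out of the settings panel, and a stale 'xhigh' value is cleared from the active conversation. A minimal standalone sketch of that gating, assuming a simplified SettingDefinition shape and a hypothetical gateXHigh helper (illustration only, not code from this commit):

// Illustration only: simplified stand-in for the app's SettingDefinition type.
interface ReasoningSetting {
  key: string;
  options?: string[];
  enumMappings?: Record<string, string>;
}

// Hypothetical helper mirroring the filteredParameters logic above:
// drop 'xhigh' from the reasoning_effort setting unless the model starts with "gpt-5.2".
function gateXHigh(param: ReasoningSetting, model?: string): ReasoningSetting {
  const allowXHigh = /^gpt-5\.2/.test(model ?? '');
  if (param.key !== 'reasoning_effort' || !param.options || allowXHigh) {
    return param;
  }
  return {
    ...param,
    options: param.options.filter((option) => option !== 'xhigh'),
    enumMappings: param.enumMappings
      ? Object.fromEntries(
          Object.entries(param.enumMappings).filter(([key]) => key !== 'xhigh'),
        )
      : undefined,
  };
}

// gateXHigh(setting, 'gpt-5.2-pro') keeps 'xhigh'; gateXHigh(setting, 'gpt-5-pro') removes it.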
@@ -971,6 +971,7 @@
   "com_ui_hide_password": "Hide password",
   "com_ui_hide_qr": "Hide QR Code",
   "com_ui_high": "High",
+  "com_ui_extra_high": "Extra High",
   "com_ui_host": "Host",
   "com_ui_icon": "Icon",
   "com_ui_idea": "Ideas",
@@ -210,6 +210,32 @@ describe('getOpenAILLMConfig', () => {
     });
   });
 
+  it('should allow xhigh reasoning_effort for gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5.2-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('reasoning_effort', ReasoningEffort.xhigh);
+  });
+
+  it('should drop xhigh reasoning_effort for non-gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).not.toHaveProperty('reasoning_effort');
+  });
+
   it('should NOT exclude parameters for gpt-5-chat (it supports sampling params)', () => {
     const result = getOpenAILLMConfig({
       apiKey: 'test-api-key',
@@ -220,20 +220,24 @@ export function getOpenAILLMConfig({
     llmConfig.include_reasoning = true;
   }
 
+  const allowXHigh = modelOptions.model ? /^gpt-5\.2/.test(String(modelOptions.model)) : false;
+  const sanitizedReasoningEffort =
+    reasoning_effort === 'xhigh' && !allowXHigh ? undefined : reasoning_effort;
+
   if (
-    hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
+    hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort, reasoning_summary }) &&
     (llmConfig.useResponsesApi === true ||
       (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
   ) {
     llmConfig.reasoning = removeNullishValues(
       {
-        effort: reasoning_effort,
+        effort: sanitizedReasoningEffort,
         summary: reasoning_summary,
       },
       true,
     ) as OpenAI.Reasoning;
-  } else if (hasReasoningParams({ reasoning_effort })) {
-    llmConfig.reasoning_effort = reasoning_effort;
+  } else if (hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort })) {
+    llmConfig.reasoning_effort = sanitizedReasoningEffort;
   }
 
   if (llmConfig.max_tokens != null) {
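
The server-side change above applies the same gate before the reasoning parameters are assembled, so an 'xhigh' effort only reaches the provider for gpt-5.2 models and is dropped otherwise. A rough standalone sketch of that decision, using a hypothetical sanitizeReasoningEffort helper that is not part of this commit:

// Hypothetical standalone version of the sanitization logic above.
// Returns the effort to send, or undefined when 'xhigh' is not allowed for the model.
function sanitizeReasoningEffort(
  model: string | undefined,
  reasoningEffort: string | undefined,
): string | undefined {
  const allowXHigh = model ? /^gpt-5\.2/.test(model) : false;
  return reasoningEffort === 'xhigh' && !allowXHigh ? undefined : reasoningEffort;
}

// sanitizeReasoningEffort('gpt-5.2-pro', 'xhigh') -> 'xhigh'
// sanitizeReasoningEffort('gpt-5-pro', 'xhigh')   -> undefined (omitted, as the tests above expect)
// sanitizeReasoningEffort('gpt-5-pro', 'high')    -> 'high'    (other efforts pass through unchanged)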
@@ -239,6 +239,7 @@ const openAIParams: Record<string, SettingDefinition> = {
       ReasoningEffort.low,
       ReasoningEffort.medium,
       ReasoningEffort.high,
+      ReasoningEffort.xhigh,
     ],
     enumMappings: {
       [ReasoningEffort.unset]: 'com_ui_auto',
@@ -247,6 +248,7 @@ const openAIParams: Record<string, SettingDefinition> = {
       [ReasoningEffort.low]: 'com_ui_low',
       [ReasoningEffort.medium]: 'com_ui_medium',
       [ReasoningEffort.high]: 'com_ui_high',
+      [ReasoningEffort.xhigh]: 'com_ui_extra_high',
     },
     optionType: 'model',
     columnSpan: 4,
@@ -169,6 +169,7 @@ export enum ReasoningEffort {
   low = 'low',
   medium = 'medium',
   high = 'high',
+  xhigh = 'xhigh',
 }
 
 export enum ReasoningSummary {