Mirror of https://github.com/danny-avila/LibreChat.git
Merge 2fd2a62886 into b288d81f5a (commit a145c204fd)
6 changed files with 76 additions and 9 deletions

@@ -50,8 +50,34 @@ export default function Parameters() {
       .map((param) => (overriddenParamsMap[param.key] as SettingDefinition) ?? param);
   }, [endpointType, endpointsConfig, model, provider]);
 
+  const filteredParameters = useMemo((): SettingDefinition[] => {
+    const allowXHigh = /^gpt-5\.2/.test(model ?? '');
+
+    return parameters?.map((param) => {
+      if (param?.key !== 'reasoning_effort' || !param.options) {
+        return param;
+      }
+
+      const filteredOptions = allowXHigh
+        ? param.options
+        : param.options.filter((option) => option !== 'xhigh');
+
+      const filteredEnumMappings = param.enumMappings
+        ? Object.fromEntries(
+            Object.entries(param.enumMappings).filter(([key]) => allowXHigh || key !== 'xhigh'),
+          )
+        : undefined;
+
+      return {
+        ...param,
+        options: filteredOptions,
+        enumMappings: filteredEnumMappings,
+      };
+    });
+  }, [parameters, model]);
+
   useEffect(() => {
-    if (!parameters) {
+    if (!filteredParameters) {
       return;
     }
@@ -65,7 +91,7 @@ export default function Parameters() {
      //   }),
      // );
      const paramKeys = new Set(
-       parameters.filter((setting) => setting != null).map((setting) => setting.key),
+       filteredParameters.filter((setting) => setting != null).map((setting) => setting.key),
      );
      setConversation((prev) => {
        if (!prev) {
@@ -76,6 +102,13 @@ export default function Parameters() {
        const conversationKeys = Object.keys(updatedConversation);
        const updatedKeys: string[] = [];
 
+       const allowXHigh = /^gpt-5\.2/.test(model ?? '');
+       if (!allowXHigh && updatedConversation.reasoning_effort === 'xhigh') {
+         updatedKeys.push('reasoning_effort');
+         delete updatedConversation.reasoning_effort;
+       }
+
        conversationKeys.forEach((key) => {
          // const defaultValue = defaultValueMap.get(key);
          // if (paramKeys.has(key) && defaultValue != null && prev[key] != null) {
@@ -102,7 +135,7 @@ export default function Parameters() {
 
        return updatedConversation;
      });
-  }, [parameters, setConversation]);
+  }, [filteredParameters, setConversation, model]);
 
   const resetParameters = useCallback(() => {
     setConversation((prev) => {
@@ -137,7 +170,7 @@ export default function Parameters() {
     setIsDialogOpen(true);
   }, [conversation]);
 
-  if (!parameters) {
+  if (!filteredParameters) {
     return null;
   }
 
@@ -147,7 +180,7 @@ export default function Parameters() {
       {' '}
       {/* This is the parent element containing all settings */}
       {/* Below is an example of an applied dynamic setting, each be contained by a div with the column span specified */}
-      {parameters.map((setting) => {
+      {filteredParameters.map((setting) => {
        const Component = componentMapping[setting.component];
        if (!Component) {
          return null;
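Taken together, the Parameters changes gate the new 'xhigh' choice on the model name: unless the selected model starts with 'gpt-5.2', the option and its label mapping are filtered out of the reasoning_effort dropdown, and a stale 'xhigh' value is stripped from the conversation. Below is a minimal standalone sketch of that gating logic; SettingSketch, allowsXHigh, and filterReasoningEffort are simplified names invented here for illustration, not LibreChat's actual types or helpers.

// Sketch only: simplified shapes, not the real SettingDefinition.
interface SettingSketch {
  key: string;
  options?: string[];
  enumMappings?: Record<string, string>;
}

const allowsXHigh = (model?: string): boolean => /^gpt-5\.2/.test(model ?? '');

// Mirrors the filteredParameters useMemo: only the reasoning_effort setting is touched.
function filterReasoningEffort(param: SettingSketch, model?: string): SettingSketch {
  if (param.key !== 'reasoning_effort' || !param.options) {
    return param;
  }
  const allowXHigh = allowsXHigh(model);
  return {
    ...param,
    options: allowXHigh ? param.options : param.options.filter((o) => o !== 'xhigh'),
    enumMappings: param.enumMappings
      ? Object.fromEntries(
          Object.entries(param.enumMappings).filter(([k]) => allowXHigh || k !== 'xhigh'),
        )
      : undefined,
  };
}

// Example: 'xhigh' survives for gpt-5.2-pro but not for gpt-5-pro.
const setting: SettingSketch = {
  key: 'reasoning_effort',
  options: ['low', 'medium', 'high', 'xhigh'],
  enumMappings: { xhigh: 'com_ui_extra_high' },
};
console.log(filterReasoningEffort(setting, 'gpt-5.2-pro').options); // ['low', 'medium', 'high', 'xhigh']
console.log(filterReasoningEffort(setting, 'gpt-5-pro').options);   // ['low', 'medium', 'high']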
@@ -971,6 +971,7 @@
   "com_ui_hide_password": "Hide password",
   "com_ui_hide_qr": "Hide QR Code",
   "com_ui_high": "High",
+  "com_ui_extra_high": "Extra High",
   "com_ui_host": "Host",
   "com_ui_icon": "Icon",
   "com_ui_idea": "Ideas",
@@ -210,6 +210,32 @@ describe('getOpenAILLMConfig', () => {
    });
  });
 
+  it('should allow xhigh reasoning_effort for gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5.2-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).toHaveProperty('reasoning_effort', ReasoningEffort.xhigh);
+  });
+
+  it('should drop xhigh reasoning_effort for non-gpt-5.2 models', () => {
+    const result = getOpenAILLMConfig({
+      apiKey: 'test-api-key',
+      streaming: true,
+      modelOptions: {
+        model: 'gpt-5-pro',
+        reasoning_effort: ReasoningEffort.xhigh,
+      },
+    });
+
+    expect(result.llmConfig).not.toHaveProperty('reasoning_effort');
+  });
+
  it('should NOT exclude parameters for gpt-5-chat (it supports sampling params)', () => {
    const result = getOpenAILLMConfig({
      apiKey: 'test-api-key',
@@ -220,20 +220,24 @@ export function getOpenAILLMConfig({
    llmConfig.include_reasoning = true;
  }
 
+  const allowXHigh = modelOptions.model ? /^gpt-5\.2/.test(String(modelOptions.model)) : false;
+  const sanitizedReasoningEffort =
+    reasoning_effort === 'xhigh' && !allowXHigh ? undefined : reasoning_effort;
+
  if (
-    hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
+    hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort, reasoning_summary }) &&
    (llmConfig.useResponsesApi === true ||
      (endpoint !== EModelEndpoint.openAI && endpoint !== EModelEndpoint.azureOpenAI))
  ) {
    llmConfig.reasoning = removeNullishValues(
      {
-        effort: reasoning_effort,
+        effort: sanitizedReasoningEffort,
        summary: reasoning_summary,
      },
      true,
    ) as OpenAI.Reasoning;
-  } else if (hasReasoningParams({ reasoning_effort })) {
-    llmConfig.reasoning_effort = reasoning_effort;
+  } else if (hasReasoningParams({ reasoning_effort: sanitizedReasoningEffort })) {
+    llmConfig.reasoning_effort = sanitizedReasoningEffort;
  }
 
  if (llmConfig.max_tokens != null) {
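On the backend, getOpenAILLMConfig applies the same gate before building the reasoning payload: an 'xhigh' effort is downgraded to undefined unless the model name starts with 'gpt-5.2', and other effort values pass through untouched. A minimal sketch of just that sanitization step follows; sanitizeReasoningEffort and ReasoningEffortValue are names invented here for illustration, not LibreChat's actual exports.

// Sketch only: a hypothetical helper isolating the sanitization logic.
type ReasoningEffortValue = 'low' | 'medium' | 'high' | 'xhigh' | undefined;

function sanitizeReasoningEffort(
  model: string | undefined,
  effort: ReasoningEffortValue,
): ReasoningEffortValue {
  const allowXHigh = model ? /^gpt-5\.2/.test(model) : false;
  return effort === 'xhigh' && !allowXHigh ? undefined : effort;
}

console.log(sanitizeReasoningEffort('gpt-5.2-pro', 'xhigh')); // 'xhigh' — kept
console.log(sanitizeReasoningEffort('gpt-5-pro', 'xhigh'));   // undefined — dropped
console.log(sanitizeReasoningEffort('gpt-5-pro', 'high'));    // 'high' — passes through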
@@ -239,6 +239,7 @@ const openAIParams: Record<string, SettingDefinition> = {
      ReasoningEffort.low,
      ReasoningEffort.medium,
      ReasoningEffort.high,
+      ReasoningEffort.xhigh,
    ],
    enumMappings: {
      [ReasoningEffort.unset]: 'com_ui_auto',
@@ -247,6 +248,7 @@ const openAIParams: Record<string, SettingDefinition> = {
      [ReasoningEffort.low]: 'com_ui_low',
      [ReasoningEffort.medium]: 'com_ui_medium',
      [ReasoningEffort.high]: 'com_ui_high',
+      [ReasoningEffort.xhigh]: 'com_ui_extra_high',
    },
    optionType: 'model',
    columnSpan: 4,
@@ -169,6 +169,7 @@ export enum ReasoningEffort {
  low = 'low',
  medium = 'medium',
  high = 'high',
+  xhigh = 'xhigh',
 }
 
 export enum ReasoningSummary {
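One detail worth noting about the shared gate: /^gpt-5\.2/ is anchored to the start of the string, so only model names that literally begin with 'gpt-5.2' enable the new option. The snippet below just demonstrates that matching behaviour; the inputs are illustrative assumptions, not cases taken from the test suite.

// Matching behaviour of the gate used in both the client and getOpenAILLMConfig.
const allowXHigh = (model: string): boolean => /^gpt-5\.2/.test(model);

console.log(allowXHigh('gpt-5.2'));        // true
console.log(allowXHigh('gpt-5.2-pro'));    // true
console.log(allowXHigh('gpt-5-pro'));      // false
console.log(allowXHigh('openai/gpt-5.2')); // false — a prefixed model ID does not match the anchored pattern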