🔧 fix: Clean empty strings from model_parameters for Agents/OpenAI (#11248)
Some checks are pending
Docker Dev Branch Images Build / build (Dockerfile, lc-dev, node) (push) Waiting to run
Docker Dev Branch Images Build / build (Dockerfile.multi, lc-dev-api, api-build) (push) Waiting to run
Docker Dev Images Build / build (Dockerfile, librechat-dev, node) (push) Waiting to run
Docker Dev Images Build / build (Dockerfile.multi, librechat-dev-api, api-build) (push) Waiting to run
Sync Locize Translations & Create Translation PR / Sync Translation Keys with Locize (push) Waiting to run
Sync Locize Translations & Create Translation PR / Create Translation PR on Version Published (push) Blocked by required conditions

This commit is contained in:
Danny Avila 2026-01-07 11:26:53 -05:00 committed by GitHub
parent 9845b3148e
commit 24e8a258cd
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 170 additions and 1 deletion

View file

@@ -56,6 +56,77 @@ describe('getOpenAILLMConfig', () => {
});
});
describe('Empty String Handling (Issue Fix)', () => {
  // Inputs shared by every case in this suite.
  const baseParams = { apiKey: 'test-api-key', streaming: true };
  // An empty string standing in where a numeric parameter is expected,
  // mimicking what a UI form can submit for an unset field.
  const emptyNumber = '' as unknown as number;

  it('should remove empty string values for numeric parameters', () => {
    const { llmConfig } = getOpenAILLMConfig({
      ...baseParams,
      modelOptions: {
        model: 'gpt-4',
        temperature: emptyNumber,
        topP: emptyNumber,
        max_tokens: emptyNumber,
      },
    });
    // Neither the original snake_case keys nor their camelCase mappings survive.
    for (const key of ['temperature', 'topP', 'maxTokens', 'max_tokens']) {
      expect(llmConfig).not.toHaveProperty(key);
    }
  });

  it('should remove empty string values for frequency and presence penalties', () => {
    const { llmConfig } = getOpenAILLMConfig({
      ...baseParams,
      modelOptions: {
        model: 'gpt-4',
        frequency_penalty: emptyNumber,
        presence_penalty: emptyNumber,
      },
    });
    for (const key of [
      'frequencyPenalty',
      'presencePenalty',
      'frequency_penalty',
      'presence_penalty',
    ]) {
      expect(llmConfig).not.toHaveProperty(key);
    }
  });

  it('should preserve valid numeric values while removing empty strings', () => {
    const { llmConfig } = getOpenAILLMConfig({
      ...baseParams,
      modelOptions: {
        model: 'gpt-4',
        temperature: 0.7,
        topP: emptyNumber,
        max_tokens: 4096,
      },
    });
    // Only the empty-string field is dropped; real numbers pass through
    // (max_tokens is mapped to the camelCase maxTokens key).
    expect(llmConfig).toHaveProperty('temperature', 0.7);
    expect(llmConfig).not.toHaveProperty('topP');
    expect(llmConfig).toHaveProperty('maxTokens', 4096);
  });

  it('should preserve zero values (not treat them as empty)', () => {
    const { llmConfig } = getOpenAILLMConfig({
      ...baseParams,
      modelOptions: {
        model: 'gpt-4',
        temperature: 0,
        frequency_penalty: 0,
        presence_penalty: 0,
      },
    });
    // Zero is a legitimate value and must not be confused with "unset".
    expect(llmConfig).toHaveProperty('temperature', 0);
    expect(llmConfig).toHaveProperty('frequencyPenalty', 0);
    expect(llmConfig).toHaveProperty('presencePenalty', 0);
  });
});
describe('OpenAI Reasoning Models (o1/o3/gpt-5)', () => {
const reasoningModels = [
'o1',

View file

@@ -139,6 +139,12 @@ export function getOpenAILLMConfig({
}): Pick<t.LLMConfigResult, 'llmConfig' | 'tools'> & {
azure?: t.AzureOptions;
} {
/** Clean empty strings from model options (e.g., temperature: "" should be removed) */
const cleanedModelOptions = removeNullishValues(
_modelOptions,
true,
) as Partial<t.OpenAIParameters>;
const {
reasoning_effort,
reasoning_summary,
@@ -147,7 +153,7 @@
frequency_penalty,
presence_penalty,
...modelOptions
} = _modelOptions;
} = cleanedModelOptions;
const llmConfig = Object.assign(
{