🤖 feat: Tool Calling Support for DeepSeek V3.2 + OpenRouter Reasoning (#10752)

* 🔧 chore: Update @librechat/agents to version 3.0.35

* feat: Add DeepSeek Model Pricing and Token Handling

- Introduced pricing and token limits for the 'deepseek-chat' and 'deepseek-reasoner' models, including prompt and completion rates (see the sketch after this list).
- Enhanced tests to validate pricing and token limits for DeepSeek models, ensuring correct handling of model-name variations and provider prefixes.
- Updated cache multipliers for DeepSeek models to reflect the new pricing structure.
- Improved max output token handling for DeepSeek models, ensuring consistency across endpoints.
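For reference, the rates pinned down by the new tests in this commit can be summarized with the following sketch. This is not the actual config from the codebase, only a minimal reconstruction of the 'tokenValues' and 'cacheTokenValues' entries the tests assert against (treating the rates as USD per 1M tokens is an assumption):

// Hedged sketch: the values below come directly from the test expectations
// in this diff; the real config's shape and surrounding entries may differ.
const tokenValues = {
  'deepseek-chat': { prompt: 0.28, completion: 0.42 },
  'deepseek-reasoner': { prompt: 0.28, completion: 0.42 },
};

const cacheTokenValues = {
  deepseek: { write: 0.28, read: 0.028 }, // generic key, also asserted by the tests
  'deepseek-chat': { write: 0.28, read: 0.028 },
  'deepseek-reasoner': { write: 0.28, read: 0.028 },
};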
Danny Avila 2025-12-01 14:27:08 -05:00 committed by GitHub
parent 026890cd27
commit 4202db1c99
7 changed files with 144 additions and 11 deletions

@@ -766,6 +766,78 @@ describe('Deepseek Model Tests', () => {
    const result = tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
    expect(result).toBe(true);
  });
  it('should return correct pricing for deepseek-chat', () => {
    expect(getMultiplier({ model: 'deepseek-chat', tokenType: 'prompt' })).toBe(
      tokenValues['deepseek-chat'].prompt,
    );
    expect(getMultiplier({ model: 'deepseek-chat', tokenType: 'completion' })).toBe(
      tokenValues['deepseek-chat'].completion,
    );
    expect(tokenValues['deepseek-chat'].prompt).toBe(0.28);
    expect(tokenValues['deepseek-chat'].completion).toBe(0.42);
  });
  it('should return correct pricing for deepseek-reasoner', () => {
    expect(getMultiplier({ model: 'deepseek-reasoner', tokenType: 'prompt' })).toBe(
      tokenValues['deepseek-reasoner'].prompt,
    );
    expect(getMultiplier({ model: 'deepseek-reasoner', tokenType: 'completion' })).toBe(
      tokenValues['deepseek-reasoner'].completion,
    );
    expect(tokenValues['deepseek-reasoner'].prompt).toBe(0.28);
    expect(tokenValues['deepseek-reasoner'].completion).toBe(0.42);
  });
  it('should handle DeepSeek model name variations with provider prefixes', () => {
    const modelVariations = [
      'deepseek/deepseek-chat',
      'openrouter/deepseek-chat',
      'deepseek/deepseek-reasoner',
    ];
    modelVariations.forEach((model) => {
      const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
      const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
      expect(promptMultiplier).toBe(0.28);
      expect(completionMultiplier).toBe(0.42);
    });
  });
  it('should return correct cache multipliers for DeepSeek models', () => {
    expect(getCacheMultiplier({ model: 'deepseek-chat', cacheType: 'write' })).toBe(
      cacheTokenValues['deepseek-chat'].write,
    );
    expect(getCacheMultiplier({ model: 'deepseek-chat', cacheType: 'read' })).toBe(
      cacheTokenValues['deepseek-chat'].read,
    );
    expect(getCacheMultiplier({ model: 'deepseek-reasoner', cacheType: 'write' })).toBe(
      cacheTokenValues['deepseek-reasoner'].write,
    );
    expect(getCacheMultiplier({ model: 'deepseek-reasoner', cacheType: 'read' })).toBe(
      cacheTokenValues['deepseek-reasoner'].read,
    );
  });
  it('should return correct cache pricing values for DeepSeek models', () => {
    expect(cacheTokenValues['deepseek-chat'].write).toBe(0.28);
    expect(cacheTokenValues['deepseek-chat'].read).toBe(0.028);
    expect(cacheTokenValues['deepseek-reasoner'].write).toBe(0.28);
    expect(cacheTokenValues['deepseek-reasoner'].read).toBe(0.028);
    expect(cacheTokenValues['deepseek'].write).toBe(0.28);
    expect(cacheTokenValues['deepseek'].read).toBe(0.028);
  });
  it('should handle DeepSeek cache multipliers with model variations', () => {
    const modelVariations = ['deepseek/deepseek-chat', 'openrouter/deepseek-reasoner'];
    modelVariations.forEach((model) => {
      const writeMultiplier = getCacheMultiplier({ model, cacheType: 'write' });
      const readMultiplier = getCacheMultiplier({ model, cacheType: 'read' });
      expect(writeMultiplier).toBe(0.28);
      expect(readMultiplier).toBe(0.028);
    });
  });
});
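// Note (hedged, not part of this diff): the provider-prefix tests above only
// require that multiplier lookup normalize variants such as
// 'deepseek/deepseek-chat' or 'openrouter/deepseek-reasoner' to a known pricing
// key. A minimal sketch of that matching, assuming a substring-based lookup
// (the helper name findPricingKey is hypothetical):
//
//   const findPricingKey = (model) =>
//     Object.keys(tokenValues).find((key) => model.includes(key));
//
//   findPricingKey('openrouter/deepseek-chat'); // -> 'deepseek-chat'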
describe('Qwen3 Model Tests', () => {