Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-16 16:30:15 +01:00
🤖 feat: Tool Calling Support for DeepSeek V3.2 + OpenRouter Reasoning (#10752)
Some checks are pending:
- Docker Dev Images Build / build (Dockerfile, librechat-dev, node) (push): Waiting to run
- Docker Dev Images Build / build (Dockerfile.multi, librechat-dev-api, api-build) (push): Waiting to run
- Sync Locize Translations & Create Translation PR / Sync Translation Keys with Locize (push): Waiting to run
- Sync Locize Translations & Create Translation PR / Create Translation PR on Version Published (push): Blocked by required conditions
* 🔧 chore: Update @librechat/agents to version 3.0.35
* ✨ feat: Add DeepSeek Model Pricing and Token Handling
  - Introduced pricing and token limits for 'deepseek-chat' and 'deepseek-reasoner' models, including prompt and completion rates.
  - Enhanced tests to validate pricing and token limits for DeepSeek models, ensuring correct handling of model variations and provider prefixes.
  - Updated cache multipliers for DeepSeek models to reflect the new pricing structure.
  - Improved max output token handling for DeepSeek models, ensuring consistency across different endpoints.
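The commit message calls out handling of model variations and provider prefixes (for example, a 'deepseek/' route prefix). Below is a minimal TypeScript sketch of that kind of lookup; the helper name resolveMaxOutputTokens, the prefix stripping, and the longest-match rule are illustrative assumptions, not LibreChat's actual implementation.

```typescript
// Illustrative sketch only. LibreChat's real resolution lives in its token
// utilities; this helper just demonstrates prefix- and variation-tolerant
// matching of the kind the commit message describes.
const deepseekMaxOutputs: Record<string, number> = {
  deepseek: 8000,
  'deepseek-chat': 8000,
  'deepseek-reasoner': 64000,
  'deepseek-r1': 64000,
  'deepseek-v3': 8000,
};

function resolveMaxOutputTokens(model: string): number | undefined {
  // Drop a provider prefix such as 'deepseek/' or 'openrouter/deepseek/'.
  const name = model.split('/').pop() ?? model;
  // Exact match first, then the longest key the name starts with, so a
  // dated variation like 'deepseek-r1-0528' still resolves to 'deepseek-r1'.
  if (name in deepseekMaxOutputs) {
    return deepseekMaxOutputs[name];
  }
  const key = Object.keys(deepseekMaxOutputs)
    .filter((k) => name.startsWith(k))
    .sort((a, b) => b.length - a.length)[0];
  return key !== undefined ? deepseekMaxOutputs[key] : undefined;
}

console.log(resolveMaxOutputTokens('deepseek/deepseek-reasoner')); // 64000
console.log(resolveMaxOutputTokens('deepseek-chat')); // 8000
```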
Commit 4202db1c99 (parent 026890cd27)
7 changed files with 144 additions and 11 deletions
In package.json (dependency bump):

@@ -84,7 +84,7 @@
     "@azure/storage-blob": "^12.27.0",
     "@keyv/redis": "^4.3.3",
     "@langchain/core": "^0.3.79",
-    "@librechat/agents": "^3.0.34",
+    "@librechat/agents": "^3.0.35",
     "@librechat/data-schemas": "*",
     "@modelcontextprotocol/sdk": "^1.21.0",
     "axios": "^1.12.1",
In the token limits module (context-window sizes):

@@ -140,6 +140,7 @@ const anthropicModels = {
 const deepseekModels = {
   deepseek: 128000,
   'deepseek-chat': 128000,
+  'deepseek-reasoner': 128000,
   'deepseek-r1': 128000,
   'deepseek-v3': 128000,
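The deepseekModels map above carries context-window sizes (the total input budget), which are separate from the per-model output caps added in the next hunk. A hedged sketch of how such a map might be consumed; promptBudget is a hypothetical helper, not part of LibreChat.

```typescript
// Hypothetical consumer of the context-window map above; promptBudget is
// not a LibreChat function, just an illustration of context vs. output caps.
const deepseekContextWindows: Record<string, number> = {
  deepseek: 128000,
  'deepseek-chat': 128000,
  'deepseek-reasoner': 128000,
  'deepseek-r1': 128000,
  'deepseek-v3': 128000,
};

// Tokens left for the prompt once the desired output allowance is reserved.
function promptBudget(model: string, reservedForOutput: number): number {
  const contextWindow = deepseekContextWindows[model] ?? 128000; // family default
  return Math.max(0, contextWindow - reservedForOutput);
}

console.log(promptBudget('deepseek-reasoner', 64000)); // 64000
```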
In the same module (max output tokens):

@@ -347,11 +348,21 @@ const anthropicMaxOutputs = {
   'claude-3-7-sonnet': 128000,
 };
 
+/** Outputs from https://api-docs.deepseek.com/quick_start/pricing */
+const deepseekMaxOutputs = {
+  deepseek: 8000, // deepseek-chat default: 4K, max: 8K
+  'deepseek-chat': 8000,
+  'deepseek-reasoner': 64000, // default: 32K, max: 64K
+  'deepseek-r1': 64000,
+  'deepseek-v3': 8000,
+  'deepseek.r1': 64000,
+};
+
 export const maxOutputTokensMap = {
   [EModelEndpoint.anthropic]: anthropicMaxOutputs,
   [EModelEndpoint.azureOpenAI]: modelMaxOutputs,
-  [EModelEndpoint.openAI]: modelMaxOutputs,
-  [EModelEndpoint.custom]: modelMaxOutputs,
+  [EModelEndpoint.openAI]: { ...modelMaxOutputs, ...deepseekMaxOutputs },
+  [EModelEndpoint.custom]: { ...modelMaxOutputs, ...deepseekMaxOutputs },
 };
 
 /**
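The spread merge means the DeepSeek caps extend, rather than replace, the generic OpenAI and custom endpoint maps. Here is a simplified sketch of the resulting lookup; the modelMaxOutputs contents and the trimmed-down EModelEndpoint enum are stand-ins for illustration.

```typescript
// Simplified stand-ins: the real maps and EModelEndpoint enum are larger.
enum EModelEndpoint {
  openAI = 'openAI',
  custom = 'custom',
}

const modelMaxOutputs: Record<string, number> = { 'gpt-4o': 16384 };
const deepseekMaxOutputs: Record<string, number> = {
  'deepseek-chat': 8000,
  'deepseek-reasoner': 64000,
};

// The spread merge layers DeepSeek caps on top of the generic defaults.
const maxOutputTokensMap: Record<EModelEndpoint, Record<string, number>> = {
  [EModelEndpoint.openAI]: { ...modelMaxOutputs, ...deepseekMaxOutputs },
  [EModelEndpoint.custom]: { ...modelMaxOutputs, ...deepseekMaxOutputs },
};

// DeepSeek models served through an OpenAI-compatible or custom endpoint now
// resolve to DeepSeek-specific output caps; other models are untouched.
console.log(maxOutputTokensMap[EModelEndpoint.openAI]['deepseek-reasoner']); // 64000
console.log(maxOutputTokensMap[EModelEndpoint.custom]['gpt-4o']); // 16384
```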