Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-17 00:40:14 +01:00)
🤖 feat: Support o4-mini and o3 Models (#6928)
* feat: Add support for new OpenAI models (o4-mini, o3) and update related logic
* 🔧 fix: Rename 'resubmitFiles' to 'isResubmission' for consistency across types and hooks
* 🔧 fix: Replace hardcoded 'pending_req' with CacheKeys.PENDING_REQ for consistency in cache handling
* 🔧 fix: Update cache handling to use Time.ONE_MINUTE instead of a hardcoded TTL and streamline imports (sketched below)
* 🔧 fix: Enhance message handling logic to correctly identify parent messages and streamline imports in useSSE
This commit is contained in:
parent 88f4ad7c47
commit 52f146dd97

19 changed files with 69 additions and 53 deletions
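As context for the cache-handling bullets above, here is a minimal hypothetical sketch of the described change. The in-memory `cache` and the `setWithTTL` helper are illustrative assumptions, not LibreChat's actual API; `CacheKeys.PENDING_REQ` and `Time.ONE_MINUTE` are the constants named in the commit message:

```js
// Hypothetical in-memory cache standing in for LibreChat's real store.
const cache = new Map();

// Named constants replace the magic string and the hardcoded TTL.
const CacheKeys = { PENDING_REQ: 'pending_req' };
const Time = { ONE_MINUTE: 60 * 1000 }; // milliseconds

// Illustrative helper: store a value and evict it after ttlMs.
function setWithTTL(key, value, ttlMs) {
  cache.set(key, value);
  setTimeout(() => cache.delete(key), ttlMs);
}

// Before the commit: setWithTTL('pending_req', count, 60000);
// After: named constants, same behavior.
setWithTTL(CacheKeys.PENDING_REQ, 1, Time.ONE_MINUTE);
```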
```diff
@@ -58,7 +58,7 @@ const payloadParser = ({ req, agent, endpoint }) => {
 const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]);

-const noSystemModelRegex = [/\bo1\b/gi];
+const noSystemModelRegex = [/\b(o\d)\b/gi];

 // const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory');
 // const { getFormattedMemories } = require('~/models/Memory');
```
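The change above broadens the no-system-prompt pattern from matching only `o1` to matching any o-series model id of the form `o` + digit. A minimal sketch of what the new regex does and does not match (the model-name list is illustrative):

```js
const noSystemModelRegex = [/\b(o\d)\b/gi];

const models = ['o1', 'o3', 'o4-mini', 'gpt-4o', 'gpt-4.1'];
for (const model of models) {
  const matches = noSystemModelRegex.some((re) => {
    // A /g/ regex keeps lastIndex state between .test() calls,
    // so reset it before reusing the same RegExp object.
    re.lastIndex = 0;
    return re.test(model);
  });
  console.log(model, matches);
}
// o1 true, o3 true, o4-mini true, gpt-4o false, gpt-4.1 false
```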
```diff
@@ -975,7 +975,7 @@ class AgentClient extends BaseClient {
         })
       )?.llmConfig ?? clientOptions;
     }
-    if (/\b(o1|o3)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
+    if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
       delete clientOptions.maxTokens;
     }
     try {
```
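Standalone, the guard behaves like the following sketch (the `clientOptions` object is illustrative). The motivation, as I read the diff, is that OpenAI's o-series reasoning models reject the legacy `max_tokens` parameter in favor of `max_completion_tokens`, so any preset value is dropped before the request is built:

```js
const clientOptions = { model: 'o4-mini', maxTokens: 1024, temperature: 1 };

// Strip `maxTokens` for any o-series model (o1, o3, o4-mini, ...).
if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
  delete clientOptions.maxTokens;
}

console.log(clientOptions); // { model: 'o4-mini', temperature: 1 }
```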