🤖 feat: Support o4-mini and o3 Models (#6928)

* feat: Add support for new OpenAI models (o4-mini, o3) and update related logic

* 🔧 fix: Rename 'resubmitFiles' to 'isResubmission' for consistency across types and hooks

* 🔧 fix: Replace hardcoded 'pending_req' with CacheKeys.PENDING_REQ for consistency in cache handling

* 🔧 fix: Update cache handling to use Time.ONE_MINUTE instead of hardcoded TTL and streamline imports

* 🔧 fix: Enhance message handling logic to correctly identify parent messages and streamline imports in useSSE
This commit is contained in:
Danny Avila 2025-04-17 00:40:26 -04:00 committed by GitHub
parent 88f4ad7c47
commit 52f146dd97
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 69 additions and 53 deletions

View file

@@ -2,7 +2,9 @@ const z = require('zod');
const { EModelEndpoint } = require('librechat-data-provider');
const openAIModels = {
'o4-mini': 200000,
'o3-mini': 195000, // -5000 from max
o3: 200000,
o1: 195000, // -5000 from max
'o1-mini': 127500, // -500 from max
'o1-preview': 127500, // -500 from max

View file

@@ -340,6 +340,15 @@ describe('getModelMaxTokens', () => {
expect(getModelMaxTokens('o1-preview-something')).toBe(o1PreviewTokens);
expect(getModelMaxTokens('openai/o1-preview-something')).toBe(o1PreviewTokens);
});
test('should return correct max context tokens for o4-mini and o3', () => {
const o4MiniTokens = maxTokensMap[EModelEndpoint.openAI]['o4-mini'];
const o3Tokens = maxTokensMap[EModelEndpoint.openAI]['o3'];
expect(getModelMaxTokens('o4-mini')).toBe(o4MiniTokens);
expect(getModelMaxTokens('openai/o4-mini')).toBe(o4MiniTokens);
expect(getModelMaxTokens('o3')).toBe(o3Tokens);
expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens);
});
});
describe('matchModelName', () => {