Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-17 00:40:14 +01:00
✨ feat: GPT-5 Token Limits, Rates, Icon, Reasoning Support
This commit is contained in:
parent 8a5047c456
commit 430557676d

5 changed files with 151 additions and 4 deletions
@@ -19,6 +19,9 @@ const openAIModels = {
   'gpt-4.1': 1047576,
   'gpt-4.1-mini': 1047576,
   'gpt-4.1-nano': 1047576,
+  'gpt-5': 400000,
+  'gpt-5-mini': 400000,
+  'gpt-5-nano': 400000,
   'gpt-4o': 127500, // -500 from max
   'gpt-4o-mini': 127500, // -500 from max
   'gpt-4o-2024-05-13': 127500, // -500 from max
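The new entries give the GPT-5 models a 400,000-token context window. A map like this is just model name → token count; below is a minimal sketch of how a caller could budget prompt space against it. The helper name and the 8,000-token reserve are illustrative assumptions, not part of this change.

// Illustrative only: budget prompt tokens against a context-window map like the one above.
const openAIModels = {
  'gpt-5': 400000,
  'gpt-5-mini': 400000,
  'gpt-5-nano': 400000,
};

// Hypothetical helper, not part of this diff: reserve room for the completion,
// and whatever remains is available for the prompt.
function remainingPromptBudget(model, reserveForCompletion = 8000) {
  const contextWindow = openAIModels[model] ?? 0;
  return Math.max(contextWindow - reserveForCompletion, 0);
}

console.log(remainingPromptBudget('gpt-5')); // 392000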
@@ -253,6 +256,9 @@ const modelMaxOutputs = {
   o1: 32268, // -500 from max: 32,768
   'o1-mini': 65136, // -500 from max: 65,536
   'o1-preview': 32268, // -500 from max: 32,768
+  'gpt-5': 128000,
+  'gpt-5-mini': 128000,
+  'gpt-5-nano': 128000,
   'gpt-oss-20b': 131000,
   'gpt-oss-120b': 131000,
   system_default: 1024,
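Output limits are capped separately at 128,000 tokens for the GPT-5 family, with system_default as the catch-all. Here is a hedged sketch of a lookup with that fallback; the helper name is illustrative, not taken from the codebase.

// Illustrative only: resolve a model's max output tokens, falling back to
// system_default when the model is not listed.
const modelMaxOutputs = {
  'gpt-5': 128000,
  'gpt-5-mini': 128000,
  'gpt-5-nano': 128000,
  system_default: 1024,
};

// Hypothetical helper, not part of this diff.
function resolveMaxOutput(model) {
  return modelMaxOutputs[model] ?? modelMaxOutputs.system_default;
}

console.log(resolveMaxOutput('gpt-5'));         // 128000
console.log(resolveMaxOutput('unknown-model')); // 1024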
@@ -156,6 +156,35 @@ describe('getModelMaxTokens', () => {
     );
   });
 
+  test('should return correct tokens for gpt-5 matches', () => {
+    expect(getModelMaxTokens('gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
+    expect(getModelMaxTokens('gpt-5-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
+    expect(getModelMaxTokens('openai/gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
+    expect(getModelMaxTokens('gpt-5-2025-01-30')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-5'],
+    );
+  });
+
+  test('should return correct tokens for gpt-5-mini matches', () => {
+    expect(getModelMaxTokens('gpt-5-mini')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini']);
+    expect(getModelMaxTokens('gpt-5-mini-preview')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
+    );
+    expect(getModelMaxTokens('openai/gpt-5-mini')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
+    );
+  });
+
+  test('should return correct tokens for gpt-5-nano matches', () => {
+    expect(getModelMaxTokens('gpt-5-nano')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano']);
+    expect(getModelMaxTokens('gpt-5-nano-preview')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
+    );
+    expect(getModelMaxTokens('openai/gpt-5-nano')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
+    );
+  });
+
   test('should return correct tokens for Anthropic models', () => {
     const models = [
       'claude-2.1',
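These assertions pin the matching rules for context windows: provider-prefixed ('openai/gpt-5'), suffixed ('gpt-5-preview'), and dated ('gpt-5-2025-01-30') names all resolve to the base 'gpt-5' entry. A minimal usage sketch of those same calls follows; the import paths are assumptions, mirroring the spec's own require('./tokens') but not shown in this diff.

// Sketch only: import paths are assumed.
const { getModelMaxTokens, maxTokensMap } = require('./tokens');
const { EModelEndpoint } = require('librechat-data-provider'); // assumed source of the enum

const base = maxTokensMap[EModelEndpoint.openAI]['gpt-5']; // 400000 in this change

// Each variant resolves to the same base entry:
console.log(getModelMaxTokens('gpt-5') === base);            // true
console.log(getModelMaxTokens('gpt-5-preview') === base);    // true
console.log(getModelMaxTokens('openai/gpt-5') === base);     // true
console.log(getModelMaxTokens('gpt-5-2025-01-30') === base); // true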
@@ -363,6 +392,19 @@ describe('getModelMaxTokens', () => {
     });
   });
 
+  test('should return correct max output tokens for GPT-5 models', () => {
+    const { getModelMaxOutputTokens } = require('./tokens');
+    ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
+      expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
+      expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
+        maxOutputTokensMap[EModelEndpoint.openAI][model],
+      );
+      expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
+        maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
+      );
+    });
+  });
+
   test('should return correct max output tokens for GPT-OSS models', () => {
     const { getModelMaxOutputTokens } = require('./tokens');
     ['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
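The same pattern covers output caps: per the test, calling getModelMaxOutputTokens without an endpoint returns the OpenAI value, and the Azure endpoint mirrors it. A short sketch of the calls the test exercises, with imports assumed as in the previous sketch:

// Sketch only: imports assumed, mirroring the spec above.
const { getModelMaxOutputTokens } = require('./tokens');
const { EModelEndpoint } = require('librechat-data-provider'); // assumed source of the enum

// Per the test, all three calls resolve to 128000 for 'gpt-5' in this change:
getModelMaxOutputTokens('gpt-5');                             // no endpoint: OpenAI map entry
getModelMaxOutputTokens('gpt-5', EModelEndpoint.openAI);      // explicit OpenAI endpoint
getModelMaxOutputTokens('gpt-5', EModelEndpoint.azureOpenAI); // Azure map mirrors the OpenAI value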
@@ -446,6 +488,25 @@ describe('matchModelName', () => {
     expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
   });
 
+  it('should return the closest matching key for gpt-5 matches', () => {
+    expect(matchModelName('openai/gpt-5')).toBe('gpt-5');
+    expect(matchModelName('gpt-5-preview')).toBe('gpt-5');
+    expect(matchModelName('gpt-5-2025-01-30')).toBe('gpt-5');
+    expect(matchModelName('gpt-5-2025-01-30-0130')).toBe('gpt-5');
+  });
+
+  it('should return the closest matching key for gpt-5-mini matches', () => {
+    expect(matchModelName('openai/gpt-5-mini')).toBe('gpt-5-mini');
+    expect(matchModelName('gpt-5-mini-preview')).toBe('gpt-5-mini');
+    expect(matchModelName('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
+  });
+
+  it('should return the closest matching key for gpt-5-nano matches', () => {
+    expect(matchModelName('openai/gpt-5-nano')).toBe('gpt-5-nano');
+    expect(matchModelName('gpt-5-nano-preview')).toBe('gpt-5-nano');
+    expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
+  });
+
   // Tests for Google models
   it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
     expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');
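These cases pin the normalization rules: provider prefixes are stripped, and '-preview' or date suffixes collapse to the longest known base key, so 'gpt-5-mini-preview' maps to 'gpt-5-mini' rather than 'gpt-5'. One way such matching could be written, purely as an illustration and not LibreChat's actual implementation:

// Illustrative sketch of longest-key prefix matching against a token map;
// not the real matchModelName, just consistent with the expectations above.
function matchModelNameSketch(modelName, tokenMap) {
  // Strip a provider prefix such as 'openai/'.
  const name = modelName.includes('/') ? modelName.split('/').pop() : modelName;
  if (tokenMap[name]) {
    return name;
  }
  // Prefer the longest key that prefixes the input, so 'gpt-5-mini-preview'
  // matches 'gpt-5-mini' rather than 'gpt-5'.
  const keys = Object.keys(tokenMap)
    .filter((key) => name.startsWith(key))
    .sort((a, b) => b.length - a.length);
  return keys[0] ?? modelName;
}

// matchModelNameSketch('gpt-5-mini-preview', { 'gpt-5': 400000, 'gpt-5-mini': 400000 }) === 'gpt-5-mini'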