🚀 feat: Support for GPT-4 Turbo/0125 Models (#1643)

This commit is contained in:
Danny Avila 2024-01-25 22:57:18 -05:00 committed by GitHub
parent d0730d2515
commit fcbaa74e4a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 44 additions and 5 deletions

View file

@@ -101,7 +101,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
DEBUG_OPENAI=false

View file

@@ -35,6 +35,10 @@ const getValueKey = (model, endpoint) => {
return '4k';
} else if (modelName.includes('gpt-4-1106')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-0125')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-turbo')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-32k')) {
return '32k';
} else if (modelName.includes('gpt-4')) {

View file

@@ -84,6 +84,12 @@ describe('getMultiplier', () => {
expect(getMultiplier({ tokenType: 'completion', model: 'gpt-4-1106-vision-preview' })).toBe(
tokenValues['gpt-4-1106'].completion,
);
expect(getMultiplier({ tokenType: 'completion', model: 'gpt-4-0125-preview' })).toBe(
tokenValues['gpt-4-1106'].completion,
);
expect(getMultiplier({ tokenType: 'completion', model: 'gpt-4-turbo-vision-preview' })).toBe(
tokenValues['gpt-4-1106'].completion,
);
});
it('should return defaultRate if derived valueKey does not match any known patterns', () => {

View file

@@ -45,13 +45,15 @@ const openAIModels = {
'gpt-4-32k': 32758, // -10 from max
'gpt-4-32k-0314': 32758, // -10 from max
'gpt-4-32k-0613': 32758, // -10 from max
'gpt-4-1106': 127990, // -10 from max
'gpt-4-0125': 127990, // -10 from max
'gpt-4-turbo': 127990, // -10 from max
'gpt-3.5-turbo': 4092, // -5 from max
'gpt-3.5-turbo-0613': 4092, // -5 from max
'gpt-3.5-turbo-0301': 4092, // -5 from max
'gpt-3.5-turbo-16k': 16375, // -10 from max
'gpt-3.5-turbo-16k-0613': 16375, // -10 from max
'gpt-3.5-turbo-1106': 16375, // -10 from max
'gpt-4-1106': 127990, // -10 from max
'mistral-': 31990, // -10 from max
};
@@ -145,8 +147,9 @@ function matchModelName(modelName, endpoint = EModelEndpoint.openAI) {
const keys = Object.keys(tokensMap);
for (let i = keys.length - 1; i >= 0; i--) {
if (modelName.includes(keys[i])) {
return keys[i];
const modelKey = keys[i];
if (modelName.includes(modelKey)) {
return modelKey;
}
}

View file

@@ -80,6 +80,20 @@ describe('getModelMaxTokens', () => {
);
});
// 01/25 Update
test('should return correct tokens for gpt-4-turbo/0125 matches', () => {
expect(getModelMaxTokens('gpt-4-turbo')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-4-turbo'],
);
expect(getModelMaxTokens('gpt-4-turbo-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-4-turbo'],
);
expect(getModelMaxTokens('gpt-4-0125')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4-0125']);
expect(getModelMaxTokens('gpt-4-0125-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-4-0125'],
);
});
test('should return correct tokens for Anthropic models', () => {
const models = [
'claude-2.1',
@@ -166,6 +180,16 @@ describe('matchModelName', () => {
expect(matchModelName('gpt-4-1106-vision-preview')).toBe('gpt-4-1106');
});
// 01/25 Update
it('should return the closest matching key for gpt-4-turbo/0125 matches', () => {
expect(matchModelName('openai/gpt-4-0125')).toBe('gpt-4-0125');
expect(matchModelName('gpt-4-turbo-preview')).toBe('gpt-4-turbo');
expect(matchModelName('gpt-4-turbo-vision-preview')).toBe('gpt-4-turbo');
expect(matchModelName('gpt-4-0125')).toBe('gpt-4-0125');
expect(matchModelName('gpt-4-0125-preview')).toBe('gpt-4-0125');
expect(matchModelName('gpt-4-0125-vision-preview')).toBe('gpt-4-0125');
});
// Tests for Google models
it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');

View file

@@ -317,7 +317,7 @@ DEBUG_OPENAI=false
- Leave it blank or commented out to use internal settings.
```bash
OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
```
- Titling is enabled by default when initiating a conversation.

View file

@@ -93,6 +93,8 @@ export const defaultModels = {
[EModelEndpoint.openAI]: [
'gpt-3.5-turbo-16k-0613',
'gpt-3.5-turbo-16k',
'gpt-4-turbo-preview',
'gpt-4-0125-preview',
'gpt-4-1106-preview',
'gpt-3.5-turbo',
'gpt-3.5-turbo-1106',