v0.7.1 (#2502)

* chore: make openai package definition explicit

* v0.7.1

* chore: gpt-4-vision correct context length

* add `llava` to vision models list
Danny Avila 2024-04-23 08:57:20 -04:00 committed by GitHub
parent 0ae98ff011
commit 3df4fac118
14 changed files with 31 additions and 15 deletions


@@ -52,6 +52,8 @@ const getValueKey = (model, endpoint) => {
     return 'gpt-3.5-turbo-1106';
   } else if (modelName.includes('gpt-3.5')) {
     return '4k';
+  } else if (modelName.includes('gpt-4-vision')) {
+    return 'gpt-4-1106';
   } else if (modelName.includes('gpt-4-1106')) {
     return 'gpt-4-1106';
   } else if (modelName.includes('gpt-4-0125')) {
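
The hunk above extends an if/else chain that maps incoming model names to a token-pricing key. The snippet below is a hedged, simplified stand-in (not the project's actual `getValueKey`, whose other branches and helpers are not shown in this diff); it only illustrates why `gpt-4-vision` gets its own check and why it reuses the `gpt-4-1106` key shared by the 128k-context GPT-4 turbo variants.

```js
// Hypothetical sketch of a model-name → value-key mapper in the spirit of getValueKey.
// Branch order matters: 'gpt-4-vision' is matched explicitly and mapped to the same
// 'gpt-4-1106' key as the other 128k-context GPT-4 models.
const getValueKeySketch = (model = '') => {
  const modelName = model.toLowerCase();
  if (modelName.includes('gpt-3.5-turbo-1106')) return 'gpt-3.5-turbo-1106';
  if (modelName.includes('gpt-3.5')) return '4k';
  if (modelName.includes('gpt-4-vision')) return 'gpt-4-1106'; // added in this commit
  if (modelName.includes('gpt-4-1106')) return 'gpt-4-1106';
  if (modelName.includes('gpt-4-0125')) return 'gpt-4-1106';
  return undefined;
};

// Mirrors two of the new test expectations in the spec hunk below:
console.log(getValueKeySketch('gpt-4-vision-preview')); // 'gpt-4-1106'
console.log(getValueKeySketch('openai/gpt-4-1106'));    // 'gpt-4-1106'
```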


@@ -34,6 +34,13 @@ describe('getValueKey', () => {
     expect(getValueKey('openai/gpt-4-1106')).toBe('gpt-4-1106');
     expect(getValueKey('gpt-4-1106/openai/')).toBe('gpt-4-1106');
   });
+
+  it('should return "gpt-4-1106" for model type of "gpt-4-1106"', () => {
+    expect(getValueKey('gpt-4-vision-preview')).toBe('gpt-4-1106');
+    expect(getValueKey('openai/gpt-4-1106')).toBe('gpt-4-1106');
+    expect(getValueKey('gpt-4-turbo')).toBe('gpt-4-1106');
+    expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
+  });
 });
 
 describe('getMultiplier', () => {


@@ -1,6 +1,6 @@
 {
   "name": "@librechat/backend",
-  "version": "0.7.0",
+  "version": "0.7.1",
   "description": "",
   "scripts": {
     "start": "echo 'please run this from the root directory'",
@@ -74,7 +74,7 @@
     "multer": "^1.4.5-lts.1",
     "nodejs-gpt": "^1.37.4",
     "nodemailer": "^6.9.4",
-    "openai": "^4.36.0",
+    "openai": "4.36.0",
     "openai-chat-tokens": "^0.2.8",
     "openid-client": "^5.4.2",
     "passport": "^0.6.0",


@@ -49,6 +49,7 @@ const openAIModels = {
   'gpt-4-1106': 127990, // -10 from max
   'gpt-4-0125': 127990, // -10 from max
   'gpt-4-turbo': 127990, // -10 from max
+  'gpt-4-vision': 127990, // -10 from max
   'gpt-3.5-turbo': 16375, // -10 from max
   'gpt-3.5-turbo-0613': 4092, // -5 from max
   'gpt-3.5-turbo-0301': 4092, // -5 from max
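
The new `'gpt-4-vision'` entry gives vision models the same context budget as the other GPT-4 turbo variants (127,990 tokens, i.e. 10 below the advertised 128k maximum). The spec hunk that follows exercises both exact and partial matches; the sketch here is a hedged illustration of how such a lookup could work (`openAIModelsSketch` and `getModelMaxTokensSketch` are stand-ins, not the project's real `maxTokensMap` or `getModelMaxTokens`).

```js
// Hypothetical sketch: try an exact match first, then fall back to the longest map key
// the model name contains, so 'openai/gpt-4-vision-preview' resolves to 'gpt-4-vision'.
const openAIModelsSketch = {
  'gpt-4-1106': 127990,
  'gpt-4-0125': 127990,
  'gpt-4-turbo': 127990,
  'gpt-4-vision': 127990,
  'gpt-3.5-turbo': 16375,
};

const getModelMaxTokensSketch = (modelName, tokensMap = openAIModelsSketch) => {
  if (tokensMap[modelName] !== undefined) {
    return tokensMap[modelName]; // exact match
  }
  const key = Object.keys(tokensMap)
    .filter((k) => modelName.includes(k))
    .sort((a, b) => b.length - a.length)[0]; // prefer the most specific key
  return key ? tokensMap[key] : undefined;
};

console.log(getModelMaxTokensSketch('gpt-4-vision'));                // 127990 (exact match)
console.log(getModelMaxTokensSketch('openai/gpt-4-vision-preview')); // 127990 (partial match)
```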


@@ -59,6 +59,12 @@ describe('getModelMaxTokens', () => {
     expect(getModelMaxTokens('gpt-4-1106')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4-1106']);
   });
+
+  test('should return correct tokens for gpt-4-vision exact match', () => {
+    expect(getModelMaxTokens('gpt-4-vision')).toBe(
+      maxTokensMap[EModelEndpoint.openAI]['gpt-4-vision'],
+    );
+  });
 
   test('should return correct tokens for gpt-3.5-turbo-1106 partial match', () => {
     expect(getModelMaxTokens('something-/gpt-3.5-turbo-1106')).toBe(
       maxTokensMap[EModelEndpoint.openAI]['gpt-3.5-turbo-1106'],