🚀 feat: gpt-4o (#2692)
* 🚀 feat: gpt-4o
* update readme.md
* feat: Add new test case for getMultiplier function
* feat: Refactor getMultiplier function to use valueKey variable
Commit 638ac5bba6 (parent 5920672a8c)
9 changed files with 32 additions and 8 deletions
Environment configuration example — gpt-4o added to the commented model lists:

@@ -140,7 +140,7 @@ GOOGLE_KEY=user_provided
 #============#
 
 OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
 
 DEBUG_OPENAI=false
 
@@ -162,7 +162,7 @@ DEBUG_OPENAI=false
 
 ASSISTANTS_API_KEY=user_provided
 # ASSISTANTS_BASE_URL=
-# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
+# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
 
 #============#
 # OpenRouter #
@@ -174,7 +174,7 @@ ASSISTANTS_API_KEY=user_provided
 # Plugins #
 #============#
 
-# PLUGIN_MODELS=gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
+# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
 
 DEBUG_PLUGINS=true
 
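These OPENAI_MODELS / ASSISTANTS_MODELS / PLUGIN_MODELS lines are comma-separated overrides; uncommenting one restricts the endpoint to exactly that list, with gpt-4o now listed first. As a rough illustration only (not LibreChat's actual loader), such a variable is typically consumed like this:

// Hypothetical sketch: turn a comma-separated model override into a list,
// falling back to a default set when the variable stays commented out.
const DEFAULT_OPENAI_MODELS = ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo'];

function parseModelList(raw, fallback = DEFAULT_OPENAI_MODELS) {
  if (!raw) {
    return fallback;
  }
  return raw
    .split(',')
    .map((m) => m.trim())
    .filter(Boolean);
}

console.log(parseModelList(process.env.OPENAI_MODELS));
// e.g. OPENAI_MODELS="gpt-4o,gpt-4-turbo" -> ['gpt-4o', 'gpt-4-turbo']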
README.md — gpt-4o noted in the multimodal feature list:

@@ -50,7 +50,7 @@
 - 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
 - 🌿 Fork Messages & Conversations for Advanced Context control
 - 💬 Multimodal Chat:
-  - Upload and analyze images with Claude 3, GPT-4, and Gemini Vision 📸
+  - Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
   - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
   - Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
     - Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
Token pricing and value-key resolution (tokenValues / getValueKey):

@@ -12,6 +12,7 @@ const tokenValues = {
   '4k': { prompt: 1.5, completion: 2 },
   '16k': { prompt: 3, completion: 4 },
   'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
+  'gpt-4o': { prompt: 5, completion: 15 },
   'gpt-4-1106': { prompt: 10, completion: 30 },
   'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
   'claude-3-opus': { prompt: 15, completion: 75 },
@@ -52,6 +53,8 @@ const getValueKey = (model, endpoint) => {
     return 'gpt-3.5-turbo-1106';
   } else if (modelName.includes('gpt-3.5')) {
     return '4k';
+  } else if (modelName.includes('gpt-4o')) {
+    return 'gpt-4o';
   } else if (modelName.includes('gpt-4-vision')) {
     return 'gpt-4-1106';
   } else if (modelName.includes('gpt-4-1106')) {
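Taken together, the two hunks give gpt-4o its own pricing entry (prompt 5, completion 15, in line with OpenAI's launch pricing per 1M tokens) and teach getValueKey to map any model id containing "gpt-4o" to that entry before the gpt-4-vision / gpt-4-1106 fallbacks are reached. Below is a minimal standalone sketch of how a value key plus multipliers can combine into a cost estimate; it mirrors names from the diff but is illustrative, not LibreChat's exact API, and it assumes the multipliers are USD per 1M tokens:

// Illustrative only: a trimmed-down value-key resolver and cost estimate.
// tokenValues mirrors entries touched in this diff; estimateCostUSD is hypothetical.
const tokenValues = {
  'gpt-4o': { prompt: 5, completion: 15 },
  'gpt-4-1106': { prompt: 10, completion: 30 },
  'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
};

function getValueKey(model) {
  const modelName = model.toLowerCase();
  if (modelName.includes('gpt-4o')) {
    return 'gpt-4o';
  } else if (modelName.includes('gpt-4-vision') || modelName.includes('gpt-4-1106')) {
    return 'gpt-4-1106';
  }
  return 'gpt-3.5-turbo-0125';
}

// Assumption: prompt/completion multipliers are USD per 1M tokens.
function estimateCostUSD(model, promptTokens, completionTokens) {
  const { prompt, completion } = tokenValues[getValueKey(model)];
  return (promptTokens * prompt + completionTokens * completion) / 1e6;
}

console.log(estimateCostUSD('gpt-4o-2024-05-13', 1200, 300)); // 0.0105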
Tests for getValueKey and getMultiplier:

@@ -41,6 +41,13 @@ describe('getValueKey', () => {
     expect(getValueKey('gpt-4-turbo')).toBe('gpt-4-1106');
     expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
   });
+
+  it('should return "gpt-4o" for model type of "gpt-4o"', () => {
+    expect(getValueKey('gpt-4o-2024-05-13')).toBe('gpt-4o');
+    expect(getValueKey('openai/gpt-4o')).toBe('gpt-4o');
+    expect(getValueKey('gpt-4o-turbo')).toBe('gpt-4o');
+    expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
+  });
 });
 
 describe('getMultiplier', () => {
@@ -84,6 +91,17 @@ describe('getMultiplier', () => {
     );
   });
 
+  it('should return the correct multiplier for gpt-4o', () => {
+    const valueKey = getValueKey('gpt-4o-2024-05-13');
+    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
+    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
+      tokenValues['gpt-4o'].completion,
+    );
+    expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
+      tokenValues['gpt-4-1106'].completion,
+    );
+  });
+
   it('should derive the valueKey from the model if not provided for new models', () => {
     expect(
       getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
Context-window limits (openAIModels):

@@ -48,6 +48,7 @@ const openAIModels = {
   'gpt-4-32k-0613': 32758, // -10 from max
   'gpt-4-1106': 127990, // -10 from max
   'gpt-4-0125': 127990, // -10 from max
+  'gpt-4o': 127990, // -10 from max
   'gpt-4-turbo': 127990, // -10 from max
   'gpt-4-vision': 127990, // -10 from max
   'gpt-3.5-turbo': 16375, // -10 from max
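Here gpt-4o is registered with the same 127,990-token window as the other 128k GPT-4 variants (10 tokens below the advertised maximum as a safety margin, per the existing comments). A rough sketch of how such a table can be consulted, resolving an exact id first and then the longest matching prefix; the helper name and matching rule below are assumptions, not necessarily what this file exports:

// Hypothetical lookup against a context-window table like openAIModels.
const openAIModels = {
  'gpt-4-1106': 127990,
  'gpt-4-0125': 127990,
  'gpt-4o': 127990,
  'gpt-4-turbo': 127990,
  'gpt-3.5-turbo': 16375,
};

function maxContextFor(model, table = openAIModels) {
  if (table[model] !== undefined) {
    return table[model]; // exact match, e.g. 'gpt-4o'
  }
  // Fall back to the longest key the model id starts with,
  // so 'gpt-4o-2024-05-13' resolves to the 'gpt-4o' entry.
  const key = Object.keys(table)
    .filter((k) => model.startsWith(k))
    .sort((a, b) => b.length - a.length)[0];
  return key ? table[key] : undefined;
}

console.log(maxContextFor('gpt-4o-2024-05-13')); // 127990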
package-lock.json (generated) — librechat-data-provider version bump:

@@ -29258,7 +29258,7 @@
     },
     "packages/data-provider": {
       "name": "librechat-data-provider",
-      "version": "0.6.3",
+      "version": "0.6.4",
       "license": "ISC",
       "dependencies": {
         "@types/js-yaml": "^4.0.9",
librechat-data-provider package manifest — matching version bump:

@@ -1,6 +1,6 @@
 {
   "name": "librechat-data-provider",
-  "version": "0.6.3",
+  "version": "0.6.4",
   "description": "data services for librechat apps",
   "main": "dist/index.js",
   "module": "dist/index.es.js",
Default model lists (defaultModels / visionModels):

@@ -390,6 +390,7 @@ export const defaultModels = {
     'claude-instant-1-100k',
   ],
   [EModelEndpoint.openAI]: [
+    'gpt-4o',
     'gpt-3.5-turbo-0125',
     'gpt-4-turbo',
     'gpt-4-turbo-2024-04-09',
@@ -461,6 +462,7 @@ export const supportsBalanceCheck = {
 };
 
 export const visionModels = [
+  'gpt-4o',
   'gpt-4-turbo',
   'gpt-4-vision',
   'llava',
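Placing gpt-4o at the head of the OpenAI defaultModels list surfaces it first in the model menu, and adding it to visionModels is what allows image uploads to be routed to it. A small sketch of the kind of check a client could perform against such a list (the substring matching here is an assumption for illustration):

// Illustrative check against a vision-capable model list.
const visionModels = ['gpt-4o', 'gpt-4-turbo', 'gpt-4-vision', 'llava'];

function supportsImageInput(model) {
  // Assumption: a model id qualifies if it contains any listed entry,
  // so 'openai/gpt-4o' and 'gpt-4o-2024-05-13' both pass.
  return visionModels.some((v) => model.includes(v));
}

console.log(supportsImageInput('gpt-4o-2024-05-13')); // true
console.log(supportsImageInput('gpt-3.5-turbo'));     // false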
Endpoint default settings (openAISettings / agentOptionSettings):

@@ -76,7 +76,7 @@ export const isImageVisionTool = (tool: FunctionTool | FunctionToolCall) =>
 
 export const openAISettings = {
   model: {
-    default: 'gpt-3.5-turbo',
+    default: 'gpt-4o',
   },
   temperature: {
     min: 0,
@@ -211,7 +211,7 @@ export enum EAgent {
 
 export const agentOptionSettings = {
   model: {
-    default: 'gpt-4-turbo',
+    default: 'gpt-4o',
   },
   temperature: {
     min: 0,
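With both openAISettings.model.default and agentOptionSettings.model.default switched to gpt-4o, new OpenAI conversations and plugin/agent runs start on the new model unless the user picks another. A minimal illustration of consuming such a settings object when building a preset; the buildPreset helper and the temperature values beyond min: 0 are hypothetical:

// Hypothetical preset builder reading defaults like the ones changed above.
const openAISettings = {
  model: { default: 'gpt-4o' },
  temperature: { min: 0, max: 2, default: 1 }, // max/default assumed for the sketch
};

function buildPreset(overrides = {}, settings = openAISettings) {
  return {
    model: overrides.model ?? settings.model.default,
    temperature: overrides.temperature ?? settings.temperature.default,
  };
}

console.log(buildPreset());                         // { model: 'gpt-4o', temperature: 1 }
console.log(buildPreset({ model: 'gpt-4-turbo' }));  // { model: 'gpt-4-turbo', temperature: 1 }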