feat(OpenAI, PaLM): Add support for new OpenAI models and codechat-bison (#516)

* feat(OpenAI, PaLM): add new models
refactor(chatgpt-client.js): use an object to map max tokens for each model (see the sketch below)
refactor(askChatGPTBrowser.js, askGPTPlugins.js, askOpenAI.js): comment out unused function calls and error handling
feat(askGoogle.js): add support for codechat-bison model
refactor(endpoints.js): add gpt-4-0613 and gpt-3.5-turbo-16k to available models for OpenAI and GPT plugins
refactor(EditPresetDialog.jsx): hide examples for the codechat-bison model in the Google endpoint
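
A minimal sketch of the per-model token-limit map mentioned above; the variable name, keys, and numbers here are illustrative and may differ from what chatgpt-client.js actually uses:

// Illustrative sketch only; the real map in chatgpt-client.js may use
// different keys, values, and a different variable name.
const maxTokensMap = {
  'gpt-4': 8191,
  'gpt-4-0613': 8191,
  'gpt-3.5-turbo': 4095,
  'gpt-3.5-turbo-0613': 4095,
  'gpt-3.5-turbo-16k': 15999,
};

// Look up the limit for the active model, falling back to a default.
const getMaxTokens = (model) => maxTokensMap[model] ?? 4095;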

style(EndpointOptionsPopover.jsx): add cn utility function import and use it to set additionalButton className

refactor(Google/Settings.jsx): conditionally render custom name and prompt prefix fields based on model type

The custom name and prompt prefix fields are now rendered conditionally based on the selected model: if the model name starts with 'codechat-', these fields are not rendered.
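
As an illustration only, the conditional could look roughly like the sketch below; the component, prop names, and markup are hypothetical and the real Google/Settings.jsx differs:

// Hypothetical sketch; not the actual Google/Settings.jsx markup.
import React from 'react';

export default function OptionalPromptFields({ model, customName, promptPrefix, onChange }) {
  // codechat- models do not use a custom name or prompt prefix, so render nothing.
  if (model?.startsWith('codechat-')) {
    return null;
  }
  return (
    <>
      <input
        value={customName ?? ''}
        placeholder="Custom Name"
        onChange={(e) => onChange('customName', e.target.value)}
      />
      <textarea
        value={promptPrefix ?? ''}
        placeholder="Prompt Prefix"
        onChange={(e) => onChange('promptPrefix', e.target.value)}
      />
    </>
  );
}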

refactor(Settings.jsx): remove duplicated code and wrap a section in a conditional statement based on a variable

style(Input): add z-index to Input component to fix overlapping issue
feat(GoogleOptions): disable the Examples button when the model starts with the 'codechat-' prefix

* feat(.env.example, endpoints.js): add PLUGIN_MODELS environment variable and use it to get plugin models in endpoints.js
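
Like OPENAI_MODELS, the new variable is read as a comma-separated list; an illustrative .env entry (the defaults actually shipped in .env.example may differ) could be:

# Illustrative example; see .env.example in the commit for the shipped defaults.
PLUGIN_MODELS=gpt-4,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0613,gpt-3.5-turbo-0301
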
Danny Avila 2023-06-13 16:42:01 -04:00 committed by GitHub
parent 42583e7344
commit 36a524a630
12 changed files with 168 additions and 131 deletions

@@ -3,7 +3,7 @@ const router = express.Router();
 const { availableTools } = require('../../app/langchain/tools');
 const getOpenAIModels = () => {
-  let models = ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'];
+  let models = ['gpt-4', 'gpt-4-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0301', 'text-davinci-003' ];
   if (process.env.OPENAI_MODELS) models = String(process.env.OPENAI_MODELS).split(',');
   return models;
@@ -16,6 +16,13 @@ const getChatGPTBrowserModels = () => {
   return models;
 };
+const getPluginModels = () => {
+  let models = ['gpt-4', 'gpt-4-0613', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0301'];
+  if (process.env.PLUGIN_MODELS) models = String(process.env.PLUGIN_MODELS).split(',');
+  return models;
+};
 let i = 0;
 router.get('/', async function (req, res) {
   let key, palmUser;
@@ -38,7 +45,7 @@ router.get('/', async function (req, res) {
   const google =
     key || palmUser
-      ? { userProvide: palmUser, availableModels: ['chat-bison', 'text-bison'] }
+      ? { userProvide: palmUser, availableModels: ['chat-bison', 'text-bison', 'codechat-bison'] }
       : false;
   const azureOpenAI = !!process.env.AZURE_OPENAI_API_KEY;
   const apiKey = process.env.OPENAI_API_KEY || process.env.AZURE_OPENAI_API_KEY;
@@ -46,7 +53,7 @@
     ? { availableModels: getOpenAIModels(), userProvide: apiKey === 'user_provided' }
     : false;
   const gptPlugins = apiKey
-    ? { availableModels: ['gpt-4', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'], availableTools }
+    ? { availableModels: getPluginModels(), availableTools }
     : false;
   const bingAI = process.env.BINGAI_TOKEN
     ? { userProvide: process.env.BINGAI_TOKEN == 'user_provided' }