LibreChat/api/server/routes/ask/askChatGPTBrowser.js
Danny Avila 36a524a630
feat(OpenAI, PaLM): Add model support for new OpenAI models and codechat-bison (#516)
* feat(OpenAI, PaLM): add new models
refactor(chatgpt-client.js): use object to map max tokens for each model
refactor(askChatGPTBrowser.js, askGPTPlugins.js, askOpenAI.js): comment out unused function calls and error handling
feat(askGoogle.js): add support for codechat-bison model
refactor(endpoints.js): add gpt-4-0613 and gpt-3.5-turbo-16k to available models for OpenAI and GPT plugins
refactor(EditPresetDialog.jsx): hide examples for codechat-bison model in google endpoint

style(EndpointOptionsPopover.jsx): add cn utility function import and use it to set additionalButton className

refactor(Google/Settings.jsx): conditionally render custom name and prompt prefix fields based on model type

The code has been refactored to conditionally render the custom name and prompt prefix fields based on the type of model selected. If the model starts with 'codechat-', the fields will not be rendered.

refactor(Settings.jsx): remove duplicated code and wrap a section in a conditional statement based on a variable

style(Input): add z-index to Input component to fix overlapping issue
feat(GoogleOptions): disable Examples button when model starts with 'codechat-' prefix

* feat(.env.example, endpoints.js): add PLUGIN_MODELS environment variable and use it to get plugin models in endpoints.js
2023-06-13 16:42:01 -04:00
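
The commit message above mentions refactoring chatgpt-client.js to "use object to map max tokens for each model". That change is not part of this file; below is a minimal sketch of the idea, where the names (maxTokensMap, getMaxTokens) and the model limits are illustrative assumptions, not the actual values in chatgpt-client.js:

// hypothetical sketch only; not the actual chatgpt-client.js code
const maxTokensMap = {
  'gpt-4': 8191,
  'gpt-4-0613': 8191,
  'gpt-3.5-turbo': 4095,
  'gpt-3.5-turbo-16k': 15999
};

// fall back to a conservative default when the model is unknown
const getMaxTokens = (model) => maxTokensMap[model] ?? 4095;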


const express = require('express');
const crypto = require('crypto');
const router = express.Router();
// const { getChatGPTBrowserModels } = require('../endpoints');
const { browserClient } = require('../../../app/');
const { saveMessage, getConvoTitle, saveConvo, getConvo } = require('../../../models');
const { handleError, sendMessage, createOnProgress, handleText } = require('./handlers');
const requireJwtAuth = require('../../../middleware/requireJwtAuth');
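
// Handles ask requests for the chatGPTBrowser endpoint: validates the payload,
// persists the user's message, then streams the model's reply back to the
// client as server-sent events via ask() below.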
router.post('/', requireJwtAuth, async (req, res) => {
  const {
    endpoint,
    text,
    overrideParentMessageId = null,
    parentMessageId,
    conversationId: oldConversationId
  } = req.body;
  if (text.length === 0) return handleError(res, { text: 'Prompt empty or too short' });
  if (endpoint !== 'chatGPTBrowser') return handleError(res, { text: 'Illegal request' });

  // build user message
  const conversationId = oldConversationId || crypto.randomUUID();
  const isNewConversation = !oldConversationId;
  const userMessageId = crypto.randomUUID();
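  // the all-zero UUID is a sentinel meaning "no parent", i.e. the first message of a conversation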
  const userParentMessageId = parentMessageId || '00000000-0000-0000-0000-000000000000';
  const userMessage = {
    messageId: userMessageId,
    sender: 'User',
    text,
    parentMessageId: userParentMessageId,
    conversationId,
    isCreatedByUser: true
  };

  // build endpoint option
  const endpointOption = {
    model: req.body?.model ?? 'text-davinci-002-render-sha',
    token: req.body?.token ?? null
  };

  // const availableModels = getChatGPTBrowserModels();
  // if (availableModels.find((model) => model === endpointOption.model) === undefined)
  //   return handleError(res, { text: 'Illegal request: model' });

  console.log('ask log', {
    userMessage,
    endpointOption,
    conversationId
  });
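
  // skip the upfront save when a parent override is supplied (e.g. a regenerate request)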
  if (!overrideParentMessageId) {
    await saveMessage(userMessage);
    await saveConvo(req.user.id, {
      ...userMessage,
      ...endpointOption,
      conversationId,
      endpoint
    });
  }

  // eslint-disable-next-line no-use-before-define
  return await ask({
    isNewConversation,
    userMessage,
    endpointOption,
    conversationId,
    preSendRequest: true,
    overrideParentMessageId,
    req,
    res
  });
});

const ask = async ({
  isNewConversation,
  userMessage,
  endpointOption,
  conversationId,
  overrideParentMessageId = null,
  req,
  res
}) => {
  let { text, parentMessageId: userParentMessageId, messageId: userMessageId } = userMessage;
  const userId = req.user.id;
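
  // open a server-sent event stream so partial tokens can be pushed to the client as they arrive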
  res.writeHead(200, {
    Connection: 'keep-alive',
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache, no-transform',
    'Access-Control-Allow-Origin': '*',
    'X-Accel-Buffering': 'no'
  });

  let responseMessageId = crypto.randomUUID();
  let getPartialMessage = null;
  try {
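    // while tokens stream in, persist the partial response at most once every 500 ms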
    let lastSavedTimestamp = 0;
    const { onProgress: progressCallback, getPartialText } = createOnProgress({
      onProgress: ({ text }) => {
        const currentTimestamp = Date.now();
        if (currentTimestamp - lastSavedTimestamp > 500) {
          lastSavedTimestamp = currentTimestamp;
          saveMessage({
            messageId: responseMessageId,
            sender: endpointOption?.chatGptLabel || 'ChatGPT',
            conversationId,
            parentMessageId: overrideParentMessageId || userMessageId,
            text: text,
            unfinished: true,
            cancelled: false,
            error: false
          });
        }
      }
    });
    getPartialMessage = getPartialText;

    const abortController = new AbortController();
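    // forward the prompt to the ChatGPT browser client; onEventMessage relays the
    // upstream conversation_id to the client as soon as it is known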
    let response = await browserClient({
      text,
      parentMessageId: userParentMessageId,
      conversationId,
      ...endpointOption,
      abortController,
      userId,
      onProgress: progressCallback.call(null, { res, text }),
      onEventMessage: (eventMessage) => {
        let data = null;
        try {
          data = JSON.parse(eventMessage.data);
        } catch (e) {
          return;
        }

        sendMessage(res, {
          message: { ...userMessage, conversationId: data.conversation_id },
          created: true
        });
      }
    });

    console.log('CLIENT RESPONSE', response);

    const newConversationId = response.conversationId || conversationId;
    const newUserMessageId = response.parentMessageId || userMessageId;
    const newResponseMessageId = response.messageId;

    // STEP1 generate response message
    response.text = response.response || '**ChatGPT refused to answer.**';

    let responseMessage = {
      conversationId: newConversationId,
      messageId: responseMessageId,
      newMessageId: newResponseMessageId,
      parentMessageId: overrideParentMessageId || newUserMessageId,
      text: await handleText(response),
      sender: endpointOption?.chatGptLabel || 'ChatGPT',
      unfinished: false,
      cancelled: false,
      error: false
    };
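
    // the message is saved with the provisional messageId plus the upstream newMessageId;
    // the local object then switches to the real id before it is sent to the client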
    await saveMessage(responseMessage);
    responseMessage.messageId = newResponseMessageId;

    // STEP2 update the conversation
    // First update conversationId if needed
    let conversationUpdate = { conversationId: newConversationId, endpoint: 'chatGPTBrowser' };
    if (conversationId != newConversationId) {
      if (isNewConversation) {
        // change the conversationId to the new one
        conversationUpdate = {
          ...conversationUpdate,
          conversationId: conversationId,
          newConversationId: newConversationId
        };
      } else {
        // create a new conversation
        conversationUpdate = {
          ...conversationUpdate,
          ...endpointOption
        };
      }
    }

    await saveConvo(req.user.id, conversationUpdate);
    conversationId = newConversationId;

    // STEP3 update the user message
    userMessage.conversationId = newConversationId;
    userMessage.messageId = newUserMessageId;

    // If the response has a parentMessageId, the fake userMessage.messageId should be updated to the real one.
    if (!overrideParentMessageId) {
      await saveMessage({
        ...userMessage,
        messageId: userMessageId,
        newMessageId: newUserMessageId
      });
    }
    userMessageId = newUserMessageId;

    sendMessage(res, {
      title: await getConvoTitle(req.user.id, conversationId),
      final: true,
      conversation: await getConvo(req.user.id, conversationId),
      requestMessage: userMessage,
      responseMessage: responseMessage
    });
    res.end();
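
    // for a brand-new conversation (first user turn), store the title reported by the upstream response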
    if (userParentMessageId == '00000000-0000-0000-0000-000000000000') {
      // const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
      const title = await response.details.title;
      await saveConvo(req.user.id, {
        conversationId: conversationId,
        title
      });
    }
  } catch (error) {
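    // include any partial text streamed before the failure along with the error message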
    const errorMessage = {
      messageId: responseMessageId,
      sender: 'ChatGPT',
      conversationId,
      parentMessageId: overrideParentMessageId || userMessageId,
      unfinished: false,
      cancelled: false,
      // error: true,
      text: `${getPartialMessage?.() ?? ''}\n\nError message: "${error.message}"`
    };
    await saveMessage(errorMessage);
    handleError(res, errorMessage);
  }
};
module.exports = router;