fix: customGpt persistence blind spot, also remove free gpt

This commit is contained in:
Danny Avila 2023-03-07 14:22:33 -05:00
parent da55ac3774
commit da61a2841f
5 changed files with 46 additions and 21 deletions

View file

@@ -7,8 +7,10 @@
## Updates ## Updates
<details open> <details open>
<summary><strong>2023-03-06</strong></summary> <summary><strong>2023-03-07</strong></summary>
Due to increased interest in the repo, I've dockerized the app as of this update for quick setup! See setup instructions below. I realize this still takes some time with installing docker dependencies, so it's on the roadmap to have a deployed demo. Besides this, I've made major improvements for a lot of the existing features across the board, mainly UI/UX. Due to increased interest in the repo, I've dockerized the app as of this update for quick setup! See setup instructions below. I realize this still takes some time with installing docker dependencies, so it's on the roadmap to have a deployed demo. Besides this, I've made major improvements for a lot of the existing features across the board, mainly UI/UX.
Also worth noting, the method to access the Free Version is no longer working, so I've removed it from model selection until further notice.
</details> </details>
<details> <details>

View file

@@ -75,17 +75,30 @@ module.exports = {
}, },
// getConvos: async () => await Conversation.find({}).sort({ created: -1 }).exec(), // getConvos: async () => await Conversation.find({}).sort({ created: -1 }).exec(),
getConvos: async (pageNumber = 1, pageSize = 12) => { getConvos: async (pageNumber = 1, pageSize = 12) => {
const skip = (pageNumber - 1) * pageSize; try {
// const limit = pageNumber * pageSize; const skip = (pageNumber - 1) * pageSize;
// const limit = pageNumber * pageSize;
const conversations = await Conversation.find({}) const conversations = await Conversation.find({})
.sort({ created: -1 }) .sort({ created: -1 })
.skip(skip) .skip(skip)
// .limit(limit) // .limit(limit)
.limit(pageSize) .limit(pageSize)
.exec(); .exec();
return conversations; return conversations;
} catch (error) {
console.log(error);
return { message: 'Error getting conversations' };
}
},
getConvo: async (conversationId) => {
try {
return await Conversation.findOne({ conversationId }).exec();
} catch (error) {
console.log(error);
return { message: 'Error getting single conversation' };
}
}, },
deleteConvos: async (filter) => { deleteConvos: async (filter) => {
let deleteCount = await Conversation.deleteMany(filter).exec(); let deleteCount = await Conversation.deleteMany(filter).exec();

View file

@@ -1,10 +1,11 @@
const { saveMessage, deleteMessages } = require('./Message'); const { saveMessage, deleteMessages } = require('./Message');
const { getCustomGpts, updateCustomGpt, updateByLabel, deleteCustomGpts } = require('./CustomGpt'); const { getCustomGpts, updateCustomGpt, updateByLabel, deleteCustomGpts } = require('./CustomGpt');
const { saveConvo } = require('./Conversation'); const { getConvo, saveConvo } = require('./Conversation');
module.exports = { module.exports = {
saveMessage, saveMessage,
deleteMessages, deleteMessages,
getConvo,
saveConvo, saveConvo,
getCustomGpts, getCustomGpts,
updateCustomGpt, updateCustomGpt,

View file

@@ -9,14 +9,13 @@ const {
customClient, customClient,
detectCode detectCode
} = require('../../app/'); } = require('../../app/');
const { saveMessage, deleteMessages, saveConvo } = require('../../models'); const { getConvo, saveMessage, deleteMessages, saveConvo } = require('../../models');
const { handleError, sendMessage } = require('./handlers'); const { handleError, sendMessage } = require('./handlers');
router.use('/bing', askBing); router.use('/bing', askBing);
router.post('/', async (req, res) => { router.post('/', async (req, res) => {
const { model, text, parentMessageId, conversationId, chatGptLabel, promptPrefix } = let { model, text, parentMessageId, conversationId, chatGptLabel, promptPrefix } = req.body;
req.body;
if (!text.trim().includes(' ') && text.length < 5) { if (!text.trim().includes(' ') && text.length < 5) {
return handleError(res, 'Prompt empty or too short'); return handleError(res, 'Prompt empty or too short');
} }
@@ -43,6 +42,15 @@ router.post('/', async (req, res) => {
client = browserClient; client = browserClient;
} }
if (model === 'chatgptCustom' && !chatGptLabel && conversationId) {
const convo = await getConvo({ conversationId });
if (convo) {
console.log('found convo for custom gpt', { convo })
chatGptLabel = convo.chatGptLabel;
promptPrefix = convo.promptPrefix;
}
}
res.writeHead(200, { res.writeHead(200, {
Connection: 'keep-alive', Connection: 'keep-alive',
'Content-Type': 'text/event-stream', 'Content-Type': 'text/event-stream',

View file

@@ -16,15 +16,16 @@ const initialState = {
_id: '2', _id: '2',
name: 'BingAI', name: 'BingAI',
value: 'bingai' value: 'bingai'
},
{
_id: '3',
name: 'ChatGPT',
value: 'chatgptBrowser'
} }
// {
// _id: '3',
// name: 'ChatGPT',
// value: 'chatgptBrowser'
// }
], ],
modelMap: {}, modelMap: {},
initial: { chatgpt: true, chatgptCustom: true, bingai: true, chatgptBrowser: true } // initial: { chatgpt: true, chatgptCustom: true, bingai: true, chatgptBrowser: true }
initial: { chatgpt: true, chatgptCustom: true, bingai: true, }
}; };
const currentSlice = createSlice({ const currentSlice = createSlice({
@@ -37,7 +38,7 @@ const currentSlice = createSlice({
state.models = models; state.models = models;
const modelMap = {}; const modelMap = {};
models.slice(4).forEach((modelItem) => { models.slice(initialState.models.length).forEach((modelItem) => {
modelMap[modelItem.value] = { modelMap[modelItem.value] = {
chatGptLabel: modelItem.chatGptLabel, chatGptLabel: modelItem.chatGptLabel,
promptPrefix: modelItem.promptPrefix promptPrefix: modelItem.promptPrefix