Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-23 03:40:14 +01:00
feat: update env example
feat: support OPENAI_REVERSE_PROXY
feat: support setting available models via the env file
fix: refactor ChatGPT Browser send logic
fix: wrong usage of responseMessage when generating conversation titles
BREAKING: some env parameters have been changed!
This commit is contained in:
parent a5202f84cc
commit 22b9524ad3
19 changed files with 259 additions and 197 deletions
@@ -1,6 +1,7 @@
const express = require('express');
const crypto = require('crypto');
const router = express.Router();
const { getChatGPTBrowserModels } = require('../endpoints');
const { titleConvo, browserClient } = require('../../../app/');
const { saveMessage, getConvoTitle, saveConvo, updateConvo, getConvo } = require('../../../models');
const { handleError, sendMessage, createOnProgress, handleText } = require('./handlers');

@@ -18,6 +19,7 @@ router.post('/', async (req, res) => {

  // build user message
  const conversationId = oldConversationId || crypto.randomUUID();
  const isNewConversation = !oldConversationId;
  const userMessageId = crypto.randomUUID();
  const userParentMessageId = parentMessageId || '00000000-0000-0000-0000-000000000000';
  const userMessage = {

@@ -34,6 +36,10 @@ router.post('/', async (req, res) => {
    model: req.body?.model || 'text-davinci-002-render-sha'
  };

  const availableModels = getChatGPTBrowserModels();
  if (availableModels.find(model => model === endpointOption.model) === undefined)
    return handleError(res, { text: 'Illegal request: model' });

  console.log('ask log', {
    userMessage,
    endpointOption,

@@ -52,6 +58,7 @@ router.post('/', async (req, res) => {

  // eslint-disable-next-line no-use-before-define
  return await ask({
    isNewConversation,
    userMessage,
    endpointOption,
    conversationId,

@@ -63,6 +70,7 @@ router.post('/', async (req, res) => {
});

const ask = async ({
  isNewConversation,
  userMessage,
  endpointOption,
  conversationId,

@@ -71,9 +79,7 @@ const ask = async ({
  req,
  res
}) => {
  const { text, parentMessageId: userParentMessageId, messageId: userMessageId } = userMessage;

  const client = browserClient;
  let { text, parentMessageId: userParentMessageId, messageId: userMessageId } = userMessage;

  res.writeHead(200, {
    Connection: 'keep-alive',

@@ -89,7 +95,7 @@ const ask = async ({
  const progressCallback = createOnProgress();
  const abortController = new AbortController();
  res.on('close', () => abortController.abort());
  let gptResponse = await client({
  let response = await browserClient({
    text,
    parentMessageId: userParentMessageId,
    conversationId,

@@ -98,50 +104,60 @@ const ask = async ({
    abortController
  });

  gptResponse.text = gptResponse.response;
  console.log('CLIENT RESPONSE', gptResponse);
  console.log('CLIENT RESPONSE', response);

  if (!gptResponse.parentMessageId) {
    gptResponse.parentMessageId = overrideParentMessageId || userMessageId;
    delete gptResponse.response;
  // STEP1 generate response message
  response.text = response.response || '**ChatGPT refused to answer.**';

  let responseMessage = {
    conversationId: response.conversationId,
    messageId: response.messageId,
    parentMessageId: overrideParentMessageId || response.parentMessageId || userMessageId,
    text: await handleText(response),
    sender: endpointOption?.chatGptLabel || 'ChatGPT'
  };

  await saveMessage(responseMessage);

  // STEP2 update the conversation
  conversationId = responseMessage.conversationId || conversationId;

  // First update conversationId if needed
  let conversationUpdate = { conversationId, endpoint: 'chatGPTBrowser' };
  if (conversationId != responseMessage.conversationId && isNewConversation)
    conversationUpdate = {
      ...conversationUpdate,
      conversationId: conversationId,
      newConversationId: responseMessage.conversationId || conversationId
    };
  conversationId = responseMessage.conversationId || conversationId;

  await saveConvo(req?.session?.user?.username, conversationUpdate);

  // STEP3 update the user message
  userMessage.conversationId = conversationId;
  userMessage.messageId = responseMessage.parentMessageId;

  // If response has parentMessageId, the fake userMessage.messageId should be updated to the real one.
  if (!overrideParentMessageId) {
    const oldUserMessageId = userMessageId;
    await saveMessage({ ...userMessage, messageId: oldUserMessageId, newMessageId: userMessage.messageId });
  }

  gptResponse.sender = 'ChatGPT';
  // gptResponse.model = model;
  gptResponse.text = await handleText(gptResponse);
  // if (convo.chatGptLabel?.length > 0 && model === 'chatgptCustom') {
  // gptResponse.chatGptLabel = convo.chatGptLabel;
  // }

  // if (convo.promptPrefix?.length > 0 && model === 'chatgptCustom') {
  // gptResponse.promptPrefix = convo.promptPrefix;
  // }

  gptResponse.parentMessageId = overrideParentMessageId || userMessageId;

  if (userParentMessageId.startsWith('000')) {
    await saveMessage({ ...userMessage, conversationId: gptResponse.conversationId });
  }

  await saveMessage(gptResponse);
  await updateConvo(req?.session?.user?.username, {
    ...gptResponse,
    oldConvoId: conversationId
  });
  userMessageId = userMessage.messageId;

  sendMessage(res, {
    title: await getConvoTitle(req?.session?.user?.username, conversationId),
    final: true,
    conversation: await getConvo(req?.session?.user?.username, conversationId),
    requestMessage: userMessage,
    responseMessage: gptResponse
    responseMessage: responseMessage
  });
  res.end();

  if (userParentMessageId == '00000000-0000-0000-0000-000000000000') {
    const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: gptResponse });
    const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
    await updateConvo(req?.session?.user?.username, {
      conversationId: gptResponse.conversationId,
      conversationId: conversationId,
      title
    });
  }
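The ChatGPT Browser hunk above replaces the old gptResponse flow with a responseMessage built in three steps: generate the response message, reconcile the conversation with the IDs the backend actually returned, then fix up the locally generated user message. Below is a minimal, self-contained sketch of that ID-reconciliation idea only; saveConvo/saveMessage and their newConversationId/newMessageId fields appear in the diff, but the Map-based store and the reconcileConversationId helper here are stand-ins, not the repository's code.

```js
// Minimal sketch of the ID reconciliation done in STEP2/STEP3 above.
// The ChatGPT Browser backend returns its own conversationId/messageId, so the
// locally generated ("fake") IDs must be swapped for the real ones.
// An in-memory Map stands in for the repo's Mongo-backed models.
const crypto = require('crypto');

const conversations = new Map(); // conversationId -> conversation record

// Assumed helper (not the repo's saveConvo): re-keys a conversation when the
// backend reports a different conversationId than the one made up locally.
function reconcileConversationId(localId, remoteId) {
  if (!remoteId || remoteId === localId) return localId;
  const convo = conversations.get(localId) || { endpoint: 'chatGPTBrowser' };
  conversations.delete(localId);
  conversations.set(remoteId, { ...convo, conversationId: remoteId });
  return remoteId;
}

// Example: a fake local ID gets replaced by the ID ChatGPT actually used.
const localId = crypto.randomUUID();
conversations.set(localId, { conversationId: localId, endpoint: 'chatGPTBrowser' });
const remoteId = crypto.randomUUID(); // pretend this came back from browserClient
console.log(reconcileConversationId(localId, remoteId)); // logs remoteId
```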
@@ -1,6 +1,7 @@
const express = require('express');
const crypto = require('crypto');
const router = express.Router();
const { getOpenAIModels } = require('../endpoints');
const { titleConvo, askClient } = require('../../../app/');
const { saveMessage, getConvoTitle, saveConvo, updateConvo, getConvo } = require('../../../models');
const { handleError, sendMessage, createOnProgress, handleText } = require('./handlers');

@@ -40,6 +41,10 @@ router.post('/', async (req, res) => {
    frequency_penalty: req.body?.frequency_penalty || 0
  };

  const availableModels = getOpenAIModels();
  if (availableModels.find(model => model === endpointOption.model) === undefined)
    return handleError(res, { text: 'Illegal request: model' });

  console.log('ask log', {
    userMessage,
    endpointOption,

@@ -150,7 +155,7 @@ const ask = async ({
  res.end();

  if (userParentMessageId == '00000000-0000-0000-0000-000000000000') {
    const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response });
    const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
    await updateConvo(req?.session?.user?.username, {
      conversationId: conversationId,
      title
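Both ask routes above now whitelist the requested model against the list coming from ../endpoints and answer with handleError otherwise. A small sketch of that guard in isolation, assuming stand-in versions of getOpenAIModels and handleError so it runs outside Express:

```js
// Minimal sketch of the model whitelist guard added to the ask routes above.
// getOpenAIModels and handleError are stand-ins for the repo's versions.
const getOpenAIModels = () => {
  let models = ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'];
  if (process.env.OPENAI_MODELS) models = String(process.env.OPENAI_MODELS).split(',');
  return models;
};

const handleError = (res, payload) => res.end(JSON.stringify(payload)); // stand-in

function validateModel(res, endpointOption) {
  const availableModels = getOpenAIModels();
  // Same check as the diff: reject any model not present in the configured list.
  if (availableModels.find(model => model === endpointOption.model) === undefined) {
    return handleError(res, { text: 'Illegal request: model' });
  }
  return null; // model is allowed, no error response is sent
}

// Example:
const fakeRes = { end: (s) => console.log('response:', s) };
validateModel(fakeRes, { model: 'gpt-5' }); // rejected: not in the list
validateModel(fakeRes, { model: 'gpt-4' }); // allowed
```

`availableModels.includes(endpointOption.model)` would express the same membership test more directly than `find(...) === undefined`.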
@@ -1,17 +1,27 @@
const express = require('express');
const router = express.Router();

const getOpenAIModels = () => {
  let models = ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'];
  if (process.env.OPENAI_MODELS) models = String(process.env.OPENAI_MODELS).split(',');

  return models;
};

const getChatGPTBrowserModels = () => {
  let models = ['text-davinci-002-render-sha', 'text-davinci-002-render-paid', 'gpt-4'];
  if (process.env.CHATGPT_MODELS) models = String(process.env.CHATGPT_MODELS).split(',');

  return models;
};

router.get('/', function (req, res) {
  const azureOpenAI = !!process.env.AZURE_OPENAI_KEY;
  const openAI = process.env.OPENAI_KEY
    ? { availableModels: ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'] }
    : false;
  const bingAI = !!process.env.BING_TOKEN;
  const chatGPTBrowser = process.env.OPENAI_KEY
    ? { availableModels: ['Default (GPT-3.5)', 'Legacy (GPT-3.5)', 'GPT-4'] }
    : false;
  const openAI = process.env.OPENAI_KEY ? { availableModels: getOpenAIModels() } : false;
  const bingAI = !!process.env.BINGAI_TOKEN;
  const chatGPTBrowser = process.env.CHATGPT_TOKEN ? { availableModels: getChatGPTBrowserModels() } : false;

  res.send(JSON.stringify({ azureOpenAI, openAI, bingAI, chatGPTBrowser }));
});

module.exports = router;
module.exports = { router, getOpenAIModels, getChatGPTBrowserModels };
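The endpoints hunk above is where the new env parameters land: OPENAI_MODELS and CHATGPT_MODELS are parsed as comma-separated lists, BING_TOKEN is renamed to BINGAI_TOKEN, and the ChatGPT Browser endpoint is now gated by CHATGPT_TOKEN rather than OPENAI_KEY (the breaking changes the commit message warns about). A short usage sketch of the exported getters; the require path and the example values are illustrative only:

```js
// Usage sketch for the new env-driven model lists (values are examples only).
// With nothing set, the hard-coded defaults from the hunk above are returned.
const { getOpenAIModels, getChatGPTBrowserModels } = require('./endpoints');

console.log(getOpenAIModels());
// -> ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301']

// Comma-separated env vars override the defaults (normally set in .env, set inline here).
process.env.OPENAI_MODELS = 'gpt-4,gpt-3.5-turbo';
process.env.CHATGPT_MODELS = 'text-davinci-002-render-sha,gpt-4';

console.log(getOpenAIModels());         // -> ['gpt-4', 'gpt-3.5-turbo']
console.log(getChatGPTBrowserModels()); // -> ['text-davinci-002-render-sha', 'gpt-4']
```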
@@ -6,7 +6,7 @@ const prompts = require('./prompts');
const search = require('./search');
const tokenizer = require('./tokenizer');
const me = require('./me');
const endpoints = require('./endpoints');
const { router: endpoints } = require('./endpoints');
const { router: auth, authenticatedOr401, authenticatedOrRedirect } = require('./auth');

module.exports = {