mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 17:00:15 +01:00
feat(chatgpt-browser): add support for multiple GPT models
This commit adds support for multiple GPT models in the chatGPT-browser client. The available models are now stored in a Map object that maps each user-facing model label to its corresponding internal model identifier. The commit also adds a new component, ChatGPTOptions, to the client UI so the user can select which GPT model to use in the chat; the component is displayed only when the chatGPT-browser endpoint is selected.
This commit is contained in:
parent
eef2303c8e
commit
aa4fd57459
9 changed files with 69 additions and 13 deletions
|
|
@ -1,11 +1,11 @@
|
|||
require('dotenv').config();
|
||||
const { KeyvFile } = require('keyv-file');
|
||||
// const set = new Set([
|
||||
// 'gpt-4',
|
||||
// 'text-davinci-002-render',
|
||||
// 'text-davinci-002-render-paid',
|
||||
// 'text-davinci-002-render-sha'
|
||||
// ]);
|
||||
|
||||
// Maps each user-facing model label to the internal ChatGPT model id
// expected by the chatGPT-browser endpoint.
const modelMap = new Map(
  Object.entries({
    'Default (GPT-3.5)': 'text-davinci-002-render-sha',
    'Legacy (GPT-3.5)': 'text-davinci-002-render-paid',
    'GPT-4': 'gpt-4'
  })
);
|
||||
|
||||
const browserClient = async ({
|
||||
text,
|
||||
|
|
@ -25,7 +25,7 @@ const browserClient = async ({
|
|||
reverseProxyUrl: 'https://bypass.duti.tech/api/conversation',
|
||||
// Access token from https://chat.openai.com/api/auth/session
|
||||
accessToken: process.env.CHATGPT_TOKEN,
|
||||
model,
|
||||
model: modelMap.get(model),
|
||||
// debug: true
|
||||
proxy: process.env.PROXY || null
|
||||
};
|
||||
|
|
@ -37,7 +37,7 @@ const browserClient = async ({
|
|||
options = { ...options, parentMessageId, conversationId };
|
||||
}
|
||||
|
||||
// console.log('gptBrowser options', options, clientOptions);
|
||||
console.log('gptBrowser clientOptions', clientOptions);
|
||||
|
||||
if (parentMessageId === '00000000-0000-0000-0000-000000000000') {
|
||||
delete options.conversationId;
|
||||
|
|
|
|||
|
|
@ -67,7 +67,9 @@ const projectPath = path.join(__dirname, '..', '..', 'client');
|
|||
? { availableModels: ['gpt-4', 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301'] }
|
||||
: false;
|
||||
const bingAI = !!process.env.BING_TOKEN;
|
||||
const chatGPTBrowser = !!process.env.CHATGPT_TOKEN;
|
||||
const chatGPTBrowser = process.env.OPENAI_KEY
|
||||
? { availableModels: ['Default (GPT-3.5)', 'Legacy (GPT-3.5)', 'GPT-4'] }
|
||||
: false;
|
||||
|
||||
res.send(JSON.stringify({ azureOpenAI, openAI, bingAI, chatGPTBrowser }));
|
||||
});
|
||||
|
|
@ -86,7 +88,7 @@ const projectPath = path.join(__dirname, '..', '..', 'client');
|
|||
})();
|
||||
|
||||
let messageCount = 0;
|
||||
process.on('uncaughtException', err => {
|
||||
process.on('uncaughtException', (err) => {
|
||||
if (!err.message.includes('fetch failed')) {
|
||||
console.error('There was an uncaught error:', err.message);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ router.post('/', async (req, res) => {
|
|||
});
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-use-before-define
|
||||
return await ask({
|
||||
userMessage,
|
||||
endpointOption,
|
||||
|
|
|
|||
|
|
@ -56,6 +56,7 @@ router.post('/', async (req, res) => {
|
|||
});
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-use-before-define
|
||||
return await ask({
|
||||
userMessage,
|
||||
endpointOption,
|
||||
|
|
|
|||
50
client/src/components/Input/ChatGPTOptions/index.jsx
Normal file
50
client/src/components/Input/ChatGPTOptions/index.jsx
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
import React from 'react';
import { useRecoilState } from 'recoil';
import ModelDropDown from '../../ui/ModelDropDown.jsx';
import { cn } from '~/utils/';

import store from '~/store';

/**
 * Model selector for the chatGPT-browser endpoint.
 *
 * Renders a ModelDropDown bound to the current conversation's `model`
 * field. It is shown only while composing a brand-new conversation
 * (`conversationId === 'new'`) on the `chatGPTBrowser` endpoint;
 * in every other state it renders nothing.
 */
function ChatGPTOptions() {
  const [conversation, setConversation] = useRecoilState(store.conversation);
  // conversation may briefly be null/undefined before the store hydrates —
  // guard the destructure so the early-return checks below stay safe.
  const { endpoint, conversationId, model } = conversation ?? {};

  // Hide unless the user is starting a new chatGPT-browser conversation.
  if (endpoint !== 'chatGPTBrowser') return null;
  if (conversationId !== 'new') return null;

  // Curried updater: setOption('model')(value) merges { model: value }
  // into the conversation state without clobbering other fields.
  const setOption = (param) => (newValue) =>
    setConversation((prevState) => ({
      ...prevState,
      [param]: newValue
    }));

  const cardStyle =
    'transition-colors shadow-md rounded-md min-w-[75px] font-normal bg-white border-black/10 hover:border-black/10 focus:border-black/10 dark:border-black/10 dark:hover:border-black/10 dark:focus:border-black/10 border dark:bg-gray-700 text-black dark:text-white';

  return (
    <div className="openAIOptions-simple-container flex w-full items-center justify-center gap-2 show">
      <ModelDropDown
        model={model}
        setModel={setOption('model')}
        endpoint="chatGPTBrowser"
        showAbove={true}
        showLabel={false}
        className={cn(
          cardStyle,
          'min-w-48 z-50 flex h-[40px] w-48 items-center justify-center px-4 ring-0 hover:cursor-pointer hover:bg-slate-50 focus:ring-0 focus:ring-offset-0 data-[state=open]:bg-slate-50 dark:bg-gray-700 dark:hover:bg-gray-600 dark:data-[state=open]:bg-gray-600'
        )}
      />
    </div>
  );
}

export default ChatGPTOptions;
|
||||
|
|
@ -3,6 +3,7 @@ import { useRecoilValue, useRecoilState } from 'recoil';
|
|||
import SubmitButton from './SubmitButton';
|
||||
import AdjustToneButton from './AdjustToneButton';
|
||||
import OpenAIOptions from './OpenAIOptions';
|
||||
import ChatGPTOptions from './ChatGPTOptions';
|
||||
import BingAIOptions from './BingAIOptions';
|
||||
// import BingStyles from './BingStyles';
|
||||
import EndpointMenu from './Endpoints/NewConversationMenu';
|
||||
|
|
@ -132,6 +133,7 @@ export default function TextChat({ isSearchView = false }) {
|
|||
<div className="relative py-2 md:mb-[-16px] md:py-4 lg:mb-[-32px]">
|
||||
<span className="ml-1 flex flex-col items-center justify-center gap-0 md:order-none md:m-auto md:w-full md:gap-2">
|
||||
<OpenAIOptions />
|
||||
<ChatGPTOptions />
|
||||
<BingAIOptions />
|
||||
</span>
|
||||
</div>
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ const buildPresetByConversation = ({ title, conversation, ...others }) => {
|
|||
} else if (endpoint === 'chatGPTBrowser') {
|
||||
preset = {
|
||||
endpoint,
|
||||
model: conversation?.model || 'text-davinci-002-render-sha',
|
||||
model: conversation?.model || 'Default (GPT-3.5)',
|
||||
title,
|
||||
...others
|
||||
};
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ const buildDefaultConversation = ({ conversation, endpoint, lastConversationSetu
|
|||
conversation = {
|
||||
...conversation,
|
||||
endpoint,
|
||||
model: lastConversationSetup?.model || 'text-davinci-002-render-sha'
|
||||
model: lastConversationSetup?.model || 'Default (GPT-3.5)'
|
||||
};
|
||||
} else if (endpoint === null) {
|
||||
conversation = {
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ const useMessageHandler = () => {
|
|||
} else if (endpoint === 'chatGPTBrowser') {
|
||||
endpointOption = {
|
||||
endpoint,
|
||||
model: currentConversation?.model || 'text-davinci-002-render-sha'
|
||||
model: currentConversation?.model || 'Default (GPT-3.5)'
|
||||
};
|
||||
responseSender = 'ChatGPT';
|
||||
} else if (endpoint === null) {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue