Mirror of https://github.com/danny-avila/LibreChat.git
feat: support user-provided token to bingAI and chatgptBrowser

Commit bbf2f8a6ca (parent a953fc9f2b)
22 changed files with 309 additions and 86 deletions
Environment configuration:

@@ -25,8 +25,9 @@ MONGO_URI="mongodb://127.0.0.1:27017/chatgpt-clone"
 OPENAI_KEY=
 
 # Identify the available models, separated by commas (no spaces)
 # The first will be the default
 # Leave it blank to use internal settings.
 # OPENAI_MODELS=gpt-4,text-davinci-003,gpt-3.5-turbo,gpt-3.5-turbo-0301
+OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-0301,text-davinci-003,gpt-4
 
 # Reverse proxy setting for OpenAI
 # https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy

@@ -39,6 +40,8 @@ OPENAI_KEY=
 
 # BingAI Token: the "_U" cookie value from bing.com
 # Leave it and BINGAI_USER_TOKEN blank to disable this endpoint.
+# Set to "user_provide" to allow a user-provided token.
+# BINGAI_TOKEN="user_provide"
 BINGAI_TOKEN=
 
 # BingAI Host:

@@ -46,12 +49,6 @@ BINGAI_TOKEN=
 # Leave it blank to use the default server.
 # BINGAI_HOST="https://cn.bing.com"
 
-# BingAI User defined Token
-# Allow user to set their own token by client
-# Uncomment this to enable this feature.
-# (Not implemented yet.)
-# BINGAI_USER_TOKEN=1
-
 #############################
 # Endpoint chatGPT:

@@ -61,11 +58,14 @@ BINGAI_TOKEN=
 # Access token from https://chat.openai.com/api/auth/session
 # Exposes your access token to CHATGPT_REVERSE_PROXY
 # Leave it blank to disable this endpoint
+# Set to "user_provide" to allow a user-provided token.
+# CHATGPT_TOKEN="user_provide"
 CHATGPT_TOKEN=
 
 # Identify the available models, separated by commas (no spaces)
 # The first will be the default
 # Leave it blank to use internal settings.
 # CHATGPT_MODELS=text-davinci-002-render-sha,text-davinci-002-render-paid,gpt-4
+CHATGPT_MODELS=text-davinci-002-render-sha,text-davinci-002-render-paid,gpt-4
 
 # Reverse proxy setting for OpenAI
 # https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
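Taken together, enabling user-supplied tokens for both endpoints is a matter of setting the two variables introduced above. A minimal sketch of such a configuration (the values shown are illustrative, not defaults shipped by the project):

# Let clients send their own tokens with each request:
BINGAI_TOKEN="user_provide"
CHATGPT_TOKEN="user_provide"

# Alternatively, keep a fixed server-side token (e.g. the "_U" cookie value
# for Bing), or leave the variable blank to disable the endpoint entirely.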
BingAI client:

@@ -13,6 +13,7 @@ const askBing = async ({
   clientId,
   invocationId,
   toneStyle,
+  token,
   onProgress
 }) => {
   const { BingAIClient } = await import('@waylaidwanderer/chatgpt-api');

@@ -22,7 +23,7 @@ const askBing = async ({
   const bingAIClient = new BingAIClient({
     // "_U" cookie from bing.com
-    userToken: process.env.BINGAI_TOKEN,
+    userToken: process.env.BINGAI_TOKEN == 'user_provide' ? token : process.env.BINGAI_TOKEN ?? null,
     // If the above doesn't work, provide all your cookies as a string instead
     // cookies: '',
     debug: false,
ChatGPT browser client:

@@ -6,6 +6,7 @@ const browserClient = async ({
   parentMessageId,
   conversationId,
   model,
+  token,
   onProgress,
   abortController
 }) => {

@@ -18,7 +19,7 @@ const browserClient = async ({
     // Warning: This will expose your access token to a third party. Consider the risks before using this.
     reverseProxyUrl: process.env.CHATGPT_REVERSE_PROXY || 'https://bypass.churchless.tech/api/conversation',
     // Access token from https://chat.openai.com/api/auth/session
-    accessToken: process.env.CHATGPT_TOKEN,
+    accessToken: process.env.CHATGPT_TOKEN == 'user_provide' ? token : process.env.CHATGPT_TOKEN ?? null,
     model: model,
     // debug: true
     proxy: process.env.PROXY || null
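Both client modules above resolve their token with the same inline expression. Because the nullish-coalescing operator binds more tightly than the conditional operator, X == 'user_provide' ? token : X ?? null parses as (X == 'user_provide') ? token : (X ?? null), so no extra parentheses are needed. The helper below is not part of the commit; it is only a sketch that spells out the three possible outcomes (names and sample values are illustrative):

// Mirrors the inline expression used for userToken/accessToken above.
const resolveToken = (envValue, userToken) =>
  envValue == 'user_provide' ? userToken : envValue ?? null;

console.log(resolveToken('user_provide', 'client-token')); // 'client-token' — user-supplied token is used
console.log(resolveToken('server-token', 'client-token')); // 'server-token' — server-configured token wins
console.log(resolveToken(undefined, 'client-token'));      // null — endpoint has no token configured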
BingAI ask route:

@@ -39,7 +39,8 @@ router.post('/', async (req, res) => {
       jailbreakConversationId: req.body?.jailbreakConversationId ?? null,
       systemMessage: req.body?.systemMessage ?? null,
       context: req.body?.context ?? null,
-      toneStyle: req.body?.toneStyle ?? 'fast'
+      toneStyle: req.body?.toneStyle ?? 'fast',
+      token: req.body?.token ?? null
     };
   else
     endpointOption = {

@@ -49,7 +50,8 @@ router.post('/', async (req, res) => {
       conversationSignature: req.body?.conversationSignature ?? null,
       clientId: req.body?.clientId ?? null,
       invocationId: req.body?.invocationId ?? null,
-      toneStyle: req.body?.toneStyle ?? 'fast'
+      toneStyle: req.body?.toneStyle ?? 'fast',
+      token: req.body?.token ?? null
     };
 
   console.log('ask log', {
ChatGPT browser ask route:

@@ -33,7 +33,8 @@ router.post('/', async (req, res) => {
 
   // build endpoint option
   const endpointOption = {
-    model: req.body?.model ?? 'text-davinci-002-render-sha'
+    model: req.body?.model ?? 'text-davinci-002-render-sha',
+    token: req.body?.token ?? null
   };
 
   const availableModels = getChatGPTBrowserModels();
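The ask routes above now read the token from the request body with req.body?.token ?? null and forward it in endpointOption. A hypothetical request payload from the client could therefore look like the following (the field names are the ones read above; the values are placeholders and the surrounding message fields are omitted):

// Illustrative only — not taken from the client code in this commit.
const bingAIBody = {
  toneStyle: 'fast',
  token: '<the "_U" cookie value from bing.com>',
  // ...plus conversationSignature, clientId, invocationId, etc.
};

const chatGPTBrowserBody = {
  model: 'text-davinci-002-render-sha',
  token: '<access token from https://chat.openai.com/api/auth/session>',
  // ...plus the usual message/conversation fields
};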
Endpoints route:

@@ -18,8 +18,15 @@ const getChatGPTBrowserModels = () => {
 router.get('/', function (req, res) {
   const azureOpenAI = !!process.env.AZURE_OPENAI_KEY;
   const openAI = process.env.OPENAI_KEY ? { availableModels: getOpenAIModels() } : false;
-  const bingAI = !!process.env.BINGAI_TOKEN;
-  const chatGPTBrowser = process.env.CHATGPT_TOKEN ? { availableModels: getChatGPTBrowserModels() } : false;
+  const bingAI = process.env.BINGAI_TOKEN
+    ? { userProvide: process.env.BINGAI_TOKEN == 'user_provide' }
+    : false;
+  const chatGPTBrowser = process.env.CHATGPT_TOKEN
+    ? {
+        userProvide: process.env.CHATGPT_TOKEN == 'user_provide',
+        availableModels: getChatGPTBrowserModels()
+      }
+    : false;
 
   res.send(JSON.stringify({ azureOpenAI, openAI, bingAI, chatGPTBrowser }));
 });
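With this change the endpoints route also tells the client whether each endpoint expects a user-supplied token. Assuming BINGAI_TOKEN and CHATGPT_TOKEN are both set to "user_provide" and no OpenAI/Azure keys are configured, the serialized response would look roughly like this (the model list simply mirrors the CHATGPT_MODELS default shown earlier):

{
  "azureOpenAI": false,
  "openAI": false,
  "bingAI": { "userProvide": true },
  "chatGPTBrowser": {
    "userProvide": true,
    "availableModels": ["text-davinci-002-render-sha", "text-davinci-002-render-paid", "gpt-4"]
  }
}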