Frontend and backend logic for model switching

Daniel Avila 2023-02-13 21:15:28 -05:00
parent a5afd5c48f
commit c00a2c902b
9 changed files with 58 additions and 39 deletions

@@ -1,28 +1,24 @@
require('dotenv').config();
// const store = new Keyv(process.env.MONGODB_URI);
const Keyv = require('keyv');
const { KeyvFile } = require('keyv-file');
const clientOptions = {
  // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
  // Warning: This will expose your `openaiApiKey` to a third-party. Consider the risks before using this.
const proxyOptions = {
  reverseProxyUrl: 'https://chatgpt.pawan.krd/api/completions',
  // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
  modelOptions: {
    // You can override the model name and any other parameters here.
    model: 'text-davinci-002-render'
  },
  // (Optional) Set custom instructions instead of "You are ChatGPT...".
  // promptPrefix: 'You are Bob, a cowboy in Western times...',
  // (Optional) Set a custom name for the user
  // userLabel: 'User',
  // (Optional) Set a custom name for ChatGPT
  // chatGptLabel: 'ChatGPT',
  // (Optional) Set to true to enable `console.debug()` logging
  debug: false
};
const askClient = async (question, progressCallback, convo) => {
const davinciOptions = {
  modelOptions: {
    model: 'text-davinci-003'
  },
  debug: false
};
const askClient = async ({ model, text, progressCallback, convo }) => {
  const clientOptions = model === 'chatgpt' ? proxyOptions : davinciOptions;
  const ChatGPTClient = (await import('@waylaidwanderer/chatgpt-api')).default;
  const client = new ChatGPTClient(process.env.CHATGPT_TOKEN, clientOptions, {
    store: new KeyvFile({ filename: 'cache.json' })
@@ -36,7 +32,7 @@ const askClient = async (question, progressCallback, convo) => {
    options = { ...options, ...convo };
  }
  const res = await client.sendMessage(question, options);
  const res = await client.sendMessage(text, options);
  return res;
};
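
For context on how the new signature might be consumed, here is a minimal usage sketch, not part of this commit: an Express-style route that forwards the client-selected model to askClient. Only the askClient({ model, text, progressCallback, convo }) call and the 'chatgpt'-vs-davinci selection come from the diff above; the route path /ask, the request body shape, the require path ./chatgpt-client, and the no-op progressCallback are illustrative assumptions.

// Minimal usage sketch, assuming the module above exports askClient and that
// Express with JSON body parsing is already set up. Route path, request shape,
// and error handling are illustrative, not taken from this commit.
const express = require('express');
const askClient = require('./chatgpt-client'); // hypothetical path/export for the file shown above

const router = express.Router();

router.post('/ask', async (req, res) => {
  // The frontend is assumed to send { model, text, parentMessageId, conversationId }.
  const { model, text, parentMessageId, conversationId } = req.body;
  const convo = parentMessageId ? { parentMessageId, conversationId } : null;

  try {
    const response = await askClient({
      model,                      // 'chatgpt' selects proxyOptions, anything else davinciOptions
      text,
      progressCallback: () => {}, // no streaming in this sketch
      convo
    });
    res.json(response);
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});

module.exports = router;

On the frontend side, model switching then reduces to including a model field in the request body, e.g. posting JSON like { model: 'chatgpt', text } to the route above; the exact payload fields beyond model and text are assumptions here.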