diff --git a/api/app/clients/chatgpt-client.js b/api/app/clients/chatgpt-client.js
index 5392cc994d..9cdb630acd 100644
--- a/api/app/clients/chatgpt-client.js
+++ b/api/app/clients/chatgpt-client.js
@@ -1,6 +1,7 @@
 require('dotenv').config();
 const { KeyvFile } = require('keyv-file');
 const { genAzureEndpoint } = require('../../utils/genAzureEndpoints');
+const { encoding_for_model } = require('@dqbd/tiktoken');
 
 const askClient = async ({
   text,
@@ -24,6 +25,8 @@
   };
 
   const azure = process.env.AZURE_OPENAI_API_KEY ? true : false;
+  // Fall back to the default system prompt when the caller supplies none.
+  const promptText = promptPrefix ?? 'You are ChatGPT, a large language model trained by OpenAI.';
   const maxContextTokens = model === 'gpt-4' ? 8191 : model === 'gpt-4-32k' ? 32767 : 4095; // 1 less than maximum
   const clientOptions = {
     reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
@@ -61,8 +64,27 @@
     ...(parentMessageId && conversationId ? { parentMessageId, conversationId } : {})
   };
 
   const res = await client.sendMessage(text, { ...options, userId });
-  return res;
+
+  // Attach OpenAI-style token usage to the response. The tiktoken encoder
+  // allocates WASM memory, so it must be freed even if encoding throws.
+  const enc = encoding_for_model(model);
+  try {
+    // The prompt the model sees is the system prompt plus the user message;
+    // res.response holds the completion text — assumes the sendMessage result
+    // shape from node-chatgpt-api (TODO confirm against the client library).
+    const promptTokens = enc.encode(promptText).length + enc.encode(text).length;
+    const completionTokens = enc.encode(res?.response ?? '').length;
+    return {
+      ...res,
+      usage: {
+        prompt_tokens: promptTokens,
+        completion_tokens: completionTokens,
+        total_tokens: promptTokens + completionTokens
+      }
+    };
+  } finally {
+    enc.free();
+  }
 };
 
 module.exports = { askClient };