mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-16 16:30:15 +01:00
* feat(OpenAI, PaLM): add new models

  - refactor(chatgpt-client.js): use an object to map max tokens for each model
  - refactor(askChatGPTBrowser.js, askGPTPlugins.js, askOpenAI.js): comment out unused function calls and error handling
  - feat(askGoogle.js): add support for the codechat-bison model
  - refactor(endpoints.js): add gpt-4-0613 and gpt-3.5-turbo-16k to the available models for OpenAI and GPT plugins
  - refactor(EditPresetDialog.jsx): hide examples for the codechat-bison model in the google endpoint
  - style(EndpointOptionsPopover.jsx): import the cn utility function and use it to set the additionalButton className
  - refactor(Google/Settings.jsx): conditionally render the custom name and prompt prefix fields based on model type; if the model starts with 'codechat-', the fields are not rendered
  - refactor(Settings.jsx): remove duplicated code and wrap a section in a conditional based on a variable
  - style(Input): add a z-index to the Input component to fix an overlapping issue
  - feat(GoogleOptions): disable the Examples button when the model starts with the 'codechat-' prefix

* feat(.env.example, endpoints.js): add a PLUGIN_MODELS environment variable and use it to get plugin models in endpoints.js
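The last item above reads PLUGIN_MODELS in endpoints.js. A minimal sketch of how such a comma-separated variable might be parsed; the helper name and the fallback list are assumptions for illustration, not the repository's actual code:

// Hypothetical helper: derive the plugin model list from PLUGIN_MODELS.
// The fallback values below are assumptions for illustration only.
const getPluginModels = () =>
  process.env.PLUGIN_MODELS
    ? process.env.PLUGIN_MODELS.split(',').map((model) => model.trim())
    : ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-0613'];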
106 lines
2.9 KiB
JavaScript
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { genAzureChatCompletion } = require('../../utils/genAzureEndpoints');
const tiktoken = require('@dqbd/tiktoken');
const tiktokenModels = require('../../utils/tiktokenModels');
const encoding_for_model = tiktoken.encoding_for_model;

const askClient = async ({
  text,
  parentMessageId,
  conversationId,
  model,
  oaiApiKey,
  chatGptLabel,
  promptPrefix,
  temperature,
  top_p,
  presence_penalty,
  frequency_penalty,
  onProgress,
  abortController,
  userId
}) => {
  const { ChatGPTClient } = await import('@waylaidwanderer/chatgpt-api');
  // Persist conversation state to a local JSON file via Keyv.
  const store = {
    store: new KeyvFile({ filename: './data/cache.json' })
  };

  const azure = Boolean(process.env.AZURE_OPENAI_API_KEY);

  // Default system prompt; a user-supplied prefix replaces it for token counting.
  let promptText = 'You are ChatGPT, a large language model trained by OpenAI.';
  if (promptPrefix) {
    promptText = promptPrefix;
  }

  // Context-window limit per model, each set 1 token below the model's
  // advertised maximum so requests never land exactly on the boundary.
  const maxTokensMap = {
    'gpt-4': 8191,
    'gpt-4-0613': 8191,
    'gpt-4-32k': 32767,
    'gpt-4-32k-0613': 32767,
    'gpt-3.5-turbo': 4095,
    'gpt-3.5-turbo-0613': 4095,
    'gpt-3.5-turbo-0301': 4095,
    'gpt-3.5-turbo-16k': 15999
  };

  const maxContextTokens = maxTokensMap[model] ?? 4095; // 1 less than maximum

  const clientOptions = {
    reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
    azure,
    maxContextTokens,
    modelOptions: {
      model,
      temperature,
      top_p,
      presence_penalty,
      frequency_penalty
    },
    chatGptLabel,
    promptPrefix,
    proxy: process.env.PROXY || null
    // debug: true
  };

  // Prefer a per-request key; fall back to the environment variable.
  let apiKey = oaiApiKey || process.env.OPENAI_API_KEY || null;

  if (azure) {
    apiKey = oaiApiKey || process.env.AZURE_OPENAI_API_KEY || null;
    // Build the Azure OpenAI chat-completion endpoint from the instance,
    // deployment, and API version configured in the environment.
    clientOptions.reverseProxyUrl = genAzureChatCompletion({
      azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
      azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
      azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION
    });
  }

  const client = new ChatGPTClient(apiKey, clientOptions, store);

  const options = {
    onProgress,
    abortController,
    // Only continue an existing thread when both identifiers are present.
    ...(parentMessageId && conversationId ? { parentMessageId, conversationId } : {})
  };

  // Estimate prompt tokens with tiktoken; fall back to the gpt-3.5-turbo
  // encoding for models tiktoken does not know about.
  let usage = {};
  let enc = null;
  try {
    enc = encoding_for_model(tiktokenModels.has(model) ? model : 'gpt-3.5-turbo');
    usage.prompt_tokens = enc.encode(promptText).length + enc.encode(text).length;
  } catch (e) {
    console.error('Error encoding prompt text', e);
  }

  const res = await client.sendMessage(text, { ...options, userId });

  try {
    // Skip token accounting if the encoder failed to load above.
    if (enc !== null) {
      usage.completion_tokens = enc.encode(res.response).length;
      enc.free();
      usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
      res.usage = usage;
    }
  } catch (e) {
    console.error('Error encoding response text', e);
  }

  return res;
};

module.exports = { askClient };
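
A minimal usage sketch for the exported askClient, assuming OPENAI_API_KEY is set and that onProgress receives partial response text from the underlying ChatGPTClient; the require path and user id are illustrative assumptions:

// Hypothetical caller streaming a completion through askClient.
const { askClient } = require('./chatgpt-client'); // path is an assumption

(async () => {
  const res = await askClient({
    text: 'Say hello in five words.',
    model: 'gpt-3.5-turbo',
    temperature: 0.7,
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    onProgress: (partial) => process.stdout.write(partial),
    abortController: new AbortController(),
    userId: 'user-123' // illustrative id
  });
  console.log('\ntotal tokens:', res.usage?.total_tokens);
})().catch(console.error);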