Mirror of https://github.com/danny-avila/LibreChat.git
Build/Refactor: lint pre-commit hook and reformat repo to spec (#314)

* build/refactor: move lint/prettier packages to project root, install husky, add pre-commit hook
* refactor: reformat files
* build: put full eslintrc back with all rules
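For reference, a pre-commit hook of the kind this commit describes is typically wired up with husky plus lint-staged. A minimal sketch, assuming lint-staged is the runner (the actual hook contents are not shown on this page):

// lint-staged.config.js (hypothetical; not the repo's actual file)
module.exports = {
  // run the linter and formatter on staged JS files before each commit
  '*.{js,jsx}': ['eslint --fix', 'prettier --write']
};

The husky-generated .husky/pre-commit script would then simply invoke `npx lint-staged`, so only staged files are linted and reformatted.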
This commit is contained in:
parent 8d75b25104
commit 7fdc862042

157 changed files with 4836 additions and 2403 deletions
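Every hunk below applies the same two rules: double-quoted strings become single-quoted, and expressions longer than roughly 100 characters are wrapped after the `=` or `:`. That is consistent with a Prettier config along these lines (illustrative values inferred from the hunks; the repo's actual config is not shown here):

// prettier.config.js (sketch, inferred from the diff below)
module.exports = {
  singleQuote: true,    // "..." becomes '...'
  printWidth: 100,      // long assignments are broken after the '='
  trailingComma: 'none' // e.g. 'proxy: process.env.PROXY || null' loses its comma
};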
@@ -23,7 +23,8 @@ const askBing = async ({
   const bingAIClient = new BingAIClient({
     // "_U" cookie from bing.com
-    userToken: process.env.BINGAI_TOKEN == 'user_provided' ? token : process.env.BINGAI_TOKEN ?? null,
+    userToken:
+      process.env.BINGAI_TOKEN == 'user_provided' ? token : process.env.BINGAI_TOKEN ?? null,
     // If the above doesn't work, provide all your cookies as a string instead
     // cookies: '',
     debug: false,
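The wrapped userToken expression mixes a conditional with `??`. Because `??` binds tighter than `?:`, it parses as `cond ? token : (env ?? null)`. A standalone sketch of that pattern (resolveToken is a hypothetical helper, not project code):

// Returns the per-user token when the env var opts into user-provided tokens,
// otherwise the server-side value, or null when the env var is unset.
const resolveToken = (envValue, userToken) =>
  envValue == 'user_provided' ? userToken : envValue ?? null;

console.log(resolveToken('user_provided', 'abc')); // 'abc' (per-user token)
console.log(resolveToken('server-key', 'abc'));    // 'server-key'
console.log(resolveToken(undefined, 'abc'));       // null (env var unset)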
@@ -18,9 +18,11 @@ const browserClient = async ({
   const clientOptions = {
     // Warning: This will expose your access token to a third party. Consider the risks before using this.
-    reverseProxyUrl: process.env.CHATGPT_REVERSE_PROXY || 'https://ai.fakeopen.com/api/conversation',
+    reverseProxyUrl:
+      process.env.CHATGPT_REVERSE_PROXY || 'https://ai.fakeopen.com/api/conversation',
     // Access token from https://chat.openai.com/api/auth/session
-    accessToken: process.env.CHATGPT_TOKEN == 'user_provided' ? token : process.env.CHATGPT_TOKEN ?? null,
+    accessToken:
+      process.env.CHATGPT_TOKEN == 'user_provided' ? token : process.env.CHATGPT_TOKEN ?? null,
     model: model,
     debug: false,
     proxy: process.env.PROXY || null,
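Note that reverseProxyUrl uses `||` rather than `??` for the env default, so any falsy value, including an empty string, falls back to the default URL. A quick standalone illustration:

// '||' treats every falsy value as "unset"; '??' only treats null/undefined that way:
const viaOr = '' || 'https://ai.fakeopen.com/api/conversation'; // default used
const viaNullish = '' ?? 'https://ai.fakeopen.com/api/conversation'; // '' kept
console.log(viaOr, JSON.stringify(viaNullish));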
@@ -1,7 +1,7 @@
 require('dotenv').config();
 const { KeyvFile } = require('keyv-file');
 const { genAzureEndpoint } = require('../../utils/genAzureEndpoints');
-const tiktoken = require("@dqbd/tiktoken");
+const tiktoken = require('@dqbd/tiktoken');
 const encoding_for_model = tiktoken.encoding_for_model;

 const askClient = async ({
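For context, @dqbd/tiktoken's encoding_for_model (imported above) returns a WASM-backed encoder. A minimal usage sketch, assuming the package is installed; the model id is illustrative:

const tiktoken = require('@dqbd/tiktoken');
const encoding_for_model = tiktoken.encoding_for_model;

const enc = encoding_for_model('gpt-3.5-turbo'); // illustrative model id
const tokens = enc.encode('You are ChatGPT, a large language model trained by OpenAI.');
console.log(tokens.length); // token count of the default prompt prefix
enc.free(); // WASM-backed encoders must be freed explicitly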
@@ -27,7 +27,7 @@ const askClient = async ({
   const azure = process.env.AZURE_OPENAI_API_KEY ? true : false;
   if (promptPrefix == null) {
-    promptText = "You are ChatGPT, a large language model trained by OpenAI.";
+    promptText = 'You are ChatGPT, a large language model trained by OpenAI.';
   } else {
     promptText = promptPrefix;
   }
@@ -45,7 +45,7 @@ const askClient = async ({
     },
     chatGptLabel,
     promptPrefix,
-    proxy: process.env.PROXY || null,
+    proxy: process.env.PROXY || null
     // debug: true
   };

@@ -77,16 +77,16 @@ const askClient = async ({
   const res = await client.sendMessage(text, { ...options, userId });
   // return res;
   // create a new response object that includes the token counts
-  const newRes = {
-    ...res,
-    usage: {
-      prompt_tokens: prompt_tokens.length,
-      completion_tokens: text_tokens.length,
-      total_tokens: prompt_tokens.length + text_tokens.length
-    }
-  };
+  const newRes = {
+    ...res,
+    usage: {
+      prompt_tokens: prompt_tokens.length,
+      completion_tokens: text_tokens.length,
+      total_tokens: prompt_tokens.length + text_tokens.length
+    }
+  };

-  return newRes;
+  return newRes;
 };

 module.exports = { askClient };

(Whitespace-only change: the removed and added lines in this hunk differ only in indentation or trailing whitespace, so both sides read identically here.)
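The newRes object above augments the client's response with OpenAI-style usage counts derived from the tokenized prompt and completion. The arithmetic in isolation (the arrays are stand-ins for real token ids):

const prompt_tokens = [464, 2746, 318, 257]; // stand-in: encoded prompt
const text_tokens = [18435, 995];            // stand-in: encoded completion

const usage = {
  prompt_tokens: prompt_tokens.length,                    // 4
  completion_tokens: text_tokens.length,                  // 2
  total_tokens: prompt_tokens.length + text_tokens.length // 6
};
console.log(usage);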
@@ -3,7 +3,10 @@ const TextStream = require('../stream');
 const { google } = require('googleapis');
 const { Agent, ProxyAgent } = require('undici');
 const { getMessages, saveMessage, saveConvo } = require('../../models');
-const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('@dqbd/tiktoken');
+const {
+  encoding_for_model: encodingForModel,
+  get_encoding: getEncoding
+} = require('@dqbd/tiktoken');

 const tokenizersCache = {};

@@ -65,7 +68,8 @@ class GoogleAgent {
     // The max prompt tokens is determined by the max context tokens minus the max response tokens.
     // Earlier messages will be dropped until the prompt is within the limit.
     this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1024;
-    this.maxPromptTokens = this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
+    this.maxPromptTokens =
+      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

     if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
       throw new Error(
|
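The budget arithmetic above: the prompt may use whatever the context window leaves after reserving room for the response. With hypothetical numbers:

const maxContextTokens = 8192;  // hypothetical context window
const maxResponseTokens = 1024; // default when maxOutputTokens is unset
const maxPromptTokens = maxContextTokens - maxResponseTokens; // 7168

// The guard in the hunk rejects configs where the two budgets overrun the window:
if (maxPromptTokens + maxResponseTokens > maxContextTokens) {
  throw new Error('maxPromptTokens + maxResponseTokens exceeds maxContextTokens');
}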
@@ -291,7 +295,10 @@ class GoogleAgent {
     try {
       const result = await this.getCompletion(message, messages, opts.abortController);
       blocked = result?.predictions?.[0]?.safetyAttributes?.blocked;
-      reply = result?.predictions?.[0]?.candidates?.[0]?.content || result?.predictions?.[0]?.content || '';
+      reply =
+        result?.predictions?.[0]?.candidates?.[0]?.content ||
+        result?.predictions?.[0]?.content ||
+        '';
       if (blocked === true) {
         reply = `Google blocked a proper response to your message:\n${JSON.stringify(
           result.predictions[0].safetyAttributes
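The wrapped reply expression walks two possible response shapes and falls back to an empty string. A standalone sketch with a made-up result shape:

// Responses may nest content under candidates, or expose it directly.
const result = { predictions: [{ content: 'hello from the model' }] };
const reply =
  result?.predictions?.[0]?.candidates?.[0]?.content ||
  result?.predictions?.[0]?.content ||
  '';
console.log(reply); // 'hello from the model': candidates is absent, so the second branch wins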
@@ -16,10 +16,7 @@ class TextStream extends Readable {
     if (this.currentIndex < this.text.length) {
       setTimeout(() => {
         const remainingChars = this.text.length - this.currentIndex;
-        const chunkSize = Math.min(
-          this.randomInt(minChunkSize, maxChunkSize + 1),
-          remainingChars
-        );
+        const chunkSize = Math.min(this.randomInt(minChunkSize, maxChunkSize + 1), remainingChars);

         const chunk = this.text.slice(this.currentIndex, this.currentIndex + chunkSize);
         this.push(chunk);
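The collapsed Math.min call above caps each random chunk at the number of characters left. The chunking logic in isolation (a sketch; randomInt mirrors what the class method presumably does):

const text = 'stream me out in small random chunks';
const minChunkSize = 2, maxChunkSize = 4;
// random integer in [min, max - 1]; called with maxChunkSize + 1, so chunks span [min, maxChunkSize]
const randomInt = (min, max) => Math.floor(Math.random() * (max - min)) + min;

let currentIndex = 0;
while (currentIndex < text.length) {
  const remainingChars = text.length - currentIndex;
  const chunkSize = Math.min(randomInt(minChunkSize, maxChunkSize + 1), remainingChars);
  process.stdout.write(text.slice(currentIndex, currentIndex + chunkSize));
  currentIndex += chunkSize;
}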