ci(backend-review.yml): add linter step to the backend review workflow (#625)

* ci(backend-review.yml): add linter step to the backend review workflow

* chore(backend-review.yml): remove prettier from lint-action configuration
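
For reference, a minimal sketch of what such a lint step could look like. The workflow name, Node version, and step layout below are assumptions rather than contents of this PR, and later commits in this PR drop the `prettier` option from the lint-action configuration so that only ESLint runs:

```yaml
# Hypothetical excerpt of .github/workflows/backend-review.yml (structure assumed)
name: Backend Review
on:
  pull_request:
    branches: [main]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-node@v3
        with:
          node-version: 18
      - name: Install dependencies
        run: npm ci
      - name: Run linters
        uses: wearerequired/lint-action@v2
        with:
          eslint: true
          # a `prettier: true` input is removed again by a later commit in this PR
```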

* chore: apply new linting workflow

* chore(lint-staged.config.js): reorder lint-staged tasks for JavaScript and TypeScript files

* chore(eslint): update ignorePatterns in .eslintrc.js
chore(lint-action): remove prettier option in backend-review.yml
chore(package.json): add lint and lint:fix scripts
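
The two configuration pieces mentioned above might look roughly like this; the ignore patterns and ESLint flags are illustrative guesses, not values taken from the repository:

```js
// .eslintrc.js (hypothetical excerpt)
module.exports = {
  // ...existing parser, plugin, and rule settings...
  ignorePatterns: ['node_modules/', 'dist/', 'build/', '**/*.min.js'],
};

// package.json scripts that CI and git hooks can call (flags assumed for illustration):
//   "lint":     "eslint . --ext .js,.jsx,.ts,.tsx"
//   "lint:fix": "eslint . --ext .js,.jsx,.ts,.tsx --fix"
```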

* chore(lint-staged.config.js): remove prettier --write command for js, jsx, ts, tsx files

* chore(titleConvo.js): remove unnecessary console.log statement
chore(titleConvo.js): add missing comma in options object

* chore: apply linting to all files

* chore(lint-staged.config.js): update lint-staged configuration to include prettier formatting
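
Taken together, the lint-staged commits above suggest a final configuration along these lines, with ESLint fixing staged JavaScript/TypeScript files before Prettier formats them; the exact globs and task list are assumptions inferred from the commit messages:

```js
// lint-staged.config.js (hypothetical reconstruction)
module.exports = {
  // Run ESLint with auto-fix first, then let Prettier normalize formatting.
  '*.{js,jsx,ts,tsx}': ['eslint --fix', 'prettier --write'],
  // Other file types could be formatted by Prettier alone, e.g.:
  // '*.{json,md,yml}': ['prettier --write'],
};
```

Running `eslint --fix` before `prettier --write` is a common ordering so that Prettier has the last word on formatting.
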
Danny Avila 2023-07-14 09:36:49 -04:00 committed by GitHub
parent 637bb6bc11
commit e5336039fc
231 changed files with 1688 additions and 1526 deletions

@@ -1,6 +1,9 @@
 const crypto = require('crypto');
 const Keyv = require('keyv');
-const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('@dqbd/tiktoken');
+const {
+  encoding_for_model: encodingForModel,
+  get_encoding: getEncoding,
+} = require('@dqbd/tiktoken');
 const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
 const { Agent, ProxyAgent } = require('undici');
 const BaseClient = require('./BaseClient');
@@ -9,11 +12,7 @@ const CHATGPT_MODEL = 'gpt-3.5-turbo';
 const tokenizersCache = {};
 class ChatGPTClient extends BaseClient {
-  constructor(
-    apiKey,
-    options = {},
-    cacheOptions = {},
-  ) {
+  constructor(apiKey, options = {}, cacheOptions = {}) {
     super(apiKey, options, cacheOptions);
     cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
@@ -49,13 +48,16 @@ class ChatGPTClient extends BaseClient {
       model: modelOptions.model || CHATGPT_MODEL,
       temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
       top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
-      presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
+      presence_penalty:
+        typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
       stop: modelOptions.stop,
     };
     this.isChatGptModel = this.modelOptions.model.startsWith('gpt-');
     const { isChatGptModel } = this;
-    this.isUnofficialChatGptModel = this.modelOptions.model.startsWith('text-chat') || this.modelOptions.model.startsWith('text-davinci-002-render');
+    this.isUnofficialChatGptModel =
+      this.modelOptions.model.startsWith('text-chat') ||
+      this.modelOptions.model.startsWith('text-davinci-002-render');
     const { isUnofficialChatGptModel } = this;
     // Davinci models have a max context length of 4097 tokens.
@@ -64,10 +66,15 @@ class ChatGPTClient extends BaseClient {
     // The max prompt tokens is determined by the max context tokens minus the max response tokens.
     // Earlier messages will be dropped until the prompt is within the limit.
     this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
-    this.maxPromptTokens = this.options.maxPromptTokens || (this.maxContextTokens - this.maxResponseTokens);
+    this.maxPromptTokens =
+      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
     if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
-      throw new Error(`maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${this.maxPromptTokens + this.maxResponseTokens}) must be less than or equal to maxContextTokens (${this.maxContextTokens})`);
+      throw new Error(
+        `maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
+          this.maxPromptTokens + this.maxResponseTokens
+        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
+      );
     }
     this.userLabel = this.options.userLabel || 'User';
@@ -249,13 +256,10 @@ class ChatGPTClient extends BaseClient {
         }
       });
     }
-    const response = await fetch(
-      url,
-      {
-        ...opts,
-        signal: abortController.signal,
-      },
-    );
+    const response = await fetch(url, {
+      ...opts,
+      signal: abortController.signal,
+    });
     if (response.status !== 200) {
       const body = await response.text();
       const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
@@ -299,10 +303,7 @@ ${botMessage.message}
       .trim();
   }
-  async sendMessage(
-    message,
-    opts = {},
-  ) {
+  async sendMessage(message, opts = {}) {
     if (opts.clientOptions && typeof opts.clientOptions === 'object') {
       this.setOptions(opts.clientOptions);
     }
@@ -310,9 +311,10 @@ ${botMessage.message}
     const conversationId = opts.conversationId || crypto.randomUUID();
     const parentMessageId = opts.parentMessageId || crypto.randomUUID();
-    let conversation = typeof opts.conversation === 'object'
-      ? opts.conversation
-      : await this.conversationsCache.get(conversationId);
+    let conversation =
+      typeof opts.conversation === 'object'
+        ? opts.conversation
+        : await this.conversationsCache.get(conversationId);
     let isNewConversation = false;
     if (!conversation) {
@@ -357,7 +359,9 @@ ${botMessage.message}
           if (progressMessage === '[DONE]') {
             return;
           }
-          const token = this.isChatGptModel ? progressMessage.choices[0].delta.content : progressMessage.choices[0].text;
+          const token = this.isChatGptModel
+            ? progressMessage.choices[0].delta.content
+            : progressMessage.choices[0].text;
           // first event's delta content is always undefined
           if (!token) {
             return;
@@ -437,10 +441,11 @@ ${botMessage.message}
       }
       promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
     } else {
-      const currentDateString = new Date().toLocaleDateString(
-        'en-us',
-        { year: 'numeric', month: 'long', day: 'numeric' },
-      );
+      const currentDateString = new Date().toLocaleDateString('en-us', {
+        year: 'numeric',
+        month: 'long',
+        day: 'numeric',
+      });
       promptPrefix = `${this.startToken}Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date: ${currentDateString}${this.endToken}\n\n`;
     }
@@ -459,7 +464,9 @@ ${botMessage.message}
     let currentTokenCount;
     if (isChatGptModel) {
-      currentTokenCount = this.getTokenCountForMessage(instructionsPayload) + this.getTokenCountForMessage(messagePayload);
+      currentTokenCount =
+        this.getTokenCountForMessage(instructionsPayload) +
+        this.getTokenCountForMessage(messagePayload);
     } else {
       currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
     }
@@ -473,8 +480,13 @@ ${botMessage.message}
     const buildPromptBody = async () => {
       if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
         const message = orderedMessages.pop();
-        const roleLabel = message?.isCreatedByUser || message?.role?.toLowerCase() === 'user' ? this.userLabel : this.chatGptLabel;
-        const messageString = `${this.startToken}${roleLabel}:\n${message?.text ?? message?.message}${this.endToken}\n`;
+        const roleLabel =
+          message?.isCreatedByUser || message?.role?.toLowerCase() === 'user'
+            ? this.userLabel
+            : this.chatGptLabel;
+        const messageString = `${this.startToken}${roleLabel}:\n${
+          message?.text ?? message?.message
+        }${this.endToken}\n`;
         let newPromptBody;
         if (promptBody || isChatGptModel) {
           newPromptBody = `${messageString}${promptBody}`;
@@ -496,12 +508,14 @@ ${botMessage.message}
             return false;
           }
           // This is the first message, so we can't add it. Just throw an error.
-          throw new Error(`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`);
+          throw new Error(
+            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
+          );
         }
         promptBody = newPromptBody;
         currentTokenCount = newTokenCount;
         // wait for next tick to avoid blocking the event loop
-        await new Promise(resolve => setImmediate(resolve));
+        await new Promise((resolve) => setImmediate(resolve));
         return buildPromptBody();
       }
       return true;
@@ -517,7 +531,10 @@ ${botMessage.message}
     }
     // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
-    this.modelOptions.max_tokens = Math.min(this.maxContextTokens - currentTokenCount, this.maxResponseTokens);
+    this.modelOptions.max_tokens = Math.min(
+      this.maxContextTokens - currentTokenCount,
+      this.maxResponseTokens,
+    );
     if (this.options.debug) {
       console.debug(`Prompt : ${prompt}`);
@@ -534,13 +551,13 @@ ${botMessage.message}
   }
   /**
-  * Algorithm adapted from "6. Counting tokens for chat API calls" of
-  * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-  *
-  * An additional 2 tokens need to be added for metadata after all messages have been counted.
-  *
-  * @param {*} message
-  */
+   * Algorithm adapted from "6. Counting tokens for chat API calls" of
+   * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+   *
+   * An additional 2 tokens need to be added for metadata after all messages have been counted.
+   *
+   * @param {*} message
+   */
   getTokenCountForMessage(message) {
     let tokensPerMessage;
     let nameAdjustment;
@@ -558,7 +575,7 @@ ${botMessage.message}
       const numTokens = this.getTokenCount(value);
       // Adjust by `nameAdjustment` tokens if the property key is 'name'
-      const adjustment = (key === 'name') ? nameAdjustment : 0;
+      const adjustment = key === 'name' ? nameAdjustment : 0;
       return numTokens + adjustment;
     });
@@ -567,4 +584,4 @@ ${botMessage.message}
   }
 }
-module.exports = ChatGPTClient;
+module.exports = ChatGPTClient;