Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-16 16:30:15 +01:00)
* WIP: initial logging changes
  - add several transports in ~/config/winston
  - omit messages in logs, truncate long strings
  - add short blurb in dotenv for debug logging
  - GoogleClient: using logger
  - OpenAIClient: using logger, handleOpenAIErrors
  - Adding typedef for payload message
  - bumped winston and using winston-daily-rotate-file
  - moved config for server paths to ~/config dir
  - Added `DEBUG_LOGGING=true` to .env.example
* WIP: Refactor logging statements in code
* WIP: Refactor logging statements and import configurations
* WIP: Refactor logging statements and import configurations
* refactor: broadcast Redis initialization message with `info` not `debug`
* refactor: complete Refactor logging statements and import configurations
* chore: delete unused tools
* fix: circular dependencies due to accessing logger
* refactor(handleText): handle booleans and write tests
* refactor: redact sensitive values, better formatting
* chore: improve log formatting, avoid passing strings to 2nd arg
* fix(ci): fix jest tests due to logger changes
* refactor(getAvailablePluginsController): cache plugins as they are static and avoid async addOpenAPISpecs call every time
* chore: update docs
* chore: update docs
* chore: create separate meiliSync logger, clean up logs to avoid being unnecessarily verbose
* chore: spread objects where they are commonly logged to allow string truncation
* chore: improve error log formatting
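The commit message above mentions truncating long strings, redacting sensitive values, and gating debug output behind `DEBUG_LOGGING=true`. The snippet below is a minimal sketch of how such a winston configuration could look; it is not LibreChat's actual ~/config/winston setup, and the key names in SENSITIVE_KEYS, the MAX_LENGTH cap, and the log file path are assumptions for illustration.

const winston = require('winston');
const DailyRotateFile = require('winston-daily-rotate-file');

const MAX_LENGTH = 2000; // assumed cap on logged string length
const SENSITIVE_KEYS = ['apiKey', 'password', 'token']; // assumed key names to redact

// winston.format() wraps a transform that runs on every log entry
const truncateAndRedact = winston.format((info) => {
  for (const key of Object.keys(info)) {
    if (SENSITIVE_KEYS.includes(key)) {
      info[key] = '[REDACTED]';
    } else if (typeof info[key] === 'string' && info[key].length > MAX_LENGTH) {
      info[key] = info[key].slice(0, MAX_LENGTH) + '... [truncated]';
    }
  }
  return info;
});

const logger = winston.createLogger({
  // enable verbose output only when DEBUG_LOGGING=true is set in .env
  level: process.env.DEBUG_LOGGING === 'true' ? 'debug' : 'info',
  format: winston.format.combine(
    truncateAndRedact(),
    winston.format.timestamp(),
    winston.format.json(),
  ),
  transports: [
    new winston.transports.Console(),
    // rotate log files daily and keep two weeks of history
    new DailyRotateFile({
      filename: 'logs/app-%DATE%.log', // assumed path
      datePattern: 'YYYY-MM-DD',
      maxFiles: '14d',
    }),
  ],
});

module.exports = logger;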
24 lines · 729 B · JavaScript
const { load } = require('tiktoken/load');
const { Tiktoken } = require('tiktoken/lite');
const registry = require('tiktoken/registry.json');
const models = require('tiktoken/model_to_encoding.json');
const logger = require('~/config/winston');

/**
 * Counts the tokens in a string using the encoding for the given model.
 * Returns 0 if the text cannot be tokenized.
 */
const countTokens = async (text = '', modelName = 'gpt-3.5-turbo') => {
  let encoder = null;
  try {
    // Resolve the model name to its encoding and load the BPE ranks
    const model = await load(registry[models[modelName]]);
    encoder = new Tiktoken(model.bpe_ranks, model.special_tokens, model.pat_str);
    const tokens = encoder.encode(text);
    encoder.free();
    return tokens.length;
  } catch (e) {
    logger.error('[countTokens]', e);
    if (encoder) {
      // release the WASM-backed encoder even when encoding fails
      encoder.free();
    }
    return 0;
  }
};

module.exports = countTokens;
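A minimal usage sketch, assuming the module is exposed under a require path such as ~/server/utils/countTokens (the path is hypothetical; use wherever this file actually lives in the repo):

// hypothetical require path, for illustration only
const countTokens = require('~/server/utils/countTokens');

(async () => {
  // resolves to 0 if the model name is unknown or encoding fails
  const count = await countTokens('Hello, world!', 'gpt-3.5-turbo');
  console.log(`token count: ${count}`);
})();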