mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-17 00:40:14 +01:00
🛠️ fix: Minor Fixes in Message, Ask/EditController, OpenAIClient, and countTokens (#1463)
* fix(Message): avoid overwriting unprovided properties
* fix(OpenAIClient): return intermediateReply on user abort
* fix(AskController): do not send/save final message if abort was triggered
* fix(countTokens): avoid fetching remote registry and exclusively use cl100k_base or p50k_base weights for token counting
* refactor(Message/messageSchema): rely on messageSchema for default values when saving messages
* fix(EditController): do not send/save final message if abort was triggered
* fix(config/helpers): fix module resolution error
This commit is contained in:
parent
e4c555f95a
commit
431fc6284f
7 changed files with 37 additions and 31 deletions
|
|
@@ -1,13 +1,12 @@
|
|||
const { load } = require('tiktoken/load');
|
||||
const { Tiktoken } = require('tiktoken/lite');
|
||||
const registry = require('tiktoken/registry.json');
|
||||
const models = require('tiktoken/model_to_encoding.json');
|
||||
const p50k_base = require('tiktoken/encoders/p50k_base.json');
|
||||
const cl100k_base = require('tiktoken/encoders/cl100k_base.json');
|
||||
const logger = require('~/config/winston');
|
||||
|
||||
const countTokens = async (text = '', modelName = 'gpt-3.5-turbo') => {
|
||||
let encoder = null;
|
||||
try {
|
||||
const model = await load(registry[models[modelName]]);
|
||||
const model = modelName.includes('text-davinci-003') ? p50k_base : cl100k_base;
|
||||
encoder = new Tiktoken(model.bpe_ranks, model.special_tokens, model.pat_str);
|
||||
const tokens = encoder.encode(text);
|
||||
encoder.free();
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue