Mirror of https://github.com/danny-avila/LibreChat.git

style(endpoints.js): fix indentation and add semicolons
fix(tokenizer.js): add try-catch block and error handling
style(SetTokenDialog/index.jsx): fix typo in sentence
refactor(data-service.ts): change argument format to match server API

parent e0d5e75e73
commit 5c5871afd8
4 changed files with 20 additions and 12 deletions

endpoints.js

@@ -23,9 +23,9 @@ router.get('/', function (req, res) {
     : false;
   const chatGPTBrowser = process.env.CHATGPT_TOKEN
     ? {
-      userProvide: process.env.CHATGPT_TOKEN == 'user_provide',
-      availableModels: getChatGPTBrowserModels()
-    }
+        userProvide: process.env.CHATGPT_TOKEN == 'user_provide',
+        availableModels: getChatGPTBrowserModels()
+      }
     : false;

   res.send(JSON.stringify({ azureOpenAI, openAI, bingAI, chatGPTBrowser }));

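For orientation, the object this route sends back looks roughly like the following on the client side. This is a sketch inferred from the res.send line above, not part of the commit: the TEndpointsResponse and TChatGPTBrowserConfig names, the string[] element type for availableModels, and the shapes of the other three fields are assumptions.

// Sketch only: approximate client-side shape of the object this route sends.
// Only the chatGPTBrowser branch is visible in the hunk above; the type names
// and the string[] model list are assumptions.
type TChatGPTBrowserConfig =
  | false
  | {
      userProvide: boolean;      // true when CHATGPT_TOKEN == 'user_provide'
      availableModels: string[]; // whatever getChatGPTBrowserModels() returns
    };

interface TEndpointsResponse {
  azureOpenAI: unknown; // shapes of these three are not shown in this hunk
  openAI: unknown;
  bingAI: unknown;
  chatGPTBrowser: TChatGPTBrowserConfig;
}
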
tokenizer.js

@@ -6,13 +6,21 @@ const registry = require('@dqbd/tiktoken/registry.json');
 const models = require('@dqbd/tiktoken/model_to_encoding.json');

 router.post('/', async (req, res) => {
-  const { arg } = req.body;
-  // console.log(typeof req.body === 'object' ? { ...req.body, ...req.query } : req.query);
-  const model = await load(registry[models['gpt-3.5-turbo']]);
-  const encoder = new Tiktoken(model.bpe_ranks, model.special_tokens, model.pat_str);
-  const tokens = encoder.encode(arg.text);
-  encoder.free();
-  res.send({ count: tokens.length });
+  try {
+    const { arg } = req.body;
+
+    console.log('context:', arg, req.body);
+
+    // console.log(typeof req.body === 'object' ? { ...req.body, ...req.query } : req.query);
+    const model = await load(registry[models['gpt-3.5-turbo']]);
+    const encoder = new Tiktoken(model.bpe_ranks, model.special_tokens, model.pat_str);
+    const tokens = encoder.encode(arg.text);
+    encoder.free();
+    res.send({ count: tokens.length });
+  } catch (e) {
+    console.error(e);
+    res.status(500).send(e.message);
+  }
 });

 module.exports = router;

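A minimal sketch of how a client would observe the new error handling. The '/api/tokenizer' path and the fetch wrapper are assumptions (the mount path is not shown in this diff); the nested { arg: { text } } body mirrors the arg.text read in the handler above, and the { count } result mirrors its res.send.

// Sketch only: exercising the tokenizer route and its new 500-on-error path.
// The '/api/tokenizer' path is an assumption; body and response shapes come
// from the hunk above.
async function fetchTokenCount(text: string): Promise<number> {
  const res = await fetch('/api/tokenizer', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ arg: { text } }), // the handler reads arg.text
  });
  if (!res.ok) {
    // with the try/catch added here, handler failures now come back as 500 + e.message
    throw new Error(`tokenizer error ${res.status}: ${await res.text()}`);
  }
  const { count } = (await res.json()) as { count: number };
  return count;
}
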
SetTokenDialog/index.jsx

@@ -84,7 +84,7 @@ const SetTokenDialog = ({ open, onOpenChange, endpoint }) => {
         )}
       />
       <small className="text-red-600">
-        Your token will be send to the server, but we won't save it.
+        Your token will be sent to the server, but not saved.
       </small>
       {helpText?.[endpoint]}
     </div>

data-service.ts

@@ -66,5 +66,5 @@ export const getAIEndpoints = () => {
 }

 export const updateTokenCount = (text: string) => {
-  return request.post(endpoints.tokenizer(), {arg: {text}});
+  return request.post(endpoints.tokenizer(), {arg: text});
 }

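A hedged usage sketch for the updated helper. It assumes request.post resolves to the parsed JSON body ({ count: number }, per tokenizer.js above); the import path, function name, and logging are illustrative only.

// Sketch only: one way client code might consume updateTokenCount.
// Assumes request.post resolves to the parsed response body; adjust if the
// request helper returns a full response object instead.
import { updateTokenCount } from './data-service'; // hypothetical path

async function logPromptTokens(draft: string): Promise<void> {
  const { count } = (await updateTokenCount(draft)) as { count: number };
  console.log(`current prompt is ~${count} tokens`);
}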