LibreChat/api/server/routes/ask/google.js
Danny Avila 365c39c405
feat: Accurate Token Usage Tracking & Optional Balance (#1018)
* refactor(Chains/llms): allow passing callbacks

* refactor(BaseClient): accurately count completion tokens as generation only

* refactor(OpenAIClient): remove unused getTokenCountForResponse, pass streaming var and callbacks in initializeLLM

* wip: summary prompt tokens

* refactor(summarizeMessages): new cut-off strategy that generates a better summary by adding context from beginning, truncating the middle, and providing the end
wip: draft out relevant providers and variables for token tracing
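
A minimal sketch of the middle-truncation idea, assuming a plain array of chat messages; the helper name and the keep counts are illustrative, not the PR's actual implementation:

function truncateMiddle(messages, keep = 3) {
  // keep the start and end of the conversation, elide the middle
  if (messages.length <= keep * 2) {
    return messages;
  }
  const omitted = messages.length - keep * 2;
  return [
    ...messages.slice(0, keep),
    { role: 'system', content: `[${omitted} earlier messages truncated]` },
    ...messages.slice(-keep),
  ];
}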

* refactor(createLLM): make streaming prop false by default

* chore: remove use of getTokenCountForResponse

* refactor(agents): use BufferMemory, as ConversationSummaryBufferMemory token usage is not easy to trace

* chore: remove passing of streaming prop, also console log useful vars for tracing

* feat: formatFromLangChain helper function to count tokens for ChatModelStart

* refactor(initializeLLM): add role for LLM tracing

* chore(formatFromLangChain): update JSDoc

* feat(formatMessages): formats langChain messages into OpenAI payload format
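
A rough sketch of that conversion, assuming LangChain's message conventions (_getType(), lc_kwargs); the real formatMessages may cover more cases:

const roleMap = { human: 'user', ai: 'assistant', system: 'system' };

function formatLangChainMessage(message) {
  // lc_kwargs holds the serialized fields; kwargs is the fallback noted later in this changelog
  const kwargs = message.lc_kwargs ?? message.kwargs ?? {};
  return {
    role: roleMap[message._getType()] ?? 'user',
    content: kwargs.content ?? message.content ?? '',
  };
}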

* chore: install openai-chat-tokens

* refactor(formatMessage): optimize conditional langChain logic
fix(formatFromLangChain): fix destructuring

* feat: accurate prompt tokens for ChatModelStart before generation
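
With openai-chat-tokens (installed a few entries above), the prompt-token count can be estimated from the formatted payload before generation; promptTokensEstimate is that package's actual export, while the thin wrapper is illustrative:

const { promptTokensEstimate } = require('openai-chat-tokens');

function countPromptTokens(messages, functions) {
  // the estimate accounts for the per-message overhead of OpenAI chat payloads
  return promptTokensEstimate({ messages, functions });
}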

* refactor(handleChatModelStart): move to callbacks dir, use factory function

* refactor(initializeLLM): rename 'role' to 'context'

* feat(Balance/Transaction): new schema/models for tracking token spend
refactor(Key): factor out model export to separate file
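
As a hedged sketch, the two schemas could look roughly like this; aside from tokenCredits, the String conversationId, and the tokenType enum noted elsewhere in this changelog, the field names are assumptions:

const mongoose = require('mongoose');

const balanceSchema = new mongoose.Schema({
  user: { type: mongoose.Schema.Types.ObjectId, ref: 'User', index: true, required: true },
  tokenCredits: { type: Number, default: 0 },
});

const transactionSchema = new mongoose.Schema(
  {
    user: { type: mongoose.Schema.Types.ObjectId, ref: 'User', required: true },
    conversationId: { type: String },
    tokenType: { type: String, enum: ['prompt', 'completion'] },
    rawAmount: { type: Number }, // raw token count, negative when spending
    tokenValue: { type: Number }, // rawAmount scaled by the model's multiplier
  },
  { timestamps: true },
);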

* refactor(initializeClient): add req,res objects to client options

* feat: add-balance script to add to an existing user's token balance
refactor(Transaction): use multiplier map/function, return balance update
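
The multiplier map/function might plausibly take this shape; the per-model rates below are invented for illustration, but the return-1-when-unmatched fallback matches the note that follows:

const tokenValues = {
  'gpt-3.5-turbo': { prompt: 1.5, completion: 2 }, // invented rates
  'gpt-4': { prompt: 30, completion: 60 },
};

function getMultiplier({ model, tokenType }) {
  // fall back to 1 when the model has no entry in the map
  return tokenValues[model]?.[tokenType] ?? 1;
}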

* refactor(Tx): update enum for tokenType, return 1 for multiplier if no map match

* refactor(Tx): add fair fallback value multiplier in case the config result is undefined

* refactor(Balance): rename 'tokens' to 'tokenCredits'

* feat: balance check, add tx.js for new tx-related methods and tests

* chore(summaryPrompts): update prompt token count

* refactor(callbacks): pass req, res
wip: check balance

* refactor(Tx): make convoId a String type, fix(calculateTokenValue)

* refactor(BaseClient): add conversationId as client prop when assigned

* feat(RunManager): track LLM runs with manager, track token spend from LLM,
refactor(OpenAIClient): use RunManager to create callbacks, pass user prop to langchain api calls

* feat(spendTokens): helper to spend prompt/completion tokens
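
A minimal sketch of such a helper, assuming the Transaction model sketched above; per a fix later in this changelog, each token type is only recorded when its amount is defined:

const { Transaction } = require('../models'); // illustrative path

async function spendTokens(txData, { promptTokens, completionTokens }) {
  if (promptTokens !== undefined) {
    await Transaction.create({ ...txData, tokenType: 'prompt', rawAmount: -promptTokens });
  }
  if (completionTokens !== undefined) {
    await Transaction.create({ ...txData, tokenType: 'completion', rawAmount: -completionTokens });
  }
}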

* feat(checkBalance): add helper to check, log, deny request if balance doesn't have enough funds
refactor(Balance): static check method to return object instead of boolean now
wip(OpenAIClient): implement use of checkBalance
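
Sketching that flow under the same assumptions, with a static Balance.check returning an object and the throw-on-insufficient behavior adopted later in this changelog:

const { Balance } = require('../models'); // illustrative path

async function checkBalance({ req, res, txData }) {
  const { canSpend, balance, tokenCost } = await Balance.check(txData);
  if (canSpend) {
    return true;
  }
  // promptTokens lands in the error message, per a later refactor
  throw new Error(
    `Insufficient funds: balance ${balance}, cost ${tokenCost}, promptTokens ${txData.promptTokens}`,
  );
}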

* refactor(initializeLLM): add token buffer to ensure a summary isn't generated when the subsequent payload is too large
refactor(OpenAIClient): add checkBalance
refactor(createStartHandler): add checkBalance

* chore: remove prompt and completion token logging from route handler

* chore(spendTokens): add JSDoc

* feat(logTokenCost): record transactions for basic api calls

* chore(ask/edit): invoke getResponseSender only once per API call

* refactor(ask/edit): pass promptTokens to getIds and include in abort data

* refactor(getIds -> getReqData): rename function

* refactor(Tx): increase value if the message is incomplete

* feat: record tokenUsage when message is aborted

* refactor: subtract tokens when payload includes function_call

* refactor: add namespace for token_balance

* fix(spendTokens): only execute if corresponding token type amounts are defined

* refactor(checkBalance): throws Error if not enough token credits

* refactor(runTitleChain): pass and use signal, spread object props in create helpers, and use 'call' instead of 'run'

* fix(abortMiddleware): circular dependency, and default to empty string for completionTokens

* fix: properly cancel title requests when there aren't enough tokens to generate

* feat(predictNewSummary): custom chain for summaries to allow signal passing
refactor(summaryBuffer): use new custom chain

* feat(RunManager): add getRunByConversationId method, refactor: remove run and throw llm error on handleLLMError

* refactor(createStartHandler): if summary, add error details to runs

* fix(OpenAIClient): support aborting from summarization & showing error to user
refactor(summarizeMessages): remove unnecessary operations for counting summaryPromptTokens, note an alternative, and pass signal to summaryBuffer

* refactor(logTokenCost -> recordTokenUsage): rename

* refactor(checkBalance): include promptTokens in errorMessage

* refactor(checkBalance/spendTokens): move to models dir

* fix(createLanguageChain): correctly pass config

* refactor(initializeLLM/title): add tokenBuffer of 150 for balance check

* refactor(openAPIPlugin): pass signal and memory, filter functions by the one being called

* refactor(createStartHandler): add error to run if context is plugins as well

* refactor(RunManager/handleLLMError): throw error immediately if plugins, don't remove run

* refactor(PluginsClient): pass memory and signal to tools, cleanup error handling logic

* chore: use absolute equality for addTitle condition

* refactor(checkBalance): move checkBalance to execute after userMessage and tokenCounts are saved, also make conditional

* style: icon changes to match official

* fix(BaseClient): getTokenCountForResponse -> getTokenCount

* fix(formatLangChainMessages): add kwargs as fallback prop from lc_kwargs, update JSDoc

* refactor(Tx.create): does not update balance if CHECK_BALANCE is not enabled

* fix(e2e/cleanUp): cleanup new collections, import all model methods from index

* fix(config/add-balance): add uncaughtException listener

* fix: circular dependency

* refactor(initializeLLM/checkBalance): append new generations to errorMessage if cost exceeds balance

* fix(handleResponseMessage): only record token usage in this method if not error and completion is not skipped

* fix(createStartHandler): correct condition for generations

* chore: bump postcss due to moderate severity vulnerability

* chore: bump zod due to low severity vulnerability

* chore: bump openai & data-provider version

* feat(types): OpenAI Message types

* chore: update bun lockfile

* refactor(CodeBlock): add error block formatting

* refactor(utils/Plugin): factor out formatJSON and cn to separate files (json.ts and cn.ts), add extractJSON
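
One plausible shape for an extractJSON helper, written in plain JS here for illustration (the PR's version lives in json.ts): find the first opening brace and shrink the candidate until it parses, rather than matching with a regex:

function extractJSON(text) {
  const start = text.indexOf('{');
  if (start === -1) {
    return null;
  }
  for (let end = text.length; end > start; end--) {
    try {
      return JSON.parse(text.slice(start, end));
    } catch (e) {
      // keep shrinking the candidate substring until it parses
    }
  }
  return null;
}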

* chore(logViolation): delete user_id after error is logged

* refactor(getMessageError -> Error): change to React.FC, add token_balance handling, use extractJSON to determine JSON instead of regex

* fix(DALL-E): use latest openai SDK

* chore: reorganize imports, fix type issue

* feat(server): add balance route
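
A minimal sketch of what the balance route could look like; the model import path is an assumption, and auth middleware is assumed to be mounted upstream:

const express = require('express');
const router = express.Router();
const { Balance } = require('../../models'); // illustrative path

router.get('/', async (req, res) => {
  // req.user is populated by the auth middleware
  const balance = await Balance.findOne({ user: req.user.id }).lean();
  res.json({ tokenCredits: balance?.tokenCredits ?? 0 });
});

module.exports = router;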

* fix(api/models): add auth

* feat(data-provider): /api/balance query

* feat: show balance if checking is enabled, refetch on final message or error

* chore: update docs, .env.example with token_usage info, add balance script command

* fix(Balance): fallback to empty obj for balance query

* style: slight adjustment of balance element

* docs(token_usage): add PR notes
2023-10-05 18:34:10 -04:00

const express = require('express');
const router = express.Router();
const crypto = require('crypto');
const { GoogleClient } = require('../../../app');
const { saveMessage, getConvoTitle, saveConvo, getConvo } = require('../../../models');
const { handleError, sendMessage, createOnProgress } = require('../../utils');
const { getUserKey, checkUserKeyExpiry } = require('../../services/UserService');
const { setHeaders } = require('../../middleware');

router.post('/', setHeaders, async (req, res) => {
  const { endpoint, text, parentMessageId, conversationId: oldConversationId } = req.body;
  if (text.length === 0) {
    return handleError(res, { text: 'Prompt empty or too short' });
  }
  if (endpoint !== 'google') {
    return handleError(res, { text: 'Illegal request' });
  }

  // build endpoint option
  const endpointOption = {
    examples: req.body?.examples ?? [{ input: { content: '' }, output: { content: '' } }],
    promptPrefix: req.body?.promptPrefix ?? null,
    key: req.body?.key ?? null,
    modelOptions: {
      model: req.body?.model ?? 'chat-bison',
      modelLabel: req.body?.modelLabel ?? null,
      temperature: req.body?.temperature ?? 0.2,
      maxOutputTokens: req.body?.maxOutputTokens ?? 1024,
      topP: req.body?.topP ?? 0.95,
      topK: req.body?.topK ?? 40,
    },
  };

  const availableModels = ['chat-bison', 'text-bison', 'codechat-bison'];
  if (!availableModels.includes(endpointOption.modelOptions.model)) {
    return handleError(res, { text: 'Illegal request: model' });
  }

  const conversationId = oldConversationId || crypto.randomUUID();

  // eslint-disable-next-line no-use-before-define
  return await ask({
    text,
    endpointOption,
    conversationId,
    parentMessageId,
    req,
    res,
  });
});

const ask = async ({ text, endpointOption, parentMessageId = null, conversationId, req, res }) => {
  let userMessage;
  let userMessageId;
  // let promptTokens;
  let responseMessageId;
  let lastSavedTimestamp = 0;
  const { overrideParentMessageId = null } = req.body;
  const user = req.user.id;

  try {
    // invoked by the client with ids and the user message as they are created
    const getReqData = (data = {}) => {
      for (let key in data) {
        if (key === 'userMessage') {
          userMessage = data[key];
          userMessageId = data[key].messageId;
        } else if (key === 'responseMessageId') {
          responseMessageId = data[key];
          // } else if (key === 'promptTokens') {
          //   promptTokens = data[key];
        } else if (!conversationId && key === 'conversationId') {
          conversationId = data[key];
        }
      }

      sendMessage(res, { message: userMessage, created: true });
    };

    const { onProgress: progressCallback } = createOnProgress({
      onProgress: ({ text: partialText }) => {
        const currentTimestamp = Date.now();
        // throttle partial-response saves to at most one every 500ms
        if (currentTimestamp - lastSavedTimestamp > 500) {
          lastSavedTimestamp = currentTimestamp;
          saveMessage({
            messageId: responseMessageId,
            sender: 'PaLM2',
            conversationId,
            parentMessageId: overrideParentMessageId || userMessageId,
            text: partialText,
            unfinished: true,
            cancelled: false,
            error: false,
            user,
          });
        }
      },
    });

    const abortController = new AbortController();
    const isUserProvided = process.env.PALM_KEY === 'user_provided';

    let key;
    if (endpointOption.key && isUserProvided) {
      checkUserKeyExpiry(
        endpointOption.key,
        'Your GOOGLE_TOKEN has expired. Please provide your token again.',
      );
      key = await getUserKey({ userId: user, name: 'google' });
      key = JSON.parse(key);
      delete endpointOption.key;
      console.log('Using service account key provided by User for PaLM models');
    }
    // fall back to the service account key on disk when no user-provided key was set
    if (!key) {
      try {
        key = require('../../../data/auth.json');
      } catch (e) {
        console.log('No \'auth.json\' file (service account key) found in /api/data/ for PaLM models');
      }
    }
    const clientOptions = {
      // debug: true, // for testing
      reverseProxyUrl: process.env.GOOGLE_REVERSE_PROXY || null,
      proxy: process.env.PROXY || null,
      ...endpointOption,
    };

    const client = new GoogleClient(key, clientOptions);

    let response = await client.sendMessage(text, {
      getReqData,
      user,
      conversationId,
      parentMessageId,
      overrideParentMessageId,
      // progressCallback is curried: invoking it with the response context
      // returns the handler that receives each partial-text event
      onProgress: progressCallback.call(null, {
        res,
        text,
        parentMessageId: overrideParentMessageId || userMessageId,
      }),
      abortController,
    });

    if (overrideParentMessageId) {
      response.parentMessageId = overrideParentMessageId;
    }

    await saveConvo(user, {
      ...endpointOption,
      ...endpointOption.modelOptions,
      conversationId,
      endpoint: 'google',
    });
    await saveMessage({ ...response, user });

    sendMessage(res, {
      title: await getConvoTitle(user, conversationId),
      final: true,
      conversation: await getConvo(user, conversationId),
      requestMessage: userMessage,
      responseMessage: response,
    });
    res.end();
  } catch (error) {
    console.error(error);
    const errorMessage = {
      messageId: responseMessageId,
      sender: 'PaLM2',
      conversationId,
      parentMessageId,
      unfinished: false,
      cancelled: false,
      error: true,
      text: error.message,
    };
    await saveMessage({ ...errorMessage, user });
    handleError(res, errorMessage);
  }
};

module.exports = router;