LibreChat/client/src/hooks/useServerStream.ts
Danny Avila 365c39c405
feat: Accurate Token Usage Tracking & Optional Balance (#1018)
* refactor(Chains/llms): allow passing callbacks

* refactor(BaseClient): accurately count completion tokens as generation only

* refactor(OpenAIClient): remove unused getTokenCountForResponse, pass streaming var and callbacks in initializeLLM

* wip: summary prompt tokens

* refactor(summarizeMessages): new cut-off strategy that generates a better summary by adding context from beginning, truncating the middle, and providing the end
wip: draft out relevant providers and variables for token tracing

* refactor(createLLM): make streaming prop false by default

* chore: remove use of getTokenCountForResponse

* refactor(agents): use BufferMemory, as ConversationSummaryBufferMemory token usage is not easy to trace

* chore: remove passing of streaming prop, also console log useful vars for tracing

* feat: formatFromLangChain helper function to count tokens for ChatModelStart

* refactor(initializeLLM): add role for LLM tracing

* chore(formatFromLangChain): update JSDoc

* feat(formatMessages): formats langChain messages into OpenAI payload format

* chore: install openai-chat-tokens

* refactor(formatMessage): optimize conditional langChain logic
fix(formatFromLangChain): fix destructuring

* feat: accurate prompt tokens for ChatModelStart before generation

* refactor(handleChatModelStart): move to callbacks dir, use factory function

* refactor(initializeLLM): rename 'role' to 'context'

* feat(Balance/Transaction): new schema/models for tracking token spend
refactor(Key): factor out model export to separate file

* refactor(initializeClient): add req,res objects to client options

* feat: add-balance script to add to an existing user's token balance
refactor(Transaction): use multiplier map/function, return balance update

* refactor(Tx): update enum for tokenType, return 1 for multiplier if no map match

* refactor(Tx): add fair fallback value multiplier in case the config result is undefined

* refactor(Balance): rename 'tokens' to 'tokenCredits'

* feat: balance check, add tx.js for new tx-related methods and tests

* chore(summaryPrompts): update prompt token count

* refactor(callbacks): pass req, res
wip: check balance

* refactor(Tx): make convoId a String type, fix(calculateTokenValue)

* refactor(BaseClient): add conversationId as client prop when assigned

* feat(RunManager): track LLM runs with manager, track token spend from LLM,
refactor(OpenAIClient): use RunManager to create callbacks, pass user prop to langchain api calls

* feat(spendTokens): helper to spend prompt/completion tokens

* feat(checkBalance): add helper to check, log, and deny the request if the balance doesn't have enough funds (a sketch of this flow appears at the end of these notes)
refactor(Balance): static check method now returns an object instead of a boolean
wip(OpenAIClient): implement use of checkBalance

* refactor(initializeLLM): add token buffer to ensure a summary isn't generated when the subsequent payload is too large
refactor(OpenAIClient): add checkBalance
refactor(createStartHandler): add checkBalance

* chore: remove prompt and completion token logging from route handler

* chore(spendTokens): add JSDoc

* feat(logTokenCost): record transactions for basic api calls

* chore(ask/edit): invoke getResponseSender only once per API call

* refactor(ask/edit): pass promptTokens to getIds and include in abort data

* refactor(getIds -> getReqData): rename function

* refactor(Tx): increase value if incomplete message

* feat: record tokenUsage when message is aborted

* refactor: subtract tokens when payload includes function_call

* refactor: add namespace for token_balance

* fix(spendTokens): only execute if corresponding token type amounts are defined

* refactor(checkBalance): throws Error if not enough token credits

* refactor(runTitleChain): pass and use signal, spread object props in create helpers, and use 'call' instead of 'run'

* fix(abortMiddleware): circular dependency, and default to empty string for completionTokens

* fix: properly cancel title requests when there aren't enough tokens to generate

* feat(predictNewSummary): custom chain for summaries to allow signal passing
refactor(summaryBuffer): use new custom chain

* feat(RunManager): add getRunByConversationId method, refactor: remove run and throw llm error on handleLLMError

* refactor(createStartHandler): if summary, add error details to runs

* fix(OpenAIClient): support aborting from summarization & showing error to user
refactor(summarizeMessages): remove unnecessary operations when counting summaryPromptTokens, note an alternative, and pass signal to summaryBuffer

* refactor(logTokenCost -> recordTokenUsage): rename

* refactor(checkBalance): include promptTokens in errorMessage

* refactor(checkBalance/spendTokens): move to models dir

* fix(createLanguageChain): correctly pass config

* refactor(initializeLLM/title): add tokenBuffer of 150 for balance check

* refactor(openAPIPlugin): pass signal and memory, filter functions by the one being called

* refactor(createStartHandler): add error to run if context is plugins as well

* refactor(RunManager/handleLLMError): throw error immediately if plugins, don't remove run

* refactor(PluginsClient): pass memory and signal to tools, cleanup error handling logic

* chore: use absolute equality for addTitle condition

* refactor(checkBalance): move checkBalance to execute after userMessage and tokenCounts are saved, also make conditional

* style: icon changes to match official

* fix(BaseClient): getTokenCountForResponse -> getTokenCount

* fix(formatLangChainMessages): add kwargs as fallback prop from lc_kwargs, update JSDoc

* refactor(Tx.create): does not update balance if CHECK_BALANCE is not enabled

* fix(e2e/cleanUp): cleanup new collections, import all model methods from index

* fix(config/add-balance): add uncaughtException listener

* fix: circular dependency

* refactor(initializeLLM/checkBalance): append new generations to errorMessage if cost exceeds balance

* fix(handleResponseMessage): only record token usage in this method if not error and completion is not skipped

* fix(createStartHandler): correct condition for generations

* chore: bump postcss due to moderate severity vulnerability

* chore: bump zod due to low severity vulnerability

* chore: bump openai & data-provider version

* feat(types): OpenAI Message types

* chore: update bun lockfile

* refactor(CodeBlock): add error block formatting

* refactor(utils/Plugin): factor out formatJSON and cn to separate files (json.ts and cn.ts), add extractJSON

* chore(logViolation): delete user_id after error is logged

* refactor(getMessageError -> Error): change to React.FC, add token_balance handling, use extractJSON to determine JSON instead of regex

* fix(DALL-E): use latest openai SDK

* chore: reorganize imports, fix type issue

* feat(server): add balance route

* fix(api/models): add auth

* feat(data-provider): /api/balance query

* feat: show balance if checking is enabled, refetch on final message or error

* chore: update docs, .env.example with token_usage info, add balance script command

* fix(Balance): fallback to empty obj for balance query

* style: slight adjustment of balance element

* docs(token_usage): add PR notes
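
A rough sketch of the balance flow these notes describe (illustrative only: the multiplier values, the in-memory Map, and the exact signatures are assumptions, not the actual api/models implementation):

    type TokenType = 'prompt' | 'completion';

    // Assumed multiplier map: converts raw token counts into token credits per model.
    const tokenValues: Record<string, Record<TokenType, number>> = {
      'gpt-3.5-turbo': { prompt: 1.5, completion: 2 },
      'gpt-4': { prompt: 30, completion: 60 },
    };

    // Return 1 when no multiplier is configured for the model (fair fallback).
    const getMultiplier = (model: string, tokenType: TokenType): number =>
      tokenValues[model]?.[tokenType] ?? 1;

    // In-memory stand-in for the Balance collection ('tokenCredits' per user).
    const balances = new Map<string, number>();

    // Deny the request up front if the prompt alone would exceed the balance.
    function checkBalance(user: string, model: string, promptTokens: number): void {
      const tokenCredits = balances.get(user) ?? 0;
      const cost = promptTokens * getMultiplier(model, 'prompt');
      if (cost > tokenCredits) {
        throw new Error(
          `Insufficient funds: cost ${cost}, balance ${tokenCredits} (promptTokens: ${promptTokens})`,
        );
      }
    }

    // Record prompt/completion spend after generation; return the updated balance.
    function spendTokens(
      user: string,
      model: string,
      { promptTokens = 0, completionTokens = 0 }: { promptTokens?: number; completionTokens?: number },
    ): number {
      const spend =
        promptTokens * getMultiplier(model, 'prompt') +
        completionTokens * getMultiplier(model, 'completion');
      const updated = (balances.get(user) ?? 0) - spend;
      balances.set(user, updated);
      return updated;
    }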
2023-10-05 18:34:10 -04:00

289 lines
7.8 KiB
TypeScript

import { useEffect } from 'react';
import { useResetRecoilState, useSetRecoilState } from 'recoil';
import {
  /* @ts-ignore */
  SSE,
  createPayload,
  useGetUserBalance,
  tMessageSchema,
  tConversationSchema,
  useGetStartupConfig,
} from 'librechat-data-provider';
import type { TResPlugin, TMessage, TConversation, TSubmission } from 'librechat-data-provider';
import useConversations from './useConversations';
import { useAuthContext } from './AuthContext';
import store from '~/store';
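
/** Parsed payload of a server-sent event from the response stream. */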
type TResData = {
  plugin: TResPlugin;
  final?: boolean;
  initial?: boolean;
  requestMessage: TMessage;
  responseMessage: TMessage;
  conversation: TConversation;
};
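
/**
 * Opens a server-sent event stream for the given submission and keeps
 * message and conversation state in sync as the response streams in.
 */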
export default function useServerStream(submission: TSubmission | null) {
  const setMessages = useSetRecoilState(store.messages);
  const setIsSubmitting = useSetRecoilState(store.isSubmitting);
  const setConversation = useSetRecoilState(store.conversation);
  const resetLatestMessage = useResetRecoilState(store.latestMessage);

  const { token } = useAuthContext();
  const { data: startupConfig } = useGetStartupConfig();
  const { refreshConversations } = useConversations();
  const balanceQuery = useGetUserBalance();
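
  // Appends the streamed text to the in-progress response message.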
  const messageHandler = (data: string, submission: TSubmission) => {
    const {
      messages,
      message,
      plugin,
      plugins,
      initialResponse,
      isRegenerate = false,
    } = submission;

    if (isRegenerate) {
      setMessages([
        ...messages,
        {
          ...initialResponse,
          text: data,
          parentMessageId: message?.overrideParentMessageId ?? null,
          messageId: message?.overrideParentMessageId + '_',
          plugin: plugin ?? null,
          plugins: plugins ?? [],
          submitting: true,
          // unfinished: true
        },
      ]);
    } else {
      setMessages([
        ...messages,
        message,
        {
          ...initialResponse,
          text: data,
          parentMessageId: message?.messageId,
          messageId: message?.messageId + '_',
          plugin: plugin ?? null,
          plugins: plugins ?? [],
          submitting: true,
          // unfinished: true
        },
      ]);
    }
  };
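
  // Finalizes messages and conversation state when the user aborts a request.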
  const cancelHandler = (data: TResData, submission: TSubmission) => {
    const { requestMessage, responseMessage, conversation } = data;
    const { messages, isRegenerate = false } = submission;

    // update the messages
    if (isRegenerate) {
      setMessages([...messages, responseMessage]);
    } else {
      setMessages([...messages, requestMessage, responseMessage]);
    }
    setIsSubmitting(false);

    // refresh title
    if (requestMessage.parentMessageId === '00000000-0000-0000-0000-000000000000') {
      setTimeout(() => {
        refreshConversations();
      }, 2000);

      // in case it takes too long.
      setTimeout(() => {
        refreshConversations();
      }, 5000);
    }

    setConversation((prevState) => ({
      ...prevState,
      ...conversation,
    }));
  };
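
  // Registers the server-assigned conversation/message ids once the request is created.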
  const createdHandler = (data: TResData, submission: TSubmission) => {
    const { messages, message, initialResponse, isRegenerate = false } = submission;

    if (isRegenerate) {
      setMessages([
        ...messages,
        {
          ...initialResponse,
          parentMessageId: message?.overrideParentMessageId ?? null,
          messageId: message?.overrideParentMessageId + '_',
          submitting: true,
        },
      ]);
    } else {
      setMessages([
        ...messages,
        message,
        {
          ...initialResponse,
          parentMessageId: message?.messageId,
          messageId: message?.messageId + '_',
          submitting: true,
        },
      ]);
    }

    const { conversationId } = message;
    setConversation((prevState) =>
      tConversationSchema.parse({
        ...prevState,
        conversationId,
      }),
    );
    resetLatestMessage();
  };
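
  // Commits the completed request/response pair and refreshes the conversation title.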
  const finalHandler = (data: TResData, submission: TSubmission) => {
    const { requestMessage, responseMessage, conversation } = data;
    const { messages, isRegenerate = false } = submission;

    // update the messages
    if (isRegenerate) {
      setMessages([...messages, responseMessage]);
    } else {
      setMessages([...messages, requestMessage, responseMessage]);
    }
    setIsSubmitting(false);

    // refresh title
    if (requestMessage.parentMessageId === '00000000-0000-0000-0000-000000000000') {
      setTimeout(() => {
        refreshConversations();
      }, 2000);

      // in case it takes too long.
      setTimeout(() => {
        refreshConversations();
      }, 5000);
    }

    setConversation((prevState) => ({
      ...prevState,
      ...conversation,
    }));
  };
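
  // Parses the error payload into an error message and stops the submission.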
  const errorHandler = (data: TResData, submission: TSubmission) => {
    const { messages, message } = submission;
    console.log('Error:', data);

    const errorResponse = tMessageSchema.parse({
      ...data,
      error: true,
      parentMessageId: message?.messageId,
    });
    setIsSubmitting(false);
    setMessages([...messages, message, errorResponse]);
    return;
  };
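
  // Asks the server to abort generation, then finalizes state with the aborted data.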
  const abortConversation = (conversationId = '', submission: TSubmission) => {
    console.log(submission);
    const { endpoint } = submission?.conversation || {};

    fetch(`/api/ask/${endpoint}/abort`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${token}`,
      },
      body: JSON.stringify({
        abortKey: conversationId,
      }),
    })
      .then((response) => response.json())
      .then((data) => {
        console.log('aborted', data);
        cancelHandler(data, submission);
      })
      .catch((error) => {
        console.error('Error aborting request');
        console.error(error);
        // errorHandler({ text: 'Error aborting request' }, { ...submission, message });
      });
    return;
  };
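
  // Opens the SSE connection whenever a new submission arrives; cleans up on unmount.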
  useEffect(() => {
    if (submission === null) {
      return;
    }

    if (Object.keys(submission).length === 0) {
      return;
    }

    let { message } = submission;

    const { server, payload } = createPayload(submission);

    const events = new SSE(server, {
      payload: JSON.stringify(payload),
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
    });

    events.onmessage = (e: MessageEvent) => {
      const data = JSON.parse(e.data);

      if (data.final) {
        const { plugins } = data;
        finalHandler(data, { ...submission, plugins, message });
        startupConfig?.checkBalance && balanceQuery.refetch();
        console.log('final', data);
      }
      if (data.created) {
        message = {
          ...data.message,
          overrideParentMessageId: message?.overrideParentMessageId,
        };
        createdHandler(data, { ...submission, message });
      } else {
        const text = data.text || data.response;
        const { plugin, plugins } = data;

        if (data.message) {
          messageHandler(text, { ...submission, plugin, plugins, message });
        }
      }
    };

    events.onopen = () => console.log('connection is opened');

    events.oncancel = () =>
      abortConversation(message?.conversationId ?? submission?.conversationId, submission);

    events.onerror = function (e: MessageEvent) {
      console.log('error in opening conn.');
      startupConfig?.checkBalance && balanceQuery.refetch();
      events.close();

      const data = JSON.parse(e.data);
      errorHandler(data, { ...submission, message });
    };

    setIsSubmitting(true);
    events.stream();

    return () => {
      const isCancelled = events.readyState <= 1;
      events.close();
      // setSource(null);
      if (isCancelled) {
        const e = new Event('cancel');
        events.dispatchEvent(e);
      }
      setIsSubmitting(false);
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [submission]);
}