mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-01-01 08:08:49 +01:00
✨ feat: Add token usage indicator to chat input
- Add TokenUsageIndicator component with circular progress ring
- Create useTokenUsage hook with Jotai atom for state
- Add model context window lookups to data-provider
- Consolidate token utilities (output limits, TOKEN_DEFAULTS)
- Display input/output tokens and percentage of context used
This commit is contained in:
parent
4d7e6b4a58
commit
841a37e8cb
11 changed files with 710 additions and 348 deletions
|
|
@ -18,7 +18,9 @@ import {
|
|||
useQueryParams,
|
||||
useSubmitMessage,
|
||||
useFocusChatEffect,
|
||||
useTokenUsageComputation,
|
||||
} from '~/hooks';
|
||||
import TokenUsageIndicator from './TokenUsageIndicator';
|
||||
import { mainTextareaId, BadgeItem } from '~/common';
|
||||
import AttachFileChat from './Files/AttachFileChat';
|
||||
import FileFormChat from './Files/FileFormChat';
|
||||
|
|
@ -39,6 +41,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
|
|||
const submitButtonRef = useRef<HTMLButtonElement>(null);
|
||||
const textAreaRef = useRef<HTMLTextAreaElement>(null);
|
||||
useFocusChatEffect(textAreaRef);
|
||||
useTokenUsageComputation();
|
||||
const localize = useLocalize();
|
||||
|
||||
const [isCollapsed, setIsCollapsed] = useState(false);
|
||||
|
|
@ -332,6 +335,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
|
|||
}
|
||||
/>
|
||||
<div className="mx-auto flex" />
|
||||
<TokenUsageIndicator />
|
||||
{SpeechToText && (
|
||||
<AudioRecorder
|
||||
methods={methods}
|
||||
|
|
|
|||
87
client/src/components/Chat/Input/TokenUsageIndicator.tsx
Normal file
87
client/src/components/Chat/Input/TokenUsageIndicator.tsx
Normal file
|
|
@ -0,0 +1,87 @@
|
|||
import { memo } from 'react';
|
||||
import { TooltipAnchor } from '@librechat/client';
|
||||
import { useTokenUsage } from '~/hooks';
|
||||
import { cn } from '~/utils';
|
||||
|
||||
function formatTokens(n: number): string {
|
||||
if (n >= 1000000) {
|
||||
return `${(n / 1000000).toFixed(1)}M`;
|
||||
}
|
||||
if (n >= 1000) {
|
||||
return `${(n / 1000).toFixed(1)}K`;
|
||||
}
|
||||
return n.toString();
|
||||
}
|
||||
|
||||
/**
 * Circular ring indicator showing how much of the model's context window the
 * current conversation has consumed. Hover tooltip lists input/output totals
 * and the max context (or "N/A" when no limit is known).
 */
const TokenUsageIndicator = memo(function TokenUsageIndicator() {
  const { inputTokens, outputTokens, maxContext } = useTokenUsage();

  const used = inputTokens + outputTokens;
  const hasMaxContext = maxContext !== null && maxContext > 0;
  // Clamp at 100 so an over-full context never overdraws the ring.
  const percentage = hasMaxContext ? Math.min((used / maxContext) * 100, 100) : 0;

  // Geometry of the progress ring.
  const size = 28;
  const strokeWidth = 2.5;
  const radius = (size - strokeWidth) / 2;
  const circumference = 2 * Math.PI * radius;
  const offset = circumference - (percentage / 100) * circumference;

  const maxLabel = hasMaxContext ? formatTokens(maxContext) : 'N/A';
  const tooltipText = `Input: ${formatTokens(inputTokens)} | Output: ${formatTokens(outputTokens)} | Max: ${maxLabel}`;

  // Stroke color tracks how full the context window is.
  let progressColor = 'stroke-green-500';
  if (!hasMaxContext) {
    progressColor = 'stroke-text-secondary';
  } else if (percentage > 90) {
    progressColor = 'stroke-red-500';
  } else if (percentage > 75) {
    progressColor = 'stroke-yellow-500';
  }

  return (
    <TooltipAnchor
      description={tooltipText}
      render={
        <div className="flex size-9 items-center justify-center rounded-full p-1 transition-colors hover:bg-surface-hover">
          {/* Rotated -90° so progress starts at 12 o'clock. */}
          <svg
            width={size}
            height={size}
            viewBox={`0 0 ${size} ${size}`}
            className="rotate-[-90deg]"
          >
            {/* Background ring */}
            <circle
              cx={size / 2}
              cy={size / 2}
              r={radius}
              fill="transparent"
              strokeWidth={strokeWidth}
              className="stroke-border-medium"
            />
            {/* Progress ring */}
            <circle
              cx={size / 2}
              cy={size / 2}
              r={radius}
              fill="transparent"
              strokeWidth={strokeWidth}
              strokeDasharray={circumference}
              strokeDashoffset={hasMaxContext ? offset : circumference}
              strokeLinecap="round"
              className={cn('transition-all duration-300', progressColor)}
            />
          </svg>
        </div>
      }
    />
  );
});

export default TokenUsageIndicator;
|
||||
|
|
@ -35,3 +35,4 @@ export { default as useTextToSpeech } from './Input/useTextToSpeech';
|
|||
export { default as useGenerationsByLatest } from './useGenerationsByLatest';
|
||||
export { default as useLocalizedConfig } from './useLocalizedConfig';
|
||||
export { default as useResourcePermissions } from './useResourcePermissions';
|
||||
export { default as useTokenUsage, useTokenUsageComputation } from './useTokenUsage';
|
||||
|
|
|
|||
77
client/src/hooks/useTokenUsage.ts
Normal file
77
client/src/hooks/useTokenUsage.ts
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
import { useEffect, useMemo } from 'react';
|
||||
import { useSetAtom, useAtomValue } from 'jotai';
|
||||
import type { TMessage } from 'librechat-data-provider';
|
||||
import { getModelMaxTokens } from 'librechat-data-provider';
|
||||
import { tokenUsageAtom, type TokenUsage } from '~/store/tokenUsage';
|
||||
import { useGetMessagesByConvoId } from '~/data-provider';
|
||||
import { useChatContext } from '~/Providers';
|
||||
|
||||
/**
|
||||
* Hook to compute and update token usage from conversation messages.
|
||||
* Should be called in a component that has access to useChatContext.
|
||||
*/
|
||||
export function useTokenUsageComputation() {
|
||||
const { conversation } = useChatContext();
|
||||
const conversationId = conversation?.conversationId ?? '';
|
||||
const setTokenUsage = useSetAtom(tokenUsageAtom);
|
||||
|
||||
// Use the query hook to get reactive messages
|
||||
const { data: messages } = useGetMessagesByConvoId(conversationId, {
|
||||
enabled: !!conversationId && conversationId !== 'new',
|
||||
});
|
||||
|
||||
// Compute token usage whenever messages change
|
||||
const tokenData = useMemo(() => {
|
||||
let inputTokens = 0;
|
||||
let outputTokens = 0;
|
||||
|
||||
if (messages && Array.isArray(messages)) {
|
||||
for (const msg of messages as TMessage[]) {
|
||||
const count = msg.tokenCount ?? 0;
|
||||
if (msg.isCreatedByUser) {
|
||||
inputTokens += count;
|
||||
} else {
|
||||
outputTokens += count;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine max context: explicit setting or model default
|
||||
let maxContext: number | null = conversation?.maxContextTokens ?? null;
|
||||
|
||||
// If no explicit maxContextTokens, try to look up model default
|
||||
if (maxContext === null && conversation?.model) {
|
||||
const endpoint = conversation.endpointType ?? conversation.endpoint ?? '';
|
||||
const modelDefault = getModelMaxTokens(conversation.model, endpoint);
|
||||
if (modelDefault !== undefined) {
|
||||
maxContext = modelDefault;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
maxContext,
|
||||
};
|
||||
}, [
|
||||
messages,
|
||||
conversation?.maxContextTokens,
|
||||
conversation?.model,
|
||||
conversation?.endpoint,
|
||||
conversation?.endpointType,
|
||||
]);
|
||||
|
||||
// Update the atom when computed values change
|
||||
useEffect(() => {
|
||||
setTokenUsage(tokenData);
|
||||
}, [tokenData, setTokenUsage]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Hook to read the current token usage values.
|
||||
*/
|
||||
export function useTokenUsage(): TokenUsage {
|
||||
return useAtomValue(tokenUsageAtom);
|
||||
}
|
||||
|
||||
export default useTokenUsage;
|
||||
13
client/src/store/tokenUsage.ts
Normal file
13
client/src/store/tokenUsage.ts
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
import { atom } from 'jotai';
|
||||
|
||||
export type TokenUsage = {
|
||||
inputTokens: number;
|
||||
outputTokens: number;
|
||||
maxContext: number | null; // null = N/A
|
||||
};
|
||||
|
||||
export const tokenUsageAtom = atom<TokenUsage>({
|
||||
inputTokens: 0,
|
||||
outputTokens: 0,
|
||||
maxContext: null,
|
||||
});
|
||||
Loading…
Add table
Add a link
Reference in a new issue