Mirror of https://github.com/danny-avila/LibreChat.git, synced 2025-12-25 20:58:50 +01:00
feat: enhance token usage computation by integrating agent details and optimizing message fetching logic
parent 9c61d73076
commit 2a3f67e468
1 changed file with 58 additions and 25 deletions
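The "integrating agent details" half of the change boils down to gating an agent query on the endpoint type, so the agent's own model can later drive the context-window lookup. Below is a minimal sketch of that gating pattern pulled out of the diff into a hypothetical standalone hook; the exact `useGetAgentByIdQuery` signature and the shape of the returned agent are assumed from how the diff uses them.

```ts
import { isAgentsEndpoint } from 'librechat-data-provider';
import { useGetAgentByIdQuery } from '~/data-provider';

/**
 * Hypothetical helper mirroring the diff: only fetch the agent when the
 * conversation targets the agents endpoint; `enabled: !!agentId` keeps
 * React Query idle for every other endpoint.
 */
export function useAgentForTokenLimits(conversation?: { endpoint?: string; agent_id?: string }) {
  const endpoint = conversation?.endpoint ?? '';
  const isAgent = isAgentsEndpoint(endpoint);
  const agentId = isAgent ? conversation?.agent_id : null;

  // The fetched agent is expected to carry `model` and `provider` (used further down in the diff).
  const { data: agent } = useGetAgentByIdQuery(agentId, { enabled: !!agentId });

  return { endpoint, isAgent, agent };
}
```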
@@ -1,10 +1,10 @@
 import { useEffect, useMemo } from 'react';
 import { useParams } from 'react-router-dom';
 import { useSetAtom, useAtomValue } from 'jotai';
-import { getModelMaxTokens } from 'librechat-data-provider';
+import { getModelMaxTokens, isAgentsEndpoint } from 'librechat-data-provider';
 import type { TMessage } from 'librechat-data-provider';
 import { tokenUsageAtom, type TokenUsage } from '~/store/tokenUsage';
-import { useGetMessagesByConvoId } from '~/data-provider';
+import { useGetMessagesByConvoId, useGetAgentByIdQuery } from '~/data-provider';
 import { useChatContext } from '~/Providers';
 
 /**
@@ -17,27 +17,47 @@ export function useTokenUsageComputation() {
   const setTokenUsage = useSetAtom(tokenUsageAtom);
   const { conversationId: paramId } = useParams();
 
-  // Determine the query key to use - same logic as useChatHelpers
-  const queryParam = paramId === 'new' ? paramId : conversationId || paramId || '';
+  // Determine if we need to fetch agent details for token limits
+  const endpoint = conversation?.endpoint ?? '';
+  const isAgent = isAgentsEndpoint(endpoint);
+  const agentId = isAgent ? conversation?.agent_id : null;
 
-  // Use the query hook to get reactive messages
-  // Subscribe to both the paramId-based key and conversationId-based key
-  const { data: messages } = useGetMessagesByConvoId(queryParam, {
-    enabled: !!queryParam,
+  // Fetch full agent details (includes model and provider) when using agents endpoint
+  const { data: agent } = useGetAgentByIdQuery(agentId, {
+    enabled: !!agentId,
   });
 
-  // Also subscribe to the actual conversationId if different from queryParam
-  // This ensures we get updates when conversation transitions from 'new' to actual ID
-  const { data: messagesById } = useGetMessagesByConvoId(conversationId, {
-    enabled: !!conversationId && conversationId !== 'new' && conversationId !== queryParam,
+  /**
+   * Determine the effective conversation ID for querying messages.
+   *
+   * Priority logic:
+   * 1. If URL param is 'new' -> use 'new' (landing page, no conversation yet)
+   * 2. If conversation has actual ID -> use it (active conversation)
+   * 3. Fallback to URL param (handles direct navigation to /c/:id)
+   *
+   * This ensures we always query the correct messages cache key and avoids
+   * dual subscriptions that would cause duplicate reactivity and API calls.
+   */
+  const effectiveConversationId = useMemo(() => {
+    if (paramId === 'new') {
+      return 'new';
+    }
+    return conversationId || paramId || '';
+  }, [paramId, conversationId]);
+
+  /**
+   * Single subscription to messages for the effective conversation ID.
+   * React Query caches messages by [QueryKeys.messages, id], so we'll
+   * automatically get updates when:
+   * - New messages are added via setMessages() in useChatHelpers
+   * - Conversation transitions from 'new' to actual ID (cache is synced in setMessages)
+   * - Messages are fetched from the server
+   */
+  const { data: messages } = useGetMessagesByConvoId(effectiveConversationId, {
+    enabled: !!effectiveConversationId,
   });
 
-  // Use whichever has more messages (handles transition from new -> actual ID)
-  const effectiveMessages = useMemo(() => {
-    const msgArray = messages ?? [];
-    const msgByIdArray = messagesById ?? [];
-    return msgByIdArray.length > msgArray.length ? msgByIdArray : msgArray;
-  }, [messages, messagesById]);
+  const effectiveMessages = useMemo<TMessage[]>(() => messages ?? [], [messages]);
 
   // Compute token usage whenever messages change
   const tokenData = useMemo(() => {
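The "optimizing message fetching logic" half is the hunk above: two overlapping `useGetMessagesByConvoId` subscriptions collapse into one, keyed by a single effective conversation ID. The priority logic reads naturally as a pure function; here is a small sketch with illustrative inputs (the helper name and the IDs are hypothetical).

```ts
/**
 * Sketch of the effectiveConversationId priority from the hunk above:
 * 'new' wins on the landing page, then the live conversation ID, then the URL param.
 */
function resolveEffectiveConversationId(paramId?: string, conversationId?: string | null): string {
  if (paramId === 'new') {
    return 'new';
  }
  return conversationId || paramId || '';
}

resolveEffectiveConversationId('new', null);          // 'new' — landing page, no conversation yet
resolveEffectiveConversationId('new', 'abc-123');     // 'new' — the 'new' cache key stays in sync via setMessages()
resolveEffectiveConversationId('abc-123', 'abc-123'); // 'abc-123' — active conversation
resolveEffectiveConversationId('abc-123', undefined); // 'abc-123' — direct navigation to /c/:id before context hydrates
```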
@@ -45,7 +65,7 @@ export function useTokenUsageComputation() {
     let outputTokens = 0;
 
     if (effectiveMessages && Array.isArray(effectiveMessages)) {
-      for (const msg of effectiveMessages as TMessage[]) {
+      for (const msg of effectiveMessages) {
         const count = msg.tokenCount ?? 0;
         if (msg.isCreatedByUser) {
           inputTokens += count;
@@ -59,11 +79,22 @@ export function useTokenUsageComputation() {
     let maxContext: number | null = conversation?.maxContextTokens ?? null;
 
     // If no explicit maxContextTokens, try to look up model default
-    if (maxContext === null && conversation?.model) {
-      const endpoint = conversation.endpointType ?? conversation.endpoint ?? '';
-      const modelDefault = getModelMaxTokens(conversation.model, endpoint);
-      if (modelDefault !== undefined) {
-        maxContext = modelDefault;
+    if (maxContext === null) {
+      // For agents endpoint, get the actual model from the fetched agent
+      if (isAgent && agent?.model) {
+        // Use agent's provider, or fall back to 'agents' endpoint for lookup
+        const provider = agent.provider ?? endpoint;
+        const modelDefault = getModelMaxTokens(agent.model, provider);
+        if (modelDefault !== undefined) {
+          maxContext = modelDefault;
+        }
+      } else if (conversation?.model) {
+        // For other endpoints, use conversation model directly
+        const effectiveEndpoint = conversation?.endpointType ?? endpoint;
+        const modelDefault = getModelMaxTokens(conversation.model, effectiveEndpoint);
+        if (modelDefault !== undefined) {
+          maxContext = modelDefault;
+        }
       }
     }
 
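With the agent available, the context-window lookup above becomes a three-step fallback: an explicit `maxContextTokens` on the conversation, then the agent's own model and provider (agents endpoint), then the conversation model. A condensed sketch of that order follows, with inputs narrowed to the fields the diff actually reads; the `number | undefined` return of `getModelMaxTokens(model, endpoint)` is assumed from its usage here, and the helper name is hypothetical.

```ts
import { getModelMaxTokens } from 'librechat-data-provider';

interface MaxContextInputs {
  conversation?: { maxContextTokens?: number; model?: string; endpointType?: string };
  endpoint: string;                               // conversation?.endpoint ?? ''
  isAgent: boolean;                               // isAgentsEndpoint(endpoint)
  agent?: { model?: string; provider?: string };  // result of useGetAgentByIdQuery
}

/** Fallback order used by the hunk above: explicit limit -> agent model -> conversation model. */
function resolveMaxContext({ conversation, endpoint, isAgent, agent }: MaxContextInputs): number | null {
  let maxContext: number | null = conversation?.maxContextTokens ?? null;

  if (maxContext === null) {
    if (isAgent && agent?.model) {
      // Agents endpoint: the agent's provider scopes the model lookup
      maxContext = getModelMaxTokens(agent.model, agent.provider ?? endpoint) ?? null;
    } else if (conversation?.model) {
      // Other endpoints: the conversation's model and endpoint type decide the limit
      maxContext = getModelMaxTokens(conversation.model, conversation.endpointType ?? endpoint) ?? null;
    }
  }

  return maxContext;
}
```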
@@ -76,8 +107,10 @@ export function useTokenUsageComputation() {
     effectiveMessages,
     conversation?.maxContextTokens,
     conversation?.model,
-    conversation?.endpoint,
     conversation?.endpointType,
+    isAgent,
+    agent,
+    endpoint,
   ]);
 
   // Update the atom when computed values change