feat: add real-time conversation cost tracking with proper token counting

- Add comprehensive ModelPricing service with 100+ models and historical pricing
- Create real-time ConversationCost component that displays in chat header
- Use actual token counts from model APIs instead of client-side estimation
- Fix BaseClient.js to preserve tokenCount in response messages
- Add tokenCount, usage, and tokens fields to message schema
- Update Header component to include ConversationCost display
- Support OpenAI, Anthropic, Google, and other major model providers
- Include color-coded cost display based on amount
- Add 32 unit tests for pricing calculation logic

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
constanttime 2025-08-17 20:13:49 +05:30
parent 543b617e1c
commit 3edf6fdf6b
9 changed files with 2041 additions and 1 deletions

View file

@ -779,13 +779,25 @@ class BaseClient {
}
}
// Persist provider-reported token usage on the assistant message so downstream
// cost calculation can use actual counts instead of client-side estimates.
// `getStreamUsage` is optional — only clients that expose stream usage define it.
if (this.getStreamUsage != null) {
const streamUsage = this.getStreamUsage();
// Attach usage only when the provider reported a positive token count on either
// side; Number() coerces missing/non-numeric fields to NaN, which fails the > 0 check.
if (streamUsage && (Number(streamUsage[this.inputTokensKey]) > 0 || Number(streamUsage[this.outputTokensKey]) > 0)) {
responseMessage.usage = {
// Keys are normalized to OpenAI-style names; `inputTokensKey`/`outputTokensKey`
// map each provider's field names onto prompt/completion tokens.
prompt_tokens: streamUsage[this.inputTokensKey],
completion_tokens: streamUsage[this.outputTokensKey],
// These two may be undefined for providers that do not report them —
// consumers must treat them as optional.
reasoning_tokens: streamUsage.reasoning_tokens,
input_token_details: streamUsage.input_token_details,
};
}
}
// Kick off persistence without awaiting; the promise is stored on the message
// so callers that need durability can await `databasePromise` themselves.
responseMessage.databasePromise = this.saveMessageToDatabase(
responseMessage,
saveOptions,
user,
);
// Record the ID as saved — presumably to prevent a duplicate save elsewhere; confirm against other savedMessageIds checks.
this.savedMessageIds.add(responseMessage.messageId);
// NOTE(review): the commit message says tokenCount should be *preserved* on
// response messages, yet it is deleted here after saving. The `usage` object
// above now carries the token counts — confirm this deletion is intentional.
delete responseMessage.tokenCount;
return responseMessage;
}