🪙 feat: Add messageId to Transactions (#11987)
Some checks are pending
Docker Dev Branch Images Build / build (Dockerfile, lc-dev, node) (push) Waiting to run
Docker Dev Branch Images Build / build (Dockerfile.multi, lc-dev-api, api-build) (push) Waiting to run

* feat: Add messageId to transactions

* chore: field order

* feat: Enhance token usage tracking by adding messageId parameter

- Updated `recordTokenUsage` method in BaseClient to accept a new `messageId` parameter for improved tracking.
- Propagated `messageId` in the AgentClient when recording usage.
- Added tests to ensure `messageId` is correctly passed and handled in various scenarios, including propagation across multiple usage entries.

* chore: Correct field order in createGeminiImageTool function

- Moved the conversationId field to the correct position in the object being passed to the recordTokenUsage method, keeping the parameter order consistent with the other recordTokenUsage call sites.

* refactor: Update OpenAIChatCompletionController and createResponse to use responseId instead of requestId

- Replaced instances of requestId with responseId in the OpenAIChatCompletionController for improved clarity in logging and tracking.
- Updated createResponse to include responseId in the requestBody, ensuring consistency across the handling of message identifiers.

* test: Add messageId to agent client tests

- Included messageId in the agent client tests to ensure proper handling and propagation of message identifiers during transaction recording.
- This update enhances the test coverage for scenarios involving messageId, aligning with recent changes in the tracking of message identifiers.

* fix: Update OpenAIChatCompletionController to use requestId for context

- Changed the context object in OpenAIChatCompletionController to use `requestId` instead of `responseId` for improved clarity and consistency in handling request identifiers.

* chore: field order
This commit is contained in:
Danny Avila 2026-02-27 23:50:13 -05:00 committed by GitHub
parent 6169d4f70b
commit 8b159079f5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 149 additions and 13 deletions

View file

@ -137,12 +137,14 @@ class BaseClient {
* @param {AppConfig['balance']} [balance]
* @param {number} promptTokens
* @param {number} completionTokens
* @param {string} [messageId]
* @returns {Promise<void>}
*/
async recordTokenUsage({ model, balance, promptTokens, completionTokens }) {
async recordTokenUsage({ model, balance, promptTokens, completionTokens, messageId }) {
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
model,
balance,
messageId,
promptTokens,
completionTokens,
});
@ -796,6 +798,7 @@ class BaseClient {
completionTokens,
balance: balanceConfig,
model: responseMessage.model,
messageId: this.responseMessageId,
});
}

View file

@ -251,8 +251,9 @@ function checkForSafetyBlock(response) {
* @param {string} params.userId - The user ID
* @param {string} params.conversationId - The conversation ID
* @param {string} params.model - The model name
* @param {string} [params.messageId] - The response message ID for transaction correlation
*/
async function recordTokenUsage({ usageMetadata, req, userId, conversationId, model }) {
async function recordTokenUsage({ usageMetadata, req, userId, conversationId, model, messageId }) {
if (!usageMetadata) {
logger.debug('[GeminiImageGen] No usage metadata available for balance tracking');
return;
@ -288,6 +289,7 @@ async function recordTokenUsage({ usageMetadata, req, userId, conversationId, mo
{
user: userId,
model,
messageId,
conversationId,
context: 'image_generation',
balance,
@ -445,10 +447,14 @@ function createGeminiImageTool(fields = {}) {
];
const conversationId = runnableConfig?.configurable?.thread_id;
const messageId =
runnableConfig?.configurable?.run_id ??
runnableConfig?.configurable?.requestBody?.messageId;
recordTokenUsage({
usageMetadata: apiResponse.usageMetadata,
req,
userId,
messageId,
conversationId,
model: geminiModel,
}).catch((error) => {

View file

@ -663,6 +663,7 @@ class AgentClient extends BaseClient {
context,
balance,
transactions,
messageId: this.responseMessageId,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
@ -1148,6 +1149,7 @@ class AgentClient extends BaseClient {
model: clientOptions.model,
balance: balanceConfig,
transactions: transactionsConfig,
messageId: this.responseMessageId,
}).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #titleConvo] Error recording collected usage',
@ -1186,6 +1188,7 @@ class AgentClient extends BaseClient {
model,
context,
balance,
messageId: this.responseMessageId,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
@ -1204,6 +1207,7 @@ class AgentClient extends BaseClient {
model,
balance,
context: 'reasoning',
messageId: this.responseMessageId,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,

View file

@ -263,6 +263,7 @@ describe('AgentClient - titleConvo', () => {
transactions: {
enabled: true,
},
messageId: 'response-123',
});
});

View file

@ -129,7 +129,6 @@ const OpenAIChatCompletionController = async (req, res) => {
const appConfig = req.config;
const requestStartTime = Date.now();
// Validate request
const validation = validateRequest(req.body);
if (isChatCompletionValidationFailure(validation)) {
return sendErrorResponse(res, 400, validation.error);
@ -150,20 +149,20 @@ const OpenAIChatCompletionController = async (req, res) => {
);
}
// Generate IDs
const requestId = `chatcmpl-${nanoid()}`;
const responseId = `chatcmpl-${nanoid()}`;
const conversationId = request.conversation_id ?? nanoid();
const parentMessageId = request.parent_message_id ?? null;
const created = Math.floor(Date.now() / 1000);
/** @type {import('@librechat/api').OpenAIResponseContext} — key must be `requestId` to match the type used by createChunk/buildNonStreamingResponse */
const context = {
created,
requestId,
requestId: responseId,
model: agentId,
};
logger.debug(
`[OpenAI API] Request ${requestId} started for agent ${agentId}, stream: ${request.stream}`,
`[OpenAI API] Response ${responseId} started for agent ${agentId}, stream: ${request.stream}`,
);
// Set up abort controller
@ -450,11 +449,11 @@ const OpenAIChatCompletionController = async (req, res) => {
agents: [primaryConfig],
messages: formattedMessages,
indexTokenCountMap,
runId: requestId,
runId: responseId,
signal: abortController.signal,
customHandlers: handlers,
requestBody: {
messageId: requestId,
messageId: responseId,
conversationId,
},
user: { id: userId },
@ -471,6 +470,10 @@ const OpenAIChatCompletionController = async (req, res) => {
thread_id: conversationId,
user_id: userId,
user: createSafeUser(req.user),
requestBody: {
messageId: responseId,
conversationId,
},
...(userMCPAuthMap != null && { userMCPAuthMap }),
},
signal: abortController.signal,
@ -496,6 +499,7 @@ const OpenAIChatCompletionController = async (req, res) => {
conversationId,
collectedUsage,
context: 'message',
messageId: responseId,
balance: balanceConfig,
transactions: transactionsConfig,
model: primaryConfig.model || agent.model_parameters?.model,
@ -509,7 +513,7 @@ const OpenAIChatCompletionController = async (req, res) => {
if (isStreaming) {
sendFinalChunk(handlerConfig);
res.end();
logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (streaming)`);
logger.debug(`[OpenAI API] Response ${responseId} completed in ${duration}ms (streaming)`);
// Wait for artifact processing after response ends (non-blocking)
if (artifactPromises.length > 0) {
@ -548,7 +552,9 @@ const OpenAIChatCompletionController = async (req, res) => {
usage,
);
res.json(response);
logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (non-streaming)`);
logger.debug(
`[OpenAI API] Response ${responseId} completed in ${duration}ms (non-streaming)`,
);
}
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'An error occurred';

View file

@ -486,6 +486,10 @@ const createResponse = async (req, res) => {
thread_id: conversationId,
user_id: userId,
user: createSafeUser(req.user),
requestBody: {
messageId: responseId,
conversationId,
},
...(userMCPAuthMap != null && { userMCPAuthMap }),
},
signal: abortController.signal,
@ -511,6 +515,7 @@ const createResponse = async (req, res) => {
conversationId,
collectedUsage,
context: 'message',
messageId: responseId,
balance: balanceConfig,
transactions: transactionsConfig,
model: primaryConfig.model || agent.model_parameters?.model,
@ -630,6 +635,10 @@ const createResponse = async (req, res) => {
thread_id: conversationId,
user_id: userId,
user: createSafeUser(req.user),
requestBody: {
messageId: responseId,
conversationId,
},
...(userMCPAuthMap != null && { userMCPAuthMap }),
},
signal: abortController.signal,
@ -655,6 +664,7 @@ const createResponse = async (req, res) => {
conversationId,
collectedUsage,
context: 'message',
messageId: responseId,
balance: balanceConfig,
transactions: transactionsConfig,
model: primaryConfig.model || agent.model_parameters?.model,

View file

@ -27,8 +27,15 @@ const { abortRun } = require('./abortRun');
* @param {string} params.conversationId - Conversation ID
* @param {Array<Object>} params.collectedUsage - Usage metadata from all models
* @param {string} [params.fallbackModel] - Fallback model name if not in usage
* @param {string} [params.messageId] - The response message ID for transaction correlation
*/
async function spendCollectedUsage({ userId, conversationId, collectedUsage, fallbackModel }) {
async function spendCollectedUsage({
userId,
conversationId,
collectedUsage,
fallbackModel,
messageId,
}) {
if (!collectedUsage || collectedUsage.length === 0) {
return;
}
@ -50,6 +57,7 @@ async function spendCollectedUsage({ userId, conversationId, collectedUsage, fal
const txMetadata = {
context: 'abort',
messageId,
conversationId,
user: userId,
model: usage.model ?? fallbackModel,
@ -144,6 +152,7 @@ async function abortMessage(req, res) {
conversationId: jobData?.conversationId,
collectedUsage,
fallbackModel: jobData?.model,
messageId: jobData?.responseMessageId,
});
} else {
// Fallback: no collected usage, use text-based token counting for primary model only