🪙 feat: Add messageId to Transactions (#11987)

* feat: Add messageId to transactions

* chore: field order

* feat: Enhance token usage tracking by adding messageId parameter

- Updated `recordTokenUsage` method in BaseClient to accept a new `messageId` parameter for improved tracking (a minimal sketch of the call shape follows this list).
- Propagated `messageId` in the AgentClient when recording usage.
- Added tests to ensure `messageId` is correctly passed and handled in various scenarios, including propagation across multiple usage entries.
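
To make the new call shape concrete, here is a minimal sketch of a client forwarding `messageId` alongside the rest of its transaction metadata. Only the parameter names mirror this PR; the `spendTokens` stub and the `ExampleClient` wrapper are illustrative assumptions, not LibreChat code.

```js
// Hypothetical stand-in for the real token-spending helper.
async function spendTokens(txMetadata, tokenUsage) {
  console.log('recording transaction', { ...txMetadata, ...tokenUsage });
}

// Minimal sketch of a client that accepts and forwards the new messageId parameter.
class ExampleClient {
  constructor({ user, conversationId, responseMessageId }) {
    this.user = user;
    this.conversationId = conversationId;
    this.responseMessageId = responseMessageId;
  }

  async recordTokenUsage({ model, balance, promptTokens, completionTokens, messageId }) {
    await spendTokens(
      {
        model,
        balance,
        messageId, // correlates the transaction with the response message
        conversationId: this.conversationId,
        user: this.user,
        context: 'message',
      },
      { promptTokens, completionTokens },
    );
  }
}

// Mirrors how the response message ID is passed down when usage is recorded:
const client = new ExampleClient({
  user: 'user-123',
  conversationId: 'convo-123',
  responseMessageId: 'response-123',
});
client.recordTokenUsage({
  model: 'gpt-4',
  balance: { enabled: true },
  promptTokens: 10,
  completionTokens: 5,
  messageId: client.responseMessageId,
});
```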

* chore: Correct field order in createGeminiImageTool function

- Moved the `conversationId` field to its correct position in the object passed to `recordTokenUsage`, so the arguments match the helper's expected field order (see the sketch below).
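
For reference, a hedged sketch of how the tool resolves the identifiers and the field order of the object it passes to its `recordTokenUsage` helper. The field names mirror the diff further down; the wrapper function and placeholder values are illustrative only.

```js
// Sketch only: identifier resolution and argument shape for the Gemini image tool's
// usage recording. The wrapper function itself is hypothetical.
function buildUsageArgs({ runnableConfig, apiResponse, req, userId, geminiModel }) {
  const conversationId = runnableConfig?.configurable?.thread_id;
  const messageId =
    runnableConfig?.configurable?.run_id ??
    runnableConfig?.configurable?.requestBody?.messageId;
  return {
    usageMetadata: apiResponse.usageMetadata,
    req,
    userId,
    messageId,
    conversationId,
    model: geminiModel,
  };
}

// Example (placeholder values):
// buildUsageArgs({
//   runnableConfig: { configurable: { thread_id: 'convo-1', requestBody: { messageId: 'msg-1' } } },
//   apiResponse: { usageMetadata: { promptTokenCount: 10 } },
//   req: null,
//   userId: 'user-1',
//   geminiModel: 'gemini-image-model',
// });
```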

* refactor: Update OpenAIChatCompletionController and createResponse to use responseId instead of requestId

- Replaced `requestId` with `responseId` in the OpenAIChatCompletionController for clearer logging and tracking.
- Updated `createResponse` to include `responseId` in the `requestBody`, keeping message-identifier handling consistent across controllers (a sketch of the flow follows this list).
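
As a rough illustration of the flow, the controller now mints a single `responseId` and threads it through the run options and the usage-recording call. The `nanoid` usage and the `requestBody` shape are taken from the diffs below; the wrapper function is an assumption for readability.

```js
const { nanoid } = require('nanoid');

// Sketch only: one response ID, reused everywhere a message identifier is needed.
function buildRunIdentifiers(request) {
  const responseId = `chatcmpl-${nanoid()}`;
  const conversationId = request.conversation_id ?? nanoid();
  return {
    responseId,
    conversationId,
    // Forwarded to the agent run so downstream tools and usage recording see the same ID.
    requestBody: { messageId: responseId, conversationId },
  };
}

// Example:
// const ids = buildRunIdentifiers({ conversation_id: null });
// recordCollectedUsage(deps, { ...params, messageId: ids.responseId, conversationId: ids.conversationId });
```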

* test: Add messageId to agent client tests

- Included messageId in the agent client tests to ensure proper handling and propagation of message identifiers during transaction recording.
- This extends test coverage for `messageId` scenarios, matching the recent changes to message-identifier tracking.

* fix: Update OpenAIChatCompletionController to use requestId for context

- Changed the context object in OpenAIChatCompletionController to use the `requestId` key (holding the generated response ID) rather than `responseId`, keeping the key name consistent with what downstream response helpers expect (see the sketch below).
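
Put differently, the property name stays `requestId` (the shape downstream response helpers expect, per the inline comment in the diff below) while the value is the generated response ID. A minimal sketch:

```js
// Sketch only: `responseId` and `agentId` are assumed inputs from the controller.
function buildContext(responseId, agentId) {
  return {
    created: Math.floor(Date.now() / 1000),
    requestId: responseId, // key kept as `requestId` to match the existing context type
    model: agentId,
  };
}
```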

* chore: field order
Authored by Danny Avila on 2026-02-27 23:50:13 -05:00, committed by GitHub
parent 6169d4f70b
commit 8b159079f5
10 changed files with 149 additions and 13 deletions

```diff
@@ -137,12 +137,14 @@ class BaseClient {
    * @param {AppConfig['balance']} [balance]
    * @param {number} promptTokens
    * @param {number} completionTokens
+   * @param {string} [messageId]
    * @returns {Promise<void>}
    */
-  async recordTokenUsage({ model, balance, promptTokens, completionTokens }) {
+  async recordTokenUsage({ model, balance, promptTokens, completionTokens, messageId }) {
     logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
       model,
       balance,
+      messageId,
       promptTokens,
       completionTokens,
     });
@@ -796,6 +798,7 @@ class BaseClient {
         completionTokens,
         balance: balanceConfig,
         model: responseMessage.model,
+        messageId: this.responseMessageId,
       });
     }
```

```diff
@@ -251,8 +251,9 @@ function checkForSafetyBlock(response) {
  * @param {string} params.userId - The user ID
  * @param {string} params.conversationId - The conversation ID
  * @param {string} params.model - The model name
+ * @param {string} [params.messageId] - The response message ID for transaction correlation
  */
-async function recordTokenUsage({ usageMetadata, req, userId, conversationId, model }) {
+async function recordTokenUsage({ usageMetadata, req, userId, conversationId, model, messageId }) {
   if (!usageMetadata) {
     logger.debug('[GeminiImageGen] No usage metadata available for balance tracking');
     return;
@@ -288,6 +289,7 @@ async function recordTokenUsage({ usageMetadata, req, userId, conversationId, mo
     {
       user: userId,
       model,
+      messageId,
       conversationId,
       context: 'image_generation',
       balance,
@@ -445,10 +447,14 @@ function createGeminiImageTool(fields = {}) {
       ];
       const conversationId = runnableConfig?.configurable?.thread_id;
+      const messageId =
+        runnableConfig?.configurable?.run_id ??
+        runnableConfig?.configurable?.requestBody?.messageId;
       recordTokenUsage({
         usageMetadata: apiResponse.usageMetadata,
         req,
         userId,
+        messageId,
         conversationId,
         model: geminiModel,
       }).catch((error) => {
```

```diff
@@ -663,6 +663,7 @@ class AgentClient extends BaseClient {
         context,
         balance,
         transactions,
+        messageId: this.responseMessageId,
         conversationId: this.conversationId,
         user: this.user ?? this.options.req.user?.id,
         endpointTokenConfig: this.options.endpointTokenConfig,
@@ -1148,6 +1149,7 @@ class AgentClient extends BaseClient {
         model: clientOptions.model,
         balance: balanceConfig,
         transactions: transactionsConfig,
+        messageId: this.responseMessageId,
       }).catch((err) => {
         logger.error(
           '[api/server/controllers/agents/client.js #titleConvo] Error recording collected usage',
@@ -1186,6 +1188,7 @@ class AgentClient extends BaseClient {
         model,
         context,
         balance,
+        messageId: this.responseMessageId,
         conversationId: this.conversationId,
         user: this.user ?? this.options.req.user?.id,
         endpointTokenConfig: this.options.endpointTokenConfig,
@@ -1204,6 +1207,7 @@ class AgentClient extends BaseClient {
         model,
         balance,
         context: 'reasoning',
+        messageId: this.responseMessageId,
         conversationId: this.conversationId,
         user: this.user ?? this.options.req.user?.id,
         endpointTokenConfig: this.options.endpointTokenConfig,
```

```diff
@@ -263,6 +263,7 @@ describe('AgentClient - titleConvo', () => {
       transactions: {
         enabled: true,
       },
+      messageId: 'response-123',
     });
   });
```

```diff
@@ -129,7 +129,6 @@ const OpenAIChatCompletionController = async (req, res) => {
   const appConfig = req.config;
   const requestStartTime = Date.now();

-  // Validate request
   const validation = validateRequest(req.body);
   if (isChatCompletionValidationFailure(validation)) {
     return sendErrorResponse(res, 400, validation.error);
@@ -150,20 +149,20 @@ const OpenAIChatCompletionController = async (req, res) => {
     );
   }

-  // Generate IDs
-  const requestId = `chatcmpl-${nanoid()}`;
+  const responseId = `chatcmpl-${nanoid()}`;
   const conversationId = request.conversation_id ?? nanoid();
   const parentMessageId = request.parent_message_id ?? null;
   const created = Math.floor(Date.now() / 1000);

+  /** @type {import('@librechat/api').OpenAIResponseContext} — key must be `requestId` to match the type used by createChunk/buildNonStreamingResponse */
   const context = {
     created,
-    requestId,
+    requestId: responseId,
     model: agentId,
   };

   logger.debug(
-    `[OpenAI API] Request ${requestId} started for agent ${agentId}, stream: ${request.stream}`,
+    `[OpenAI API] Response ${responseId} started for agent ${agentId}, stream: ${request.stream}`,
   );

   // Set up abort controller
@@ -450,11 +449,11 @@ const OpenAIChatCompletionController = async (req, res) => {
       agents: [primaryConfig],
       messages: formattedMessages,
       indexTokenCountMap,
-      runId: requestId,
+      runId: responseId,
       signal: abortController.signal,
       customHandlers: handlers,
       requestBody: {
-        messageId: requestId,
+        messageId: responseId,
         conversationId,
       },
       user: { id: userId },
@@ -471,6 +470,10 @@ const OpenAIChatCompletionController = async (req, res) => {
         thread_id: conversationId,
         user_id: userId,
         user: createSafeUser(req.user),
+        requestBody: {
+          messageId: responseId,
+          conversationId,
+        },
         ...(userMCPAuthMap != null && { userMCPAuthMap }),
       },
       signal: abortController.signal,
@@ -496,6 +499,7 @@ const OpenAIChatCompletionController = async (req, res) => {
       conversationId,
       collectedUsage,
       context: 'message',
+      messageId: responseId,
       balance: balanceConfig,
       transactions: transactionsConfig,
       model: primaryConfig.model || agent.model_parameters?.model,
@@ -509,7 +513,7 @@ const OpenAIChatCompletionController = async (req, res) => {
     if (isStreaming) {
       sendFinalChunk(handlerConfig);
       res.end();
-      logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (streaming)`);
+      logger.debug(`[OpenAI API] Response ${responseId} completed in ${duration}ms (streaming)`);

       // Wait for artifact processing after response ends (non-blocking)
       if (artifactPromises.length > 0) {
@@ -548,7 +552,9 @@ const OpenAIChatCompletionController = async (req, res) => {
         usage,
       );
       res.json(response);
-      logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (non-streaming)`);
+      logger.debug(
+        `[OpenAI API] Response ${responseId} completed in ${duration}ms (non-streaming)`,
+      );
     }
   } catch (error) {
     const errorMessage = error instanceof Error ? error.message : 'An error occurred';
```

```diff
@@ -486,6 +486,10 @@ const createResponse = async (req, res) => {
         thread_id: conversationId,
         user_id: userId,
         user: createSafeUser(req.user),
+        requestBody: {
+          messageId: responseId,
+          conversationId,
+        },
         ...(userMCPAuthMap != null && { userMCPAuthMap }),
       },
       signal: abortController.signal,
@@ -511,6 +515,7 @@ const createResponse = async (req, res) => {
       conversationId,
       collectedUsage,
       context: 'message',
+      messageId: responseId,
       balance: balanceConfig,
       transactions: transactionsConfig,
       model: primaryConfig.model || agent.model_parameters?.model,
@@ -630,6 +635,10 @@ const createResponse = async (req, res) => {
         thread_id: conversationId,
         user_id: userId,
         user: createSafeUser(req.user),
+        requestBody: {
+          messageId: responseId,
+          conversationId,
+        },
         ...(userMCPAuthMap != null && { userMCPAuthMap }),
       },
       signal: abortController.signal,
@@ -655,6 +664,7 @@ const createResponse = async (req, res) => {
       conversationId,
       collectedUsage,
       context: 'message',
+      messageId: responseId,
       balance: balanceConfig,
       transactions: transactionsConfig,
       model: primaryConfig.model || agent.model_parameters?.model,
```

```diff
@@ -27,8 +27,15 @@ const { abortRun } = require('./abortRun');
  * @param {string} params.conversationId - Conversation ID
  * @param {Array<Object>} params.collectedUsage - Usage metadata from all models
  * @param {string} [params.fallbackModel] - Fallback model name if not in usage
+ * @param {string} [params.messageId] - The response message ID for transaction correlation
  */
-async function spendCollectedUsage({ userId, conversationId, collectedUsage, fallbackModel }) {
+async function spendCollectedUsage({
+  userId,
+  conversationId,
+  collectedUsage,
+  fallbackModel,
+  messageId,
+}) {
   if (!collectedUsage || collectedUsage.length === 0) {
     return;
   }
@@ -50,6 +57,7 @@ async function spendCollectedUsage({ userId, conversationId, collectedUsage, fal
     const txMetadata = {
       context: 'abort',
+      messageId,
       conversationId,
       user: userId,
       model: usage.model ?? fallbackModel,
@@ -144,6 +152,7 @@ async function abortMessage(req, res) {
       conversationId: jobData?.conversationId,
       collectedUsage,
       fallbackModel: jobData?.model,
+      messageId: jobData?.responseMessageId,
     });
   } else {
     // Fallback: no collected usage, use text-based token counting for primary model only
```
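
As a usage note, a hedged sketch of the abort path after this change: the message ID comes from the queued job data (the `jobData` fields mirror the hunks above; `spendCollectedUsage` is the function defined in the same file, and the surrounding job lookup is not shown here).

```js
// Sketch only: assumes spendCollectedUsage from the hunks above is in scope.
async function settleAbortedRun({ userId, jobData, collectedUsage }) {
  await spendCollectedUsage({
    userId,
    conversationId: jobData?.conversationId,
    collectedUsage,
    fallbackModel: jobData?.model,
    messageId: jobData?.responseMessageId, // ties abort-time transactions to the response message
  });
}
```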

```diff
@@ -379,6 +379,7 @@ describe('recordCollectedUsage', () => {
     await recordCollectedUsage(deps, {
       ...baseParams,
+      messageId: 'msg-123',
       endpointTokenConfig,
       collectedUsage,
     });
@@ -389,6 +390,7 @@ describe('recordCollectedUsage', () => {
       conversationId: 'convo-123',
       model: 'gpt-4',
       context: 'message',
+      messageId: 'msg-123',
       balance: { enabled: true },
       transactions: { enabled: true },
       endpointTokenConfig,
@@ -431,4 +433,93 @@ describe('recordCollectedUsage', () => {
       );
     });
   });
+
+  describe('messageId propagation', () => {
+    it('should pass messageId to spendTokens', async () => {
+      const collectedUsage: UsageMetadata[] = [
+        { input_tokens: 10, output_tokens: 5, model: 'gpt-4' },
+      ];
+
+      await recordCollectedUsage(deps, {
+        ...baseParams,
+        messageId: 'msg-1',
+        collectedUsage,
+      });
+
+      expect(mockSpendTokens).toHaveBeenCalledWith(
+        expect.objectContaining({ messageId: 'msg-1' }),
+        expect.any(Object),
+      );
+    });
+
+    it('should pass messageId to spendStructuredTokens for cache paths', async () => {
+      const collectedUsage: UsageMetadata[] = [
+        {
+          input_tokens: 100,
+          output_tokens: 50,
+          model: 'claude-3',
+          cache_creation_input_tokens: 25,
+          cache_read_input_tokens: 15,
+        },
+      ];
+
+      await recordCollectedUsage(deps, {
+        ...baseParams,
+        messageId: 'msg-cache-1',
+        collectedUsage,
+      });
+
+      expect(mockSpendStructuredTokens).toHaveBeenCalledWith(
+        expect.objectContaining({ messageId: 'msg-cache-1' }),
+        expect.any(Object),
+      );
+      expect(mockSpendTokens).not.toHaveBeenCalled();
+    });
+
+    it('should pass undefined messageId when not provided', async () => {
+      const collectedUsage: UsageMetadata[] = [
+        { input_tokens: 10, output_tokens: 5, model: 'gpt-4' },
+      ];
+
+      await recordCollectedUsage(deps, {
+        user: 'user-123',
+        conversationId: 'convo-123',
+        collectedUsage,
+      });
+
+      expect(mockSpendTokens).toHaveBeenCalledWith(
+        expect.objectContaining({ messageId: undefined }),
+        expect.any(Object),
+      );
+    });
+
+    it('should propagate messageId across multiple usage entries', async () => {
+      const collectedUsage: UsageMetadata[] = [
+        { input_tokens: 100, output_tokens: 50, model: 'gpt-4' },
+        { input_tokens: 200, output_tokens: 60, model: 'gpt-4' },
+        {
+          input_tokens: 150,
+          output_tokens: 30,
+          model: 'gpt-4',
+          input_token_details: { cache_creation: 10, cache_read: 5 },
+        },
+      ];
+
+      await recordCollectedUsage(deps, {
+        ...baseParams,
+        messageId: 'msg-multi',
+        collectedUsage,
+      });
+
+      expect(mockSpendTokens).toHaveBeenCalledTimes(2);
+      expect(mockSpendStructuredTokens).toHaveBeenCalledTimes(1);
+
+      for (const call of mockSpendTokens.mock.calls) {
+        expect(call[0]).toEqual(expect.objectContaining({ messageId: 'msg-multi' }));
+      }
+      expect(mockSpendStructuredTokens.mock.calls[0][0]).toEqual(
+        expect.objectContaining({ messageId: 'msg-multi' }),
+      );
+    });
+  });
 });
```

```diff
@@ -23,6 +23,7 @@ interface TxMetadata {
   user: string;
   model?: string;
   context: string;
+  messageId?: string;
   conversationId: string;
   balance?: Partial<TCustomConfig['balance']> | null;
   transactions?: Partial<TTransactionsConfig>;
@@ -46,6 +47,7 @@ export interface RecordUsageParams {
   collectedUsage: UsageMetadata[];
   model?: string;
   context?: string;
+  messageId?: string;
   balance?: Partial<TCustomConfig['balance']> | null;
   transactions?: Partial<TTransactionsConfig>;
   endpointTokenConfig?: EndpointTokenConfig;
@@ -68,6 +70,7 @@ export async function recordCollectedUsage(
     user,
     model,
     balance,
+    messageId,
     transactions,
     conversationId,
     collectedUsage,
@@ -108,11 +111,12 @@ export async function recordCollectedUsage(
     total_output_tokens += Number(usage.output_tokens) || 0;

     const txMetadata: TxMetadata = {
+      user,
       context,
       balance,
+      messageId,
       transactions,
       conversationId,
-      user,
       endpointTokenConfig,
       model: usage.model ?? model,
     };
```

```diff
@@ -14,6 +14,7 @@ export interface ITransaction extends Document {
   inputTokens?: number;
   writeTokens?: number;
   readTokens?: number;
+  messageId?: string;
   createdAt?: Date;
   updatedAt?: Date;
 }
@@ -52,6 +53,7 @@
     inputTokens: { type: Number },
     writeTokens: { type: Number },
     readTokens: { type: Number },
+    messageId: { type: String },
   },
   {
     timestamps: true,
```
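
With `messageId` persisted on the transaction document, usage rows can be traced back to the response message that produced them. A hedged sketch of such a lookup; the model name and require path are assumptions, not part of this PR:

```js
// Sketch only: assumes a registered mongoose model built from the schema above;
// the require path and model name are hypothetical.
const { Transaction } = require('~/db/models');

// Returns every transaction (prompt, completion, structured/cache, abort)
// recorded against a single response message.
async function getUsageForMessage(messageId) {
  return Transaction.find({ messageId }).lean();
}
```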