Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-03-03 06:40:20 +01:00)
🪙 feat: Add messageId to Transactions (#11987)
* feat: Add messageId to transactions
* chore: field order
* feat: Enhance token usage tracking by adding a messageId parameter
  - Updated the `recordTokenUsage` method in BaseClient to accept a new `messageId` parameter for improved tracking.
  - Propagated `messageId` in the AgentClient when recording usage.
  - Added tests to ensure `messageId` is correctly passed and handled in various scenarios, including propagation across multiple usage entries.
* chore: Correct field order in the createGeminiImageTool function
  - Moved the conversationId field to its correct position in the object passed to the recordTokenUsage method, ensuring proper parameter alignment.
* refactor: Update OpenAIChatCompletionController and createResponse to use responseId instead of requestId
  - Replaced instances of requestId with responseId in the OpenAIChatCompletionController for clearer logging and tracking.
  - Updated createResponse to include responseId in the requestBody, ensuring consistent handling of message identifiers.
* test: Add messageId to agent client tests
  - Included messageId in the agent client tests to verify proper handling and propagation of message identifiers during transaction recording, aligning the test coverage with the new tracking behavior.
* fix: Update OpenAIChatCompletionController to use requestId for context
  - Changed the context object in OpenAIChatCompletionController to use `requestId` instead of `responseId` for consistency in handling request identifiers.
* chore: field order
Commit 8b159079f5 (parent 6169d4f70b)
10 changed files with 149 additions and 13 deletions
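As a rough illustration of the change described above, here is a minimal sketch (not the actual LibreChat implementation; `createTransaction` and the surrounding field names are assumptions) of how a `recordTokenUsage`-style call can thread the new `messageId` through to the transaction record alongside `conversationId`:

// Minimal sketch of the idea; createTransaction is a hypothetical stand-in for the
// real transaction/persistence layer, and the token fields are illustrative.
const createTransaction = async (txData) => {
  /* persist txData to the transactions store */
};

async function recordTokenUsage({ user, model, promptTokens, completionTokens, conversationId, messageId, context }) {
  // The new messageId parameter is simply carried onto the transaction record,
  // so each spend entry can be traced back to the response message that produced it.
  await createTransaction({
    user,
    model,
    context, // e.g. 'message'
    conversationId,
    messageId,
    promptTokens,
    completionTokens,
  });
}

The identifier carried here corresponds to what the controller passes as `messageId: responseId` in the diff below.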
@@ -129,7 +129,6 @@ const OpenAIChatCompletionController = async (req, res) => {
   const appConfig = req.config;
   const requestStartTime = Date.now();

   // Validate request
   const validation = validateRequest(req.body);
   if (isChatCompletionValidationFailure(validation)) {
     return sendErrorResponse(res, 400, validation.error);
@@ -150,20 +149,20 @@ const OpenAIChatCompletionController = async (req, res) => {
     );
   }

   // Generate IDs
   const requestId = `chatcmpl-${nanoid()}`;
   const responseId = `chatcmpl-${nanoid()}`;
   const conversationId = request.conversation_id ?? nanoid();
   const parentMessageId = request.parent_message_id ?? null;
   const created = Math.floor(Date.now() / 1000);

   /** @type {import('@librechat/api').OpenAIResponseContext} — key must be `requestId` to match the type used by createChunk/buildNonStreamingResponse */
   const context = {
     created,
-    requestId,
+    requestId: responseId,
     model: agentId,
   };

   logger.debug(
-    `[OpenAI API] Request ${requestId} started for agent ${agentId}, stream: ${request.stream}`,
+    `[OpenAI API] Response ${responseId} started for agent ${agentId}, stream: ${request.stream}`,
   );

   // Set up abort controller
@@ -450,11 +449,11 @@ const OpenAIChatCompletionController = async (req, res) => {
     agents: [primaryConfig],
     messages: formattedMessages,
     indexTokenCountMap,
-    runId: requestId,
+    runId: responseId,
     signal: abortController.signal,
     customHandlers: handlers,
     requestBody: {
-      messageId: requestId,
+      messageId: responseId,
       conversationId,
     },
     user: { id: userId },
@@ -471,6 +470,10 @@ const OpenAIChatCompletionController = async (req, res) => {
       thread_id: conversationId,
       user_id: userId,
       user: createSafeUser(req.user),
+      requestBody: {
+        messageId: responseId,
+        conversationId,
+      },
       ...(userMCPAuthMap != null && { userMCPAuthMap }),
     },
     signal: abortController.signal,
@@ -496,6 +499,7 @@ const OpenAIChatCompletionController = async (req, res) => {
     conversationId,
     collectedUsage,
     context: 'message',
+    messageId: responseId,
     balance: balanceConfig,
     transactions: transactionsConfig,
     model: primaryConfig.model || agent.model_parameters?.model,
@@ -509,7 +513,7 @@ const OpenAIChatCompletionController = async (req, res) => {
   if (isStreaming) {
     sendFinalChunk(handlerConfig);
     res.end();
-    logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (streaming)`);
+    logger.debug(`[OpenAI API] Response ${responseId} completed in ${duration}ms (streaming)`);

     // Wait for artifact processing after response ends (non-blocking)
     if (artifactPromises.length > 0) {
@@ -548,7 +552,9 @@ const OpenAIChatCompletionController = async (req, res) => {
       usage,
     );
     res.json(response);
-    logger.debug(`[OpenAI API] Request ${requestId} completed in ${duration}ms (non-streaming)`);
+    logger.debug(
+      `[OpenAI API] Response ${responseId} completed in ${duration}ms (non-streaming)`,
+    );
   }
 } catch (error) {
   const errorMessage = error instanceof Error ? error.message : 'An error occurred';
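The commit message also mentions new tests asserting that `messageId` is passed through when usage is recorded. A self-contained, Jest-style sketch of that kind of assertion follows; the client object here is a stand-in, not the real AgentClient test setup:

// Hypothetical sketch: captures what a recordTokenUsage-style call receives and
// asserts that messageId and conversationId are propagated to the usage entry.
test('propagates messageId when recording token usage', async () => {
  const recorded = [];
  const client = {
    async recordTokenUsage({ promptTokens, completionTokens, conversationId, messageId, context }) {
      recorded.push({ promptTokens, completionTokens, conversationId, messageId, context });
    },
  };

  await client.recordTokenUsage({
    promptTokens: 12,
    completionTokens: 34,
    conversationId: 'convo-1',
    messageId: 'chatcmpl-abc123',
    context: 'message',
  });

  expect(recorded).toHaveLength(1);
  expect(recorded[0]).toEqual(
    expect.objectContaining({ messageId: 'chatcmpl-abc123', conversationId: 'convo-1' }),
  );
});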