Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-09-22 06:00:56 +02:00)

* WIP: initial logging changes
  - add several transports in ~/config/winston
  - omit messages in logs, truncate long strings
  - add short blurb in dotenv for debug logging
  - GoogleClient: using logger
  - OpenAIClient: using logger, handleOpenAIErrors
  - Adding typedef for payload message
  - bumped winston and using winston-daily-rotate-file
  - moved config for server paths to ~/config dir
  - Added `DEBUG_LOGGING=true` to .env.example
* WIP: Refactor logging statements in code
* WIP: Refactor logging statements and import configurations
* WIP: Refactor logging statements and import configurations
* refactor: broadcast Redis initialization message with `info` not `debug`
* refactor: complete Refactor logging statements and import configurations
* chore: delete unused tools
* fix: circular dependencies due to accessing logger
* refactor(handleText): handle booleans and write tests
* refactor: redact sensitive values, better formatting
* chore: improve log formatting, avoid passing strings to 2nd arg
* fix(ci): fix jest tests due to logger changes
* refactor(getAvailablePluginsController): cache plugins as they are static and avoids async addOpenAPISpecs call every time
* chore: update docs
* chore: update docs
* chore: create separate meiliSync logger, clean up logs to avoid being unnecessarily verbose
* chore: spread objects where they are commonly logged to allow string truncation
* chore: improve error log formatting
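The first commit above describes moving LibreChat's logging onto Winston: transports defined under ~/config/winston, daily rotation via winston-daily-rotate-file, truncation of long strings, redaction of sensitive values, and a `DEBUG_LOGGING` flag in .env.example. As a rough illustration of that kind of setup (not the project's actual configuration; the transport options, truncation limit, and redacted keys below are assumptions), such a logger could look like:

// Minimal sketch of a Winston logger with daily rotation, truncation, and
// redaction. Illustrative only; LibreChat's real config lives in ~/config/winston
// and its exact options are not reproduced here.
const winston = require('winston');
const DailyRotateFile = require('winston-daily-rotate-file');

const MAX_STRING_LENGTH = 2000; // assumed truncation limit
const SENSITIVE_KEYS = ['apiKey', 'authorization', 'password']; // assumed key list

// Custom format: redact sensitive keys and truncate long string values.
const sanitize = winston.format((info) => {
  for (const [key, value] of Object.entries(info)) {
    if (SENSITIVE_KEYS.includes(key)) {
      info[key] = '[REDACTED]';
    } else if (typeof value === 'string' && value.length > MAX_STRING_LENGTH) {
      info[key] = `${value.slice(0, MAX_STRING_LENGTH)}... [truncated]`;
    }
  }
  return info;
});

const logger = winston.createLogger({
  // DEBUG_LOGGING=true (see .env.example) lowers the level; the real code may
  // gate this through its isEnabled helper instead of a direct comparison.
  level: process.env.DEBUG_LOGGING === 'true' ? 'debug' : 'info',
  format: winston.format.combine(winston.format.timestamp(), sanitize(), winston.format.json()),
  transports: [
    new winston.transports.Console(),
    new DailyRotateFile({
      filename: 'logs/debug-%DATE%.log',
      datePattern: 'YYYY-MM-DD',
      maxSize: '20m',
      maxFiles: '14d',
      zippedArchive: true,
    }),
  ],
});

module.exports = { logger };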
const { promptTokensEstimate } = require('openai-chat-tokens');
const { EModelEndpoint } = require('librechat-data-provider');
const { formatFromLangChain } = require('~/app/clients/prompts');
const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

/**
 * Creates a `handleChatModelStart` callback that estimates prompt tokens,
 * optionally checks the user's balance when CHECK_BALANCE is enabled, and
 * registers the run with the provided manager.
 */
const createStartHandler = ({
  context,
  conversationId,
  tokenBuffer = 0,
  initialMessageCount,
  manager,
}) => {
  return async (_llm, _messages, runId, parentRunId, extraParams) => {
    const { invocation_params } = extraParams;
    const { model, functions, function_call } = invocation_params;
    const messages = _messages[0].map(formatFromLangChain);

    logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
      model,
      function_call,
    });

    if (context !== 'title') {
      logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
        functions,
      });
    }

    // Estimate prompt tokens, with small offsets applied when function
    // definitions or a forced function_call are part of the payload.
    const payload = { messages };
    let prelimPromptTokens = 1;

    if (functions) {
      payload.functions = functions;
      prelimPromptTokens += 2;
    }

    if (function_call) {
      payload.function_call = function_call;
      prelimPromptTokens -= 5;
    }

    prelimPromptTokens += promptTokensEstimate(payload);
    logger.debug('[createStartHandler]', {
      prelimPromptTokens,
      tokenBuffer,
    });
    prelimPromptTokens += tokenBuffer;

    try {
      if (isEnabled(process.env.CHECK_BALANCE)) {
        // Messages beyond the initial count are passed along as `generations`.
        const generations =
          initialMessageCount && messages.length > initialMessageCount
            ? messages.slice(initialMessageCount)
            : null;
        await checkBalance({
          req: manager.req,
          res: manager.res,
          txData: {
            user: manager.user,
            tokenType: 'prompt',
            amount: prelimPromptTokens,
            debug: manager.debug,
            generations,
            model,
            endpoint: EModelEndpoint.openAI,
          },
        });
      }
    } catch (err) {
      logger.error(`[createStartHandler][${context}] checkBalance error`, err);
      manager.abortController.abort();
      if (context === 'summary' || context === 'plugins') {
        manager.addRun(runId, { conversationId, error: err.message });
        throw new Error(err);
      }
      return;
    }

    manager.addRun(runId, {
      model,
      messages,
      functions,
      function_call,
      runId,
      parentRunId,
      conversationId,
      prelimPromptTokens,
    });
  };
};

module.exports = createStartHandler;
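The exported factory returns a function whose argument order matches LangChain's `handleChatModelStart` callback (`llm, messages, runId, parentRunId, extraParams`), with `extraParams.invocation_params` supplying the `model`, `functions`, and `function_call` it reads. The sketch below is purely hypothetical wiring, not LibreChat's actual run manager: the `manager` object simply mirrors the fields this file accesses, and the values, path, and context string are made up for illustration.

// Hypothetical wiring sketch -- values and manager shape are assumptions.
const createStartHandler = require('./createStartHandler');

const runs = new Map();

const manager = {
  req: null, // Express request object in real usage
  res: null, // Express response object in real usage
  user: 'user-id',
  debug: false,
  abortController: new AbortController(),
  addRun: (runId, data) => runs.set(runId, data),
};

const handleChatModelStart = createStartHandler({
  context: 'plugins',
  conversationId: 'conversation-id',
  tokenBuffer: 150, // extra tokens reserved on top of the estimate (arbitrary value)
  initialMessageCount: 1,
  manager,
});

// LangChain invokes the handler as:
//   handleChatModelStart(llm, messages, runId, parentRunId, extraParams)
// For example (wiring is an assumption; the import path varies by LangChain version):
//   const { ChatOpenAI } = require('@langchain/openai');
//   const llm = new ChatOpenAI({ callbacks: [{ handleChatModelStart }] });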