Mirror of https://github.com/danny-avila/LibreChat.git (synced 2026-02-15 23:18:09 +01:00)
refactor(api): Central Logging 📜 (#1348)
* WIP: initial logging changes
  add several transports in ~/config/winston
  omit messages in logs, truncate long strings
  add short blurb in dotenv for debug logging
  GoogleClient: using logger
  OpenAIClient: using logger, handleOpenAIErrors
  Adding typedef for payload message
  bumped winston and using winston-daily-rotate-file
  moved config for server paths to ~/config dir
  Added `DEBUG_LOGGING=true` to .env.example
* WIP: Refactor logging statements in code
* WIP: Refactor logging statements and import configurations
* WIP: Refactor logging statements and import configurations
* refactor: broadcast Redis initialization message with `info` not `debug`
* refactor: complete Refactor logging statements and import configurations
* chore: delete unused tools
* fix: circular dependencies due to accessing logger
* refactor(handleText): handle booleans and write tests
* refactor: redact sensitive values, better formatting
* chore: improve log formatting, avoid passing strings to 2nd arg
* fix(ci): fix jest tests due to logger changes
* refactor(getAvailablePluginsController): cache plugins as they are static and avoids async addOpenAPISpecs call every time
* chore: update docs
* chore: update docs
* chore: create separate meiliSync logger, clean up logs to avoid being unnecessarily verbose
* chore: spread objects where they are commonly logged to allow string truncation
* chore: improve error log formatting
This commit is contained in:
parent 49571ac635
commit ea1dd59ef4
115 changed files with 1271 additions and 1001 deletions
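This commit swaps the ad-hoc `console.*` calls scattered across the API clients for a single shared `logger` exported from `~/config`, built on winston with daily log rotation, truncation of long strings, and redaction of sensitive values. As a rough orientation, a central logger of that kind might look like the sketch below; the directory layout, redacted key list, truncation length, and rotation settings are illustrative assumptions, not the exact configuration added by this PR.

```js
// Minimal sketch of a central winston logger (assumptions: file layout, key list,
// truncation length, and rotation settings are illustrative, not LibreChat's exact config).
const path = require('path');
const winston = require('winston');
require('winston-daily-rotate-file'); // registers winston.transports.DailyRotateFile

const SENSITIVE_KEYS = ['apiKey', 'authorization', 'password']; // assumed key list
const MAX_STRING_LENGTH = 100; // assumed truncation length

// Redact sensitive values and truncate long strings before they reach any transport.
const sanitize = winston.format((info) => {
  for (const [key, value] of Object.entries(info)) {
    if (SENSITIVE_KEYS.includes(key)) {
      info[key] = '[REDACTED]';
    } else if (typeof value === 'string' && value.length > MAX_STRING_LENGTH) {
      info[key] = value.slice(0, MAX_STRING_LENGTH) + '... [truncated]';
    }
  }
  return info;
});

const logger = winston.createLogger({
  // DEBUG_LOGGING=true (see .env.example) switches on debug-level output.
  level: process.env.DEBUG_LOGGING === 'true' ? 'debug' : 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    sanitize(),
    winston.format.json(),
  ),
  transports: [
    new winston.transports.Console(),
    new winston.transports.DailyRotateFile({
      filename: path.join(__dirname, '..', 'logs', 'debug-%DATE%.log'),
      datePattern: 'YYYY-MM-DD',
      maxSize: '20m',
      maxFiles: '14d',
    }),
  ],
});

module.exports = { logger };
```

Client code then only imports the shared instance, `const { logger } = require('~/config');`, and passes objects as the second argument (for example `logger.debug('[AnthropicClient]', { requestOptions })`) so the formatter can truncate and redact them, which is the pattern visible throughout the hunks below.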
@@ -3,6 +3,7 @@ const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = requ
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';

@@ -103,9 +104,8 @@ class AnthropicClient extends BaseClient {
      messages,
      parentMessageId,
    });
    if (this.options.debug) {
      console.debug('AnthropicClient: orderedMessages', orderedMessages, parentMessageId);
    }

    logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });

    const formattedMessages = orderedMessages.map((message) => ({
      author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,

@@ -247,7 +247,7 @@ class AnthropicClient extends BaseClient {
  }

  getCompletion() {
    console.log('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
    logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
  }

  async sendCompletion(payload, { onProgress, abortController }) {

@@ -262,12 +262,7 @@ class AnthropicClient extends BaseClient {
      modelOptions.stream = true;
    }

    const { debug } = this.options;
    if (debug) {
      console.debug();
      console.debug(modelOptions);
      console.debug();
    }
    logger.debug('modelOptions', { modelOptions });

    const client = this.getClient();
    const metadata = {

@@ -295,32 +290,23 @@ class AnthropicClient extends BaseClient {
      top_p,
      top_k,
    };
    if (this.options.debug) {
      console.log('AnthropicClient: requestOptions');
      console.dir(requestOptions, { depth: null });
    }
    logger.debug('[AnthropicClient]', { requestOptions });
    const response = await client.completions.create(requestOptions);

    signal.addEventListener('abort', () => {
      if (this.options.debug) {
        console.log('AnthropicClient: message aborted!');
      }
      logger.debug('[AnthropicClient] message aborted!');
      response.controller.abort();
    });

    for await (const completion of response) {
      if (this.options.debug) {
        // Uncomment to debug message stream
        // console.debug(completion);
      }
      // Uncomment to debug message stream
      // logger.debug(completion);
      text += completion.completion;
      onProgress(completion.completion);
    }

    signal.removeEventListener('abort', () => {
      if (this.options.debug) {
        console.log('AnthropicClient: message aborted!');
      }
      logger.debug('[AnthropicClient] message aborted!');
      response.controller.abort();
    });

@@ -336,9 +322,7 @@ class AnthropicClient extends BaseClient {
  }

  getBuildMessagesOptions() {
    if (this.options.debug) {
      console.log('AnthropicClient doesn\'t use getBuildMessagesOptions');
    }
    logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
  }

  static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {

@ -3,6 +3,7 @@ const TextStream = require('./TextStream');
|
|||
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
|
||||
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
|
||||
const checkBalance = require('~/models/checkBalance');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class BaseClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
|
|
@ -41,15 +42,14 @@ class BaseClient {
|
|||
}
|
||||
|
||||
async getTokenCountForResponse(response) {
|
||||
if (this.options.debug) {
|
||||
console.debug('`recordTokenUsage` not implemented.', response);
|
||||
}
|
||||
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
|
||||
}
|
||||
|
||||
async recordTokenUsage({ promptTokens, completionTokens }) {
|
||||
if (this.options.debug) {
|
||||
console.debug('`recordTokenUsage` not implemented.', { promptTokens, completionTokens });
|
||||
}
|
||||
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
|
||||
promptTokens,
|
||||
completionTokens,
|
||||
});
|
||||
}
|
||||
|
||||
getBuildMessagesOptions() {
|
||||
|
|
@ -194,14 +194,14 @@ class BaseClient {
|
|||
const update = {};
|
||||
|
||||
if (messageId === tokenCountMap.summaryMessage?.messageId) {
|
||||
this.options.debug && console.debug(`Adding summary props to ${messageId}.`);
|
||||
logger.debug(`[BaseClient] Adding summary props to ${messageId}.`);
|
||||
|
||||
update.summary = tokenCountMap.summaryMessage.content;
|
||||
update.summaryTokenCount = tokenCountMap.summaryMessage.tokenCount;
|
||||
}
|
||||
|
||||
if (message.tokenCount && !update.summaryTokenCount) {
|
||||
this.options.debug && console.debug(`Skipping ${messageId}: already had a token count.`);
|
||||
logger.debug(`[BaseClient] Skipping ${messageId}: already had a token count.`);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -278,19 +278,17 @@ class BaseClient {
|
|||
if (instructions) {
|
||||
({ tokenCount, ..._instructions } = instructions);
|
||||
}
|
||||
this.options.debug && _instructions && console.debug('instructions tokenCount', tokenCount);
|
||||
_instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount);
|
||||
let payload = this.addInstructions(formattedMessages, _instructions);
|
||||
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
|
||||
|
||||
let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
|
||||
await this.getMessagesWithinTokenLimit(orderedWithInstructions);
|
||||
|
||||
this.options.debug &&
|
||||
console.debug(
|
||||
'remainingContextTokens, this.maxContextTokens (1/2)',
|
||||
remainingContextTokens,
|
||||
this.maxContextTokens,
|
||||
);
|
||||
logger.debug('[BaseClient] Context Count (1/2)', {
|
||||
remainingContextTokens,
|
||||
maxContextTokens: this.maxContextTokens,
|
||||
});
|
||||
|
||||
let summaryMessage;
|
||||
let summaryTokenCount;
|
||||
|
|
@ -308,10 +306,9 @@ class BaseClient {
|
|||
|
||||
if (diff > 0) {
|
||||
payload = payload.slice(diff);
|
||||
this.options.debug &&
|
||||
console.debug(
|
||||
`Difference between original payload (${length}) and context (${context.length}): ${diff}`,
|
||||
);
|
||||
logger.debug(
|
||||
`[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`,
|
||||
);
|
||||
}
|
||||
|
||||
const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1];
|
||||
|
|
@ -338,12 +335,10 @@ class BaseClient {
|
|||
// Make sure to only continue summarization logic if the summary message was generated
|
||||
shouldSummarize = summaryMessage && shouldSummarize;
|
||||
|
||||
this.options.debug &&
|
||||
console.debug(
|
||||
'remainingContextTokens, this.maxContextTokens (2/2)',
|
||||
remainingContextTokens,
|
||||
this.maxContextTokens,
|
||||
);
|
||||
logger.debug('[BaseClient] Context Count (2/2)', {
|
||||
remainingContextTokens,
|
||||
maxContextTokens: this.maxContextTokens,
|
||||
});
|
||||
|
||||
let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
|
||||
const { messageId } = message;
|
||||
|
|
@ -361,19 +356,13 @@ class BaseClient {
|
|||
|
||||
const promptTokens = this.maxContextTokens - remainingContextTokens;
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('<-------------------------PAYLOAD/TOKEN COUNT MAP------------------------->');
|
||||
console.debug('Payload:', payload);
|
||||
console.debug('Token Count Map:', tokenCountMap);
|
||||
console.debug(
|
||||
'Prompt Tokens',
|
||||
promptTokens,
|
||||
'remainingContextTokens',
|
||||
remainingContextTokens,
|
||||
'this.maxContextTokens',
|
||||
this.maxContextTokens,
|
||||
);
|
||||
}
|
||||
logger.debug('[BaseClient] Payload size:', payload.length);
|
||||
logger.debug('[BaseClient] tokenCountMap:', tokenCountMap);
|
||||
logger.debug('[BaseClient]', {
|
||||
promptTokens,
|
||||
remainingContextTokens,
|
||||
maxContextTokens: this.maxContextTokens,
|
||||
});
|
||||
|
||||
return { payload, tokenCountMap, promptTokens, messages: orderedWithInstructions };
|
||||
}
|
||||
|
|
@ -421,11 +410,11 @@ class BaseClient {
|
|||
);
|
||||
|
||||
if (tokenCountMap) {
|
||||
console.dir(tokenCountMap, { depth: null });
|
||||
logger.debug('[BaseClient] tokenCountMap', tokenCountMap);
|
||||
if (tokenCountMap[userMessage.messageId]) {
|
||||
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
||||
console.log('userMessage.tokenCount', userMessage.tokenCount);
|
||||
console.log('userMessage', userMessage);
|
||||
logger.debug('[BaseClient] userMessage.tokenCount', userMessage.tokenCount);
|
||||
logger.debug('[BaseClient] userMessage', userMessage);
|
||||
}
|
||||
|
||||
this.handleTokenCountMap(tokenCountMap);
|
||||
|
|
@ -443,7 +432,6 @@ class BaseClient {
|
|||
user: this.user,
|
||||
tokenType: 'prompt',
|
||||
amount: promptTokens,
|
||||
debug: this.options.debug,
|
||||
model: this.modelOptions.model,
|
||||
endpoint: this.options.endpoint,
|
||||
},
|
||||
|
|
@ -483,9 +471,7 @@ class BaseClient {
|
|||
}
|
||||
|
||||
async loadHistory(conversationId, parentMessageId = null) {
|
||||
if (this.options.debug) {
|
||||
console.debug('Loading history for conversation', conversationId, parentMessageId);
|
||||
}
|
||||
logger.debug('[BaseClient] Loading history:', { conversationId, parentMessageId });
|
||||
|
||||
const messages = (await getMessages({ conversationId })) ?? [];
|
||||
|
||||
|
|
@ -516,9 +502,14 @@ class BaseClient {
|
|||
}
|
||||
}
|
||||
|
||||
if (this.options.debug && this.previous_summary) {
|
||||
if (this.previous_summary) {
|
||||
const { messageId, summary, tokenCount, summaryTokenCount } = this.previous_summary;
|
||||
console.debug('Previous summary:', { messageId, summary, tokenCount, summaryTokenCount });
|
||||
logger.debug('[BaseClient] Previous summary:', {
|
||||
messageId,
|
||||
summary,
|
||||
tokenCount,
|
||||
summaryTokenCount,
|
||||
});
|
||||
}
|
||||
|
||||
return orderedMessages;
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ const { getResponseSender, EModelEndpoint, endpointSettings } = require('librech
|
|||
const { getModelMaxTokens } = require('~/utils');
|
||||
const { formatMessage } = require('./prompts');
|
||||
const BaseClient = require('./BaseClient');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const loc = 'us-central1';
|
||||
const publisher = 'google';
|
||||
|
|
@ -42,8 +43,7 @@ class GoogleClient extends BaseClient {
|
|||
|
||||
jwtClient.authorize((err) => {
|
||||
if (err) {
|
||||
console.error('Error: jwtClient failed to authorize');
|
||||
console.error(err.message);
|
||||
logger.error('jwtClient failed to authorize', err);
|
||||
throw err;
|
||||
}
|
||||
});
|
||||
|
|
@ -58,11 +58,9 @@ class GoogleClient extends BaseClient {
|
|||
return new Promise((resolve, reject) => {
|
||||
jwtClient.authorize((err, tokens) => {
|
||||
if (err) {
|
||||
console.error('Error: jwtClient failed to authorize');
|
||||
console.error(err.message);
|
||||
logger.error('jwtClient failed to authorize', err);
|
||||
reject(err);
|
||||
} else {
|
||||
console.log('Access Token:', tokens.access_token);
|
||||
resolve(tokens.access_token);
|
||||
}
|
||||
});
|
||||
|
|
@ -213,8 +211,7 @@ class GoogleClient extends BaseClient {
|
|||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('GoogleClient buildMessages');
|
||||
console.dir(payload, { depth: null });
|
||||
logger.debug('GoogleClient buildMessages', payload);
|
||||
}
|
||||
|
||||
return { prompt: payload };
|
||||
|
|
@ -226,7 +223,10 @@ class GoogleClient extends BaseClient {
|
|||
parentMessageId,
|
||||
});
|
||||
if (this.options.debug) {
|
||||
console.debug('GoogleClient: orderedMessages', orderedMessages, parentMessageId);
|
||||
logger.debug('GoogleClient: orderedMessages, parentMessageId', {
|
||||
orderedMessages,
|
||||
parentMessageId,
|
||||
});
|
||||
}
|
||||
|
||||
const formattedMessages = orderedMessages.map((message) => ({
|
||||
|
|
@ -377,10 +377,7 @@ class GoogleClient extends BaseClient {
|
|||
const { debug } = this.options;
|
||||
const url = this.completionsUrl;
|
||||
if (debug) {
|
||||
console.debug();
|
||||
console.debug(url);
|
||||
console.debug(this.modelOptions);
|
||||
console.debug();
|
||||
logger.debug('GoogleClient _getCompletion', { url, payload });
|
||||
}
|
||||
const opts = {
|
||||
method: 'POST',
|
||||
|
|
@ -397,7 +394,7 @@ class GoogleClient extends BaseClient {
|
|||
|
||||
const client = await this.getClient();
|
||||
const res = await client.request({ url, method: 'POST', data: payload });
|
||||
console.dir(res.data, { depth: null });
|
||||
logger.debug('GoogleClient _getCompletion', { res });
|
||||
return res.data;
|
||||
}
|
||||
|
||||
|
|
@ -476,7 +473,7 @@ class GoogleClient extends BaseClient {
|
|||
}
|
||||
|
||||
getBuildMessagesOptions() {
|
||||
// console.log('GoogleClient doesn\'t use getBuildMessagesOptions');
|
||||
// logger.debug('GoogleClient doesn\'t use getBuildMessagesOptions');
|
||||
}
|
||||
|
||||
async sendCompletion(payload, opts = {}) {
|
||||
|
|
@ -484,13 +481,10 @@ class GoogleClient extends BaseClient {
|
|||
try {
|
||||
reply = await this.getCompletion(payload, opts);
|
||||
if (this.options.debug) {
|
||||
console.debug('result');
|
||||
console.debug(reply);
|
||||
logger.debug('GoogleClient sendCompletion', { reply });
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Error: failed to send completion to Google');
|
||||
console.error(err);
|
||||
console.error(err.message);
|
||||
logger.error('failed to send completion to Google', err);
|
||||
}
|
||||
return reply.trim();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ const { summaryBuffer } = require('./memory');
|
|||
const { runTitleChain } = require('./chains');
|
||||
const { tokenSplit } = require('./document');
|
||||
const BaseClient = require('./BaseClient');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
// Cache to store Tiktoken instances
|
||||
const tokenizersCache = {};
|
||||
|
|
@ -123,7 +124,7 @@ class OpenAIClient extends BaseClient {
|
|||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('maxContextTokens', this.maxContextTokens);
|
||||
logger.debug('[OpenAIClient] maxContextTokens', this.maxContextTokens);
|
||||
}
|
||||
|
||||
this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
|
||||
|
|
@ -175,7 +176,7 @@ class OpenAIClient extends BaseClient {
|
|||
}
|
||||
|
||||
if (this.azureEndpoint && this.options.debug) {
|
||||
console.debug('Using Azure endpoint');
|
||||
logger.debug('Using Azure endpoint');
|
||||
}
|
||||
|
||||
if (this.useOpenRouter) {
|
||||
|
|
@ -254,8 +255,7 @@ class OpenAIClient extends BaseClient {
|
|||
// Reset count
|
||||
tokenizerCallsCount = 1;
|
||||
} catch (error) {
|
||||
console.log('Free and reset encoders error');
|
||||
console.error(error);
|
||||
logger.error('[OpenAIClient] Free and reset encoders error', error);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -263,7 +263,7 @@ class OpenAIClient extends BaseClient {
|
|||
resetTokenizersIfNecessary() {
|
||||
if (tokenizerCallsCount >= 25) {
|
||||
if (this.options.debug) {
|
||||
console.debug('freeAndResetAllEncoders: reached 25 encodings, resetting...');
|
||||
logger.debug('[OpenAIClient] freeAndResetAllEncoders: reached 25 encodings, resetting...');
|
||||
}
|
||||
this.constructor.freeAndResetAllEncoders();
|
||||
}
|
||||
|
|
@ -403,11 +403,6 @@ class OpenAIClient extends BaseClient {
|
|||
return;
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
// console.debug('progressMessage');
|
||||
// console.dir(progressMessage, { depth: null });
|
||||
}
|
||||
|
||||
if (progressMessage.choices) {
|
||||
streamResult = progressMessage;
|
||||
}
|
||||
|
|
@ -427,9 +422,7 @@ class OpenAIClient extends BaseClient {
|
|||
if (!token) {
|
||||
return;
|
||||
}
|
||||
if (this.options.debug) {
|
||||
// console.debug(token);
|
||||
}
|
||||
|
||||
if (token === this.endToken) {
|
||||
return;
|
||||
}
|
||||
|
|
@ -451,9 +444,9 @@ class OpenAIClient extends BaseClient {
|
|||
null,
|
||||
opts.abortController || new AbortController(),
|
||||
);
|
||||
if (this.options.debug) {
|
||||
console.debug(JSON.stringify(result));
|
||||
}
|
||||
|
||||
logger.debug('[OpenAIClient] sendCompletion: result', result);
|
||||
|
||||
if (this.isChatCompletion) {
|
||||
reply = result.choices[0].message.content;
|
||||
} else {
|
||||
|
|
@ -557,11 +550,13 @@ class OpenAIClient extends BaseClient {
|
|||
title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
|
||||
} catch (e) {
|
||||
if (e?.message?.toLowerCase()?.includes('abort')) {
|
||||
this.options.debug && console.debug('Aborted title generation');
|
||||
logger.debug('[OpenAIClient] Aborted title generation');
|
||||
return;
|
||||
}
|
||||
console.log('There was an issue generating title with LangChain, trying the old method...');
|
||||
this.options.debug && console.error(e.message, e);
|
||||
logger.error(
|
||||
'[OpenAIClient] There was an issue generating title with LangChain, trying the old method...',
|
||||
e,
|
||||
);
|
||||
modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
|
||||
if (this.azure) {
|
||||
modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
|
||||
|
|
@ -582,17 +577,16 @@ ${convo}
|
|||
try {
|
||||
title = (await this.sendPayload(instructionsPayload, { modelOptions })).replaceAll('"', '');
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
console.log('There was another issue generating the title, see error above.');
|
||||
logger.error('[OpenAIClient] There was another issue generating the title', e);
|
||||
}
|
||||
}
|
||||
|
||||
console.log('CONVERSATION TITLE', title);
|
||||
logger.debug('[OpenAIClient] Convo Title: ' + title);
|
||||
return title;
|
||||
}
|
||||
|
||||
async summarizeMessages({ messagesToRefine, remainingContextTokens }) {
|
||||
this.options.debug && console.debug('Summarizing messages...');
|
||||
logger.debug('[OpenAIClient] Summarizing messages...');
|
||||
let context = messagesToRefine;
|
||||
let prompt;
|
||||
|
||||
|
|
@ -615,8 +609,9 @@ ${convo}
|
|||
}
|
||||
|
||||
if (context.length === 0) {
|
||||
this.options.debug &&
|
||||
console.debug('Summary context is empty, using latest message within token limit');
|
||||
logger.debug(
|
||||
'[OpenAIClient] Summary context is empty, using latest message within token limit',
|
||||
);
|
||||
|
||||
promptBuffer = 32;
|
||||
const { text, ...latestMessage } = messagesToRefine[messagesToRefine.length - 1];
|
||||
|
|
@ -643,7 +638,7 @@ ${convo}
|
|||
// by recreating the summary prompt (single message) to avoid LangChain handling
|
||||
|
||||
const initialPromptTokens = this.maxContextTokens - remainingContextTokens;
|
||||
this.options.debug && console.debug(`initialPromptTokens: ${initialPromptTokens}`);
|
||||
logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);
|
||||
|
||||
const llm = this.initializeLLM({
|
||||
model: OPENAI_SUMMARY_MODEL,
|
||||
|
|
@ -669,9 +664,9 @@ ${convo}
|
|||
const summaryTokenCount = this.getTokenCountForMessage(summaryMessage);
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('summaryMessage:', summaryMessage);
|
||||
console.debug(
|
||||
`remainingContextTokens: ${remainingContextTokens}, after refining: ${
|
||||
logger.debug('[OpenAIClient] summaryTokenCount', summaryTokenCount);
|
||||
logger.debug(
|
||||
`[OpenAIClient] Summarization complete: remainingContextTokens: ${remainingContextTokens}, after refining: ${
|
||||
remainingContextTokens - summaryTokenCount
|
||||
}`,
|
||||
);
|
||||
|
|
@ -680,7 +675,7 @@ ${convo}
|
|||
return { summaryMessage, summaryTokenCount };
|
||||
} catch (e) {
|
||||
if (e?.message?.toLowerCase()?.includes('abort')) {
|
||||
this.options.debug && console.debug('Aborted summarization');
|
||||
logger.debug('[OpenAIClient] Aborted summarization');
|
||||
const { run, runId } = this.runManager.getRunByConversationId(this.conversationId);
|
||||
if (run && run.error) {
|
||||
const { error } = run;
|
||||
|
|
@ -688,17 +683,13 @@ ${convo}
|
|||
throw new Error(error);
|
||||
}
|
||||
}
|
||||
console.error('Error summarizing messages');
|
||||
this.options.debug && console.error(e);
|
||||
logger.error('[OpenAIClient] Error summarizing messages', e);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
async recordTokenUsage({ promptTokens, completionTokens }) {
|
||||
if (this.options.debug) {
|
||||
console.debug('promptTokens', promptTokens);
|
||||
console.debug('completionTokens', completionTokens);
|
||||
}
|
||||
logger.debug('[OpenAIClient]', { promptTokens, completionTokens });
|
||||
await spendTokens(
|
||||
{
|
||||
user: this.user,
|
||||
|
|
@ -736,14 +727,19 @@ ${convo}
|
|||
modelOptions.prompt = payload;
|
||||
}
|
||||
|
||||
const { debug } = this.options;
|
||||
const url = extractBaseURL(this.completionsUrl);
|
||||
if (debug) {
|
||||
console.debug('baseURL', url);
|
||||
console.debug('modelOptions', modelOptions);
|
||||
}
|
||||
const baseURL = extractBaseURL(this.completionsUrl);
|
||||
// let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
|
||||
// if (modelOptionsToLog.messages) {
|
||||
// _msgsToLog = modelOptionsToLog.messages.map((msg) => {
|
||||
// let { content, ...rest } = msg;
|
||||
|
||||
// if (content)
|
||||
// return { ...rest, content: truncateText(content) };
|
||||
// });
|
||||
// }
|
||||
logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
|
||||
const opts = {
|
||||
baseURL: url,
|
||||
baseURL,
|
||||
};
|
||||
|
||||
if (this.useOpenRouter) {
|
||||
|
|
@ -820,7 +816,7 @@ ${convo}
|
|||
|
||||
if (!chatCompletion && UnexpectedRoleError) {
|
||||
throw new Error(
|
||||
'OpenAIError: Invalid final message: OpenAI expects final message to include role=assistant',
|
||||
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
|
||||
);
|
||||
} else if (!chatCompletion && error) {
|
||||
throw new Error(error);
|
||||
|
|
@ -843,27 +839,23 @@ ${convo}
|
|||
}
|
||||
if (
|
||||
err?.message?.includes(
|
||||
'OpenAIError: Invalid final message: OpenAI expects final message to include role=assistant',
|
||||
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
|
||||
) ||
|
||||
err?.message?.includes('The server had an error processing your request') ||
|
||||
err?.message?.includes('missing finish_reason') ||
|
||||
(err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason'))
|
||||
) {
|
||||
console.error(err);
|
||||
logger.error('[OpenAIClient] Known OpenAI error:', err);
|
||||
await abortController.abortCompletion();
|
||||
return intermediateReply;
|
||||
} else if (err instanceof OpenAI.APIError) {
|
||||
console.log(err.name);
|
||||
console.log(err.status);
|
||||
console.log(err.headers);
|
||||
if (intermediateReply) {
|
||||
return intermediateReply;
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
} else {
|
||||
console.warn('[OpenAIClient.chatCompletion] Unhandled error type');
|
||||
console.error(err);
|
||||
logger.error('[OpenAIClient.chatCompletion] Unhandled error type', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ const { SelfReflectionTool } = require('./tools');
|
|||
const { isEnabled } = require('~/server/utils');
|
||||
const { extractBaseURL } = require('~/utils');
|
||||
const { loadTools } = require('./tools/util');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class PluginsClient extends OpenAIClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
|
|
@ -85,17 +86,15 @@ class PluginsClient extends OpenAIClient {
|
|||
initialMessageCount: this.currentMessages.length + 1,
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug(
|
||||
`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}----->`,
|
||||
);
|
||||
}
|
||||
logger.debug(
|
||||
`[PluginsClient] Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}`,
|
||||
);
|
||||
|
||||
// Map Messages to Langchain format
|
||||
const pastMessages = formatLangChainMessages(this.currentMessages.slice(0, -1), {
|
||||
userName: this.options?.name,
|
||||
});
|
||||
this.options.debug && console.debug('pastMessages: ', pastMessages);
|
||||
logger.debug('[PluginsClient] pastMessages: ' + pastMessages.length);
|
||||
|
||||
// TODO: use readOnly memory, TokenBufferMemory? (both unavailable in LangChainJS)
|
||||
const memory = new BufferMemory({
|
||||
|
|
@ -124,19 +123,16 @@ class PluginsClient extends OpenAIClient {
|
|||
return;
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Requested Tools');
|
||||
console.debug(this.options.tools);
|
||||
console.debug('Loaded Tools');
|
||||
console.debug(this.tools.map((tool) => tool.name));
|
||||
}
|
||||
logger.debug('[PluginsClient] Requested Tools', this.options.tools);
|
||||
logger.debug(
|
||||
'[PluginsClient] Loaded Tools',
|
||||
this.tools.map((tool) => tool.name),
|
||||
);
|
||||
|
||||
const handleAction = (action, runId, callback = null) => {
|
||||
this.saveLatestAction(action);
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Latest Agent Action ', this.actions[this.actions.length - 1]);
|
||||
}
|
||||
logger.debug('[PluginsClient] Latest Agent Action ', this.actions[this.actions.length - 1]);
|
||||
|
||||
if (typeof callback === 'function') {
|
||||
callback(action, runId);
|
||||
|
|
@ -165,9 +161,7 @@ class PluginsClient extends OpenAIClient {
|
|||
}),
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Loaded agent.');
|
||||
}
|
||||
logger.debug('[PluginsClient] Loaded agent.');
|
||||
}
|
||||
|
||||
async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
|
||||
|
|
@ -183,12 +177,10 @@ class PluginsClient extends OpenAIClient {
|
|||
});
|
||||
const input = attempts > 1 ? errorInput : message;
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug(`Attempt ${attempts} of ${maxAttempts}`);
|
||||
}
|
||||
logger.debug(`[PluginsClient] Attempt ${attempts} of ${maxAttempts}`);
|
||||
|
||||
if (this.options.debug && errorMessage.length > 0) {
|
||||
console.debug('Caught error, input:', input);
|
||||
if (errorMessage.length > 0) {
|
||||
logger.debug('[PluginsClient] Caught error, input:', input);
|
||||
}
|
||||
|
||||
try {
|
||||
|
|
@ -211,10 +203,10 @@ class PluginsClient extends OpenAIClient {
|
|||
]);
|
||||
break; // Exit the loop if the function call is successful
|
||||
} catch (err) {
|
||||
console.error(err);
|
||||
logger.error('[PluginsClient] executorCall error:', err);
|
||||
if (attempts === maxAttempts) {
|
||||
const { run } = this.runManager.getRunByConversationId(this.conversationId);
|
||||
const defaultOutput = `Encountered an error while attempting to respond. Error: ${err.message}`;
|
||||
const defaultOutput = `Encountered an error while attempting to respond: ${err.message}`;
|
||||
this.result.output = run && run.error ? run.error : defaultOutput;
|
||||
this.result.errorMessage = run && run.error ? run.error : err.message;
|
||||
this.result.intermediateSteps = this.actions;
|
||||
|
|
@ -226,8 +218,11 @@ class PluginsClient extends OpenAIClient {
|
|||
|
||||
async handleResponseMessage(responseMessage, saveOptions, user) {
|
||||
const { output, errorMessage, ...result } = this.result;
|
||||
this.options.debug &&
|
||||
console.debug('[handleResponseMessage] Output:', { output, errorMessage, ...result });
|
||||
logger.debug('[PluginsClient][handleResponseMessage] Output:', {
|
||||
output,
|
||||
errorMessage,
|
||||
...result,
|
||||
});
|
||||
const { error } = responseMessage;
|
||||
if (!error) {
|
||||
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
|
||||
|
|
@ -251,7 +246,7 @@ class PluginsClient extends OpenAIClient {
|
|||
this.setOptions(opts);
|
||||
return super.sendMessage(message, opts);
|
||||
}
|
||||
this.options.debug && console.log('Plugins sendMessage', message, opts);
|
||||
logger.debug('[PluginsClient] sendMessage', { message, opts });
|
||||
const {
|
||||
user,
|
||||
isEdited,
|
||||
|
|
@ -281,10 +276,10 @@ class PluginsClient extends OpenAIClient {
|
|||
);
|
||||
|
||||
if (tokenCountMap) {
|
||||
console.dir(tokenCountMap, { depth: null });
|
||||
logger.debug('[PluginsClient] tokenCountMap', { tokenCountMap });
|
||||
if (tokenCountMap[userMessage.messageId]) {
|
||||
userMessage.tokenCount = tokenCountMap[userMessage.messageId];
|
||||
console.log('userMessage.tokenCount', userMessage.tokenCount);
|
||||
logger.debug('[PluginsClient] userMessage.tokenCount', userMessage.tokenCount);
|
||||
}
|
||||
this.handleTokenCountMap(tokenCountMap);
|
||||
}
|
||||
|
|
@ -370,10 +365,7 @@ class PluginsClient extends OpenAIClient {
|
|||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Plugins completion phase: this.result');
|
||||
console.debug(this.result);
|
||||
}
|
||||
logger.debug('[PluginsClient] Completion phase: this.result', this.result);
|
||||
|
||||
const promptPrefix = buildPromptPrefix({
|
||||
result: this.result,
|
||||
|
|
@ -381,28 +373,20 @@ class PluginsClient extends OpenAIClient {
|
|||
functionsAgent: this.functionsAgent,
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('Plugins: promptPrefix');
|
||||
console.debug(promptPrefix);
|
||||
}
|
||||
logger.debug('[PluginsClient]', { promptPrefix });
|
||||
|
||||
payload = await this.buildCompletionPrompt({
|
||||
messages: this.currentMessages,
|
||||
promptPrefix,
|
||||
});
|
||||
|
||||
if (this.options.debug) {
|
||||
console.debug('buildCompletionPrompt Payload');
|
||||
console.debug(payload);
|
||||
}
|
||||
logger.debug('[PluginsClient] buildCompletionPrompt Payload', payload);
|
||||
responseMessage.text = await this.sendCompletion(payload, opts);
|
||||
return await this.handleResponseMessage(responseMessage, saveOptions, user);
|
||||
}
|
||||
|
||||
async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
|
||||
if (this.options.debug) {
|
||||
console.debug('buildCompletionPrompt messages', messages);
|
||||
}
|
||||
logger.debug('[PluginsClient] buildCompletionPrompt messages', messages);
|
||||
|
||||
const orderedMessages = messages;
|
||||
let promptPrefix = _promptPrefix.trim();
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
const { Readable } = require('stream');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class TextStream extends Readable {
|
||||
constructor(text, options = {}) {
|
||||
|
|
@ -38,7 +39,7 @@ class TextStream extends Readable {
|
|||
});
|
||||
|
||||
this.on('end', () => {
|
||||
// console.log('Stream ended');
|
||||
// logger.debug('[processTextStream] Stream ended');
|
||||
resolve();
|
||||
});
|
||||
|
||||
|
|
@ -50,7 +51,7 @@ class TextStream extends Readable {
|
|||
try {
|
||||
await streamPromise;
|
||||
} catch (err) {
|
||||
console.error('Error processing text stream:', err);
|
||||
logger.error('[processTextStream] Error in text stream:', err);
|
||||
// Handle the error appropriately, e.g., return an error message or throw an error
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
const { ZeroShotAgentOutputParser } = require('langchain/agents');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class CustomOutputParser extends ZeroShotAgentOutputParser {
|
||||
constructor(fields) {
|
||||
|
|
@ -64,9 +65,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
const match = this.actionValues.exec(text); // old v2
|
||||
|
||||
if (!match) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT NO MATCH PARSING ERROR---------------------->\n\n',
|
||||
match,
|
||||
logger.debug(
|
||||
'\n\n<----------------------[CustomOutputParser] HIT NO MATCH PARSING ERROR---------------------->\n\n' +
|
||||
match,
|
||||
);
|
||||
const thoughts = text.replace(/[tT]hought:/, '').split('\n');
|
||||
// return {
|
||||
|
|
@ -84,9 +85,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
let selectedTool = match?.[1].trim().toLowerCase();
|
||||
|
||||
if (match && selectedTool === 'n/a') {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT N/A PARSING ERROR---------------------->\n\n',
|
||||
match,
|
||||
logger.debug(
|
||||
'\n\n<----------------------[CustomOutputParser] HIT N/A PARSING ERROR---------------------->\n\n' +
|
||||
match,
|
||||
);
|
||||
return {
|
||||
tool: 'self-reflection',
|
||||
|
|
@ -97,25 +98,25 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
|
||||
let toolIsValid = this.checkIfValidTool(selectedTool);
|
||||
if (match && !toolIsValid) {
|
||||
console.log(
|
||||
'\n\n<----------------Tool invalid: Re-assigning Selected Tool---------------->\n\n',
|
||||
match,
|
||||
logger.debug(
|
||||
'\n\n<----------------[CustomOutputParser] Tool invalid: Re-assigning Selected Tool---------------->\n\n' +
|
||||
match,
|
||||
);
|
||||
selectedTool = this.getValidTool(selectedTool);
|
||||
}
|
||||
|
||||
if (match && !selectedTool) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT INVALID TOOL PARSING ERROR---------------------->\n\n',
|
||||
match,
|
||||
logger.debug(
|
||||
'\n\n<----------------------[CustomOutputParser] HIT INVALID TOOL PARSING ERROR---------------------->\n\n' +
|
||||
match,
|
||||
);
|
||||
selectedTool = 'self-reflection';
|
||||
}
|
||||
|
||||
if (match && !match[2]) {
|
||||
console.log(
|
||||
'\n\n<----------------------HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n',
|
||||
match,
|
||||
logger.debug(
|
||||
'\n\n<----------------------[CustomOutputParser] HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n' +
|
||||
match,
|
||||
);
|
||||
|
||||
// In case there is no action input, let's double-check if there is an action input in 'text' variable
|
||||
|
|
@ -139,7 +140,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
}
|
||||
|
||||
if (match && selectedTool.length > this.longestToolName.length) {
|
||||
console.log('\n\n<----------------------HIT LONG PARSING ERROR---------------------->\n\n');
|
||||
logger.debug(
|
||||
'\n\n<----------------------[CustomOutputParser] HIT LONG PARSING ERROR---------------------->\n\n',
|
||||
);
|
||||
|
||||
let action, input, thought;
|
||||
let firstIndex = Infinity;
|
||||
|
|
@ -156,9 +159,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
// In case there is no action input, let's double-check if there is an action input in 'text' variable
|
||||
const actionInputMatch = this.actionInputRegex.exec(text);
|
||||
if (action && actionInputMatch) {
|
||||
console.log(
|
||||
'\n\n<------Matched Action Input in Long Parsing Error------>\n\n',
|
||||
actionInputMatch,
|
||||
logger.debug(
|
||||
'\n\n<------[CustomOutputParser] Matched Action Input in Long Parsing Error------>\n\n' +
|
||||
actionInputMatch,
|
||||
);
|
||||
return {
|
||||
tool: action,
|
||||
|
|
@ -185,15 +188,14 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
|
||||
const inputMatch = this.actionValues.exec(returnValues.log); //new
|
||||
if (inputMatch) {
|
||||
console.log('inputMatch');
|
||||
console.dir(inputMatch, { depth: null });
|
||||
logger.debug('[CustomOutputParser] inputMatch', inputMatch);
|
||||
returnValues.toolInput = inputMatch[1].replaceAll('"', '').trim();
|
||||
returnValues.log = returnValues.log.replace(this.actionValues, '');
|
||||
}
|
||||
|
||||
return returnValues;
|
||||
} else {
|
||||
console.log('No valid tool mentioned.', this.tools, text);
|
||||
logger.debug('[CustomOutputParser] No valid tool mentioned.', this.tools, text);
|
||||
return {
|
||||
tool: 'self-reflection',
|
||||
toolInput: 'Hypothetical actions: \n"' + text + '"\n',
|
||||
|
|
@ -202,8 +204,8 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
|
|||
}
|
||||
|
||||
// if (action && input) {
|
||||
// console.log('Action:', action);
|
||||
// console.log('Input:', input);
|
||||
// logger.debug('Action:', action);
|
||||
// logger.debug('Input:', input);
|
||||
// }
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,8 @@ const {
|
|||
SystemMessagePromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
} = require('langchain/prompts');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const PREFIX = 'You are a helpful AI assistant.';
|
||||
|
||||
function parseOutput(message) {
|
||||
|
|
@ -112,7 +114,7 @@ class FunctionsAgent extends Agent {
|
|||
valuesForLLM,
|
||||
callbackManager,
|
||||
);
|
||||
console.log('message', message);
|
||||
logger.debug('[FunctionsAgent] plan message', message);
|
||||
return parseOutput(message);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ const { EModelEndpoint } = require('librechat-data-provider');
|
|||
const { formatFromLangChain } = require('~/app/clients/prompts');
|
||||
const checkBalance = require('~/models/checkBalance');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const createStartHandler = ({
|
||||
context,
|
||||
|
|
@ -16,9 +17,15 @@ const createStartHandler = ({
|
|||
const { model, functions, function_call } = invocation_params;
|
||||
const messages = _messages[0].map(formatFromLangChain);
|
||||
|
||||
if (manager.debug) {
|
||||
console.log(`handleChatModelStart: ${context}`);
|
||||
console.dir({ model, functions, function_call }, { depth: null });
|
||||
logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
|
||||
model,
|
||||
function_call,
|
||||
});
|
||||
|
||||
if (context !== 'title') {
|
||||
logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
|
||||
functions,
|
||||
});
|
||||
}
|
||||
|
||||
const payload = { messages };
|
||||
|
|
@ -35,9 +42,10 @@ const createStartHandler = ({
|
|||
}
|
||||
|
||||
prelimPromptTokens += promptTokensEstimate(payload);
|
||||
if (manager.debug) {
|
||||
console.log('Prelim Prompt Tokens & Token Buffer', prelimPromptTokens, tokenBuffer);
|
||||
}
|
||||
logger.debug('[createStartHandler]', {
|
||||
prelimPromptTokens,
|
||||
tokenBuffer,
|
||||
});
|
||||
prelimPromptTokens += tokenBuffer;
|
||||
|
||||
try {
|
||||
|
|
@ -61,7 +69,7 @@ const createStartHandler = ({
|
|||
});
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`[${context}] checkBalance error`, err);
|
||||
logger.error(`[createStartHandler][${context}] checkBalance error`, err);
|
||||
manager.abortController.abort();
|
||||
if (context === 'summary' || context === 'plugins') {
|
||||
manager.addRun(runId, { conversationId, error: err.message });
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
const { z } = require('zod');
|
||||
const { langPrompt, createTitlePrompt, escapeBraces, getSnippet } = require('../prompts');
|
||||
const { createStructuredOutputChainFromZod } = require('langchain/chains/openai_functions');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const langSchema = z.object({
|
||||
language: z.string().describe('The language of the input text (full noun, no abbreviations).'),
|
||||
|
|
@ -30,8 +31,7 @@ const runTitleChain = async ({ llm, text, convo, signal, callbacks }) => {
|
|||
try {
|
||||
snippet = getSnippet(text);
|
||||
} catch (e) {
|
||||
console.log('Error getting snippet of text for titleChain');
|
||||
console.log(e);
|
||||
logger.error('[runTitleChain] Error getting snippet of text for titleChain', e);
|
||||
}
|
||||
const languageChain = createLanguageChain({ llm, callbacks });
|
||||
const titleChain = createTitleChain({ llm, callbacks, convo: escapeBraces(convo) });
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
const { createStartHandler } = require('../callbacks');
|
||||
const spendTokens = require('../../../models/spendTokens');
|
||||
const { createStartHandler } = require('~/app/clients/callbacks');
|
||||
const spendTokens = require('~/models/spendTokens');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class RunManager {
|
||||
constructor(fields) {
|
||||
|
|
@ -35,7 +36,7 @@ class RunManager {
|
|||
if (this.runs.has(runId)) {
|
||||
this.runs.delete(runId);
|
||||
} else {
|
||||
console.error(`Run with ID ${runId} does not exist.`);
|
||||
logger.error(`[api/app/clients/llm/RunManager] Run with ID ${runId} does not exist.`);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -57,10 +58,19 @@ class RunManager {
|
|||
{
|
||||
handleChatModelStart: createStartHandler({ ...metadata, manager: this }),
|
||||
handleLLMEnd: async (output, runId, _parentRunId) => {
|
||||
if (this.debug) {
|
||||
console.log(`handleLLMEnd: ${JSON.stringify(metadata)}`);
|
||||
console.dir({ output, runId, _parentRunId }, { depth: null });
|
||||
const { llmOutput, ..._output } = output;
|
||||
logger.debug(`[RunManager] handleLLMEnd: ${JSON.stringify(metadata)}`, {
|
||||
runId,
|
||||
_parentRunId,
|
||||
llmOutput,
|
||||
});
|
||||
|
||||
if (metadata.context !== 'title') {
|
||||
logger.debug('[RunManager] handleLLMEnd:', {
|
||||
output: _output,
|
||||
});
|
||||
}
|
||||
|
||||
const { tokenUsage } = output.llmOutput;
|
||||
const run = this.getRunById(runId);
|
||||
this.removeRun(runId);
|
||||
|
|
@ -74,8 +84,7 @@ class RunManager {
|
|||
await spendTokens(txData, tokenUsage);
|
||||
},
|
||||
handleLLMError: async (err) => {
|
||||
this.debug && console.log(`handleLLMError: ${JSON.stringify(metadata)}`);
|
||||
this.debug && console.error(err);
|
||||
logger.error(`[RunManager] handleLLMError: ${JSON.stringify(metadata)}`, err);
|
||||
if (metadata.context === 'title') {
|
||||
return;
|
||||
} else if (metadata.context === 'plugins') {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
const { ConversationSummaryBufferMemory, ChatMessageHistory } = require('langchain/memory');
|
||||
const { formatLangChainMessages, SUMMARY_PROMPT } = require('../prompts');
|
||||
const { predictNewSummary } = require('../chains');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const createSummaryBufferMemory = ({ llm, prompt, messages, ...rest }) => {
|
||||
const chatHistory = new ChatMessageHistory(messages);
|
||||
|
|
@ -22,9 +23,8 @@ const summaryBuffer = async ({
|
|||
prompt = SUMMARY_PROMPT,
|
||||
signal,
|
||||
}) => {
|
||||
if (debug && previous_summary) {
|
||||
console.log('<-----------PREVIOUS SUMMARY----------->\n\n');
|
||||
console.log(previous_summary);
|
||||
if (previous_summary) {
|
||||
logger.debug('[summaryBuffer]', { previous_summary });
|
||||
}
|
||||
|
||||
const formattedMessages = formatLangChainMessages(context, formatOptions);
|
||||
|
|
@ -46,8 +46,7 @@ const summaryBuffer = async ({
|
|||
const messages = await chatPromptMemory.chatHistory.getMessages();
|
||||
|
||||
if (debug) {
|
||||
console.log('<-----------SUMMARY BUFFER MESSAGES----------->\n\n');
|
||||
console.log(JSON.stringify(messages));
|
||||
logger.debug('[summaryBuffer]', { summary_buffer_messages: messages.length });
|
||||
}
|
||||
|
||||
const predictSummary = await predictNewSummary({
|
||||
|
|
@ -58,8 +57,7 @@ const summaryBuffer = async ({
|
|||
});
|
||||
|
||||
if (debug) {
|
||||
console.log('<-----------SUMMARY----------->\n\n');
|
||||
console.log(JSON.stringify(predictSummary));
|
||||
logger.debug('[summaryBuffer]', { summary: predictSummary });
|
||||
}
|
||||
|
||||
return { role: 'system', content: predictSummary };
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* The `addImages` function corrects any erroneous image URLs in the `responseMessage.text`
|
||||
* and appends image observations from `intermediateSteps` if they are not already present.
|
||||
|
|
@ -20,7 +22,7 @@
|
|||
*
|
||||
* addImages(intermediateSteps, responseMessage);
|
||||
*
|
||||
* console.log(responseMessage.text);
|
||||
* logger.debug(responseMessage.text);
|
||||
* // Outputs: 'Some text with \n'
|
||||
*
|
||||
* @returns {void}
|
||||
|
|
@ -62,7 +64,7 @@ function addImages(intermediateSteps, responseMessage) {
|
|||
if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) {
|
||||
responseMessage.text += '\n' + observation;
|
||||
if (process.env.DEBUG_PLUGINS) {
|
||||
console.debug('[addImages] added image from intermediateSteps');
|
||||
logger.debug('[addImages] added image from intermediateSteps:', observation);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,238 +0,0 @@
|
|||
const { Tool } = require('langchain/tools');
|
||||
const yaml = require('js-yaml');
|
||||
|
||||
/*
|
||||
export interface AIPluginToolParams {
|
||||
name: string;
|
||||
description: string;
|
||||
apiSpec: string;
|
||||
openaiSpec: string;
|
||||
model: BaseLanguageModel;
|
||||
}
|
||||
|
||||
export interface PathParameter {
|
||||
name: string;
|
||||
description: string;
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
title: string;
|
||||
description: string;
|
||||
version: string;
|
||||
}
|
||||
export interface PathMethod {
|
||||
summary: string;
|
||||
operationId: string;
|
||||
parameters?: PathParameter[];
|
||||
}
|
||||
|
||||
interface ApiSpec {
|
||||
openapi: string;
|
||||
info: Info;
|
||||
paths: { [key: string]: { [key: string]: PathMethod } };
|
||||
}
|
||||
*/
|
||||
|
||||
function isJson(str) {
|
||||
try {
|
||||
JSON.parse(str);
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function convertJsonToYamlIfApplicable(spec) {
|
||||
if (isJson(spec)) {
|
||||
const jsonData = JSON.parse(spec);
|
||||
return yaml.dump(jsonData);
|
||||
}
|
||||
return spec;
|
||||
}
|
||||
|
||||
function extractShortVersion(openapiSpec) {
|
||||
openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
|
||||
try {
|
||||
const fullApiSpec = yaml.load(openapiSpec);
|
||||
const shortApiSpec = {
|
||||
openapi: fullApiSpec.openapi,
|
||||
info: fullApiSpec.info,
|
||||
paths: {},
|
||||
};
|
||||
|
||||
for (let path in fullApiSpec.paths) {
|
||||
shortApiSpec.paths[path] = {};
|
||||
for (let method in fullApiSpec.paths[path]) {
|
||||
shortApiSpec.paths[path][method] = {
|
||||
summary: fullApiSpec.paths[path][method].summary,
|
||||
operationId: fullApiSpec.paths[path][method].operationId,
|
||||
parameters: fullApiSpec.paths[path][method].parameters?.map((parameter) => ({
|
||||
name: parameter.name,
|
||||
description: parameter.description,
|
||||
})),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return yaml.dump(shortApiSpec);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
return '';
|
||||
}
|
||||
}
|
||||
function printOperationDetails(operationId, openapiSpec) {
|
||||
openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
|
||||
let returnText = '';
|
||||
try {
|
||||
let doc = yaml.load(openapiSpec);
|
||||
let servers = doc.servers;
|
||||
let paths = doc.paths;
|
||||
let components = doc.components;
|
||||
|
||||
for (let path in paths) {
|
||||
for (let method in paths[path]) {
|
||||
let operation = paths[path][method];
|
||||
if (operation.operationId === operationId) {
|
||||
returnText += `The API request to do for operationId "${operationId}" is:\n`;
|
||||
returnText += `Method: ${method.toUpperCase()}\n`;
|
||||
|
||||
let url = servers[0].url + path;
|
||||
returnText += `Path: ${url}\n`;
|
||||
|
||||
returnText += 'Parameters:\n';
|
||||
if (operation.parameters) {
|
||||
for (let param of operation.parameters) {
|
||||
let required = param.required ? '' : ' (optional),';
|
||||
returnText += `- ${param.name} (${param.in},${required} ${param.schema.type}): ${param.description}\n`;
|
||||
}
|
||||
} else {
|
||||
returnText += ' None\n';
|
||||
}
|
||||
returnText += '\n';
|
||||
|
||||
let responseSchema = operation.responses['200'].content['application/json'].schema;
|
||||
|
||||
// Check if schema is a reference
|
||||
if (responseSchema.$ref) {
|
||||
// Extract schema name from reference
|
||||
let schemaName = responseSchema.$ref.split('/').pop();
|
||||
// Look up schema in components
|
||||
responseSchema = components.schemas[schemaName];
|
||||
}
|
||||
|
||||
returnText += 'Response schema:\n';
|
||||
returnText += '- Type: ' + responseSchema.type + '\n';
|
||||
returnText += '- Additional properties:\n';
|
||||
returnText += ' - Type: ' + responseSchema.additionalProperties?.type + '\n';
|
||||
if (responseSchema.additionalProperties?.properties) {
|
||||
returnText += ' - Properties:\n';
|
||||
for (let prop in responseSchema.additionalProperties.properties) {
|
||||
returnText += ` - ${prop} (${responseSchema.additionalProperties.properties[prop].type}): Description not provided in OpenAPI spec\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (returnText === '') {
|
||||
returnText += `No operation with operationId "${operationId}" found.`;
|
||||
}
|
||||
return returnText;
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
class AIPluginTool extends Tool {
|
||||
/*
|
||||
private _name: string;
|
||||
private _description: string;
|
||||
apiSpec: string;
|
||||
openaiSpec: string;
|
||||
model: BaseLanguageModel;
|
||||
*/
|
||||
|
||||
get name() {
|
||||
return this._name;
|
||||
}
|
||||
|
||||
get description() {
|
||||
return this._description;
|
||||
}
|
||||
|
||||
constructor(params) {
|
||||
super();
|
||||
this._name = params.name;
|
||||
this._description = params.description;
|
||||
this.apiSpec = params.apiSpec;
|
||||
this.openaiSpec = params.openaiSpec;
|
||||
this.model = params.model;
|
||||
}
|
||||
|
||||
async _call(input) {
|
||||
let date = new Date();
|
||||
let fullDate = `Date: ${date.getDate()}/${
|
||||
date.getMonth() + 1
|
||||
}/${date.getFullYear()}, Time: ${date.getHours()}:${date.getMinutes()}:${date.getSeconds()}`;
|
||||
const prompt = `${fullDate}\nQuestion: ${input} \n${this.apiSpec}.`;
|
||||
console.log(prompt);
|
||||
const gptResponse = await this.model.predict(prompt);
|
||||
let operationId = gptResponse.match(/operationId: (.*)/)?.[1];
|
||||
if (!operationId) {
|
||||
return 'No operationId found in the response';
|
||||
}
|
||||
if (operationId == 'No API path found to answer the question') {
|
||||
return 'No API path found to answer the question';
|
||||
}
|
||||
|
||||
let openApiData = printOperationDetails(operationId, this.openaiSpec);
|
||||
|
||||
return openApiData;
|
||||
}
|
||||
|
||||
static async fromPluginUrl(url, model) {
|
||||
const aiPluginRes = await fetch(url, {});
|
||||
if (!aiPluginRes.ok) {
|
||||
throw new Error(`Failed to fetch plugin from ${url} with status ${aiPluginRes.status}`);
|
||||
}
|
||||
const aiPluginJson = await aiPluginRes.json();
|
||||
const apiUrlRes = await fetch(aiPluginJson.api.url, {});
|
||||
if (!apiUrlRes.ok) {
|
||||
throw new Error(
|
||||
`Failed to fetch API spec from ${aiPluginJson.api.url} with status ${apiUrlRes.status}`,
|
||||
);
|
||||
}
|
||||
const apiUrlJson = await apiUrlRes.text();
|
||||
const shortApiSpec = extractShortVersion(apiUrlJson);
|
||||
return new AIPluginTool({
|
||||
name: aiPluginJson.name_for_model.toLowerCase(),
|
||||
description: `A \`tool\` to learn the API documentation for ${aiPluginJson.name_for_model.toLowerCase()}, after which you can use 'http_request' to make the actual API call. Short description of how to use the API's results: ${
|
||||
aiPluginJson.description_for_model
|
||||
})`,
|
||||
apiSpec: `
|
||||
As an AI, your task is to identify the operationId of the relevant API path based on the condensed OpenAPI specifications provided.
|
||||
|
||||
Please note:
|
||||
|
||||
1. Do not imagine URLs. Only use the information provided in the condensed OpenAPI specifications.
|
||||
|
||||
2. Do not guess the operationId. Identify it strictly based on the API paths and their descriptions.
|
||||
|
||||
Your output should only include:
|
||||
- operationId: The operationId of the relevant API path
|
||||
|
||||
If you cannot find a suitable API path based on the OpenAPI specifications, please answer only "operationId: No API path found to answer the question".
|
||||
|
||||
Now, based on the question above and the condensed OpenAPI specifications given below, identify the operationId:
|
||||
|
||||
\`\`\`
|
||||
${shortApiSpec}
|
||||
\`\`\`
|
||||
`,
|
||||
openaiSpec: apiUrlJson,
|
||||
model: model,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = AIPluginTool;
|
||||
|
|
@ -1,6 +1,7 @@
|
|||
const { StructuredTool } = require('langchain/tools');
|
||||
const { z } = require('zod');
|
||||
const { StructuredTool } = require('langchain/tools');
|
||||
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class AzureAISearch extends StructuredTool {
|
||||
// Constants for default values
|
||||
|
|
@ -94,7 +95,7 @@ class AzureAISearch extends StructuredTool {
|
|||
}
|
||||
return JSON.stringify(resultDocuments);
|
||||
} catch (error) {
|
||||
console.error(`Azure AI Search request failed: ${error.message}`);
|
||||
logger.error('Azure AI Search request failed', error);
|
||||
return 'There was an error with Azure AI Search.';
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,13 +3,14 @@
const fs = require('fs');
const path = require('path');
const OpenAI = require('openai');
// const { genAzureEndpoint } = require('../../../utils/genAzureEndpoints');
// const { genAzureEndpoint } = require('~/utils/genAzureEndpoints');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const extractBaseURL = require('~/utils/extractBaseURL');
const saveImageFromUrl = require('./saveImageFromUrl');
const extractBaseURL = require('../../../utils/extractBaseURL');
const { DALLE_REVERSE_PROXY, PROXY } = process.env;
const { logger } = require('~/config');

const { DALLE_REVERSE_PROXY, PROXY } = process.env;
class OpenAICreateImage extends Tool {
constructor(fields = {}) {
super();
@ -102,9 +103,12 @@ Guidelines:

if (match) {
imageName = match[0];
console.log(imageName); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
logger.debug('[DALL-E]', { imageName }); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
} else {
console.log('No image name found in the string.');
logger.debug('[DALL-E] No image name found in the string.', {
theImageUrl,
data: resp.data[0],
});
}

this.outputPath = path.resolve(__dirname, '..', '..', '..', '..', 'client', 'public', 'images');
@ -120,7 +124,7 @@ Guidelines:
await saveImageFromUrl(theImageUrl, this.outputPath, imageName);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
console.error('Error while saving the image:', error);
logger.error('Error while saving the DALL-E image:', error);
this.result = theImageUrl;
}

@ -1,5 +1,6 @@
const { Tool } = require('langchain/tools');
const { google } = require('googleapis');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

/**
* Represents a tool that allows an agent to use the Google Custom Search API.
@ -86,7 +87,7 @@ class GoogleSearchAPI extends Tool {
});

// return response.data;
// console.log(response.data);
// logger.debug(response.data);

if (!response.data.items || response.data.items.length === 0) {
return this.resultsToReadableFormat([
@ -110,7 +111,7 @@ class GoogleSearchAPI extends Tool {

return this.resultsToReadableFormat(metadataResults);
} catch (error) {
console.log(`Error searching Google: ${error}`);
logger.error('[GoogleSearchAPI]', error);
// throw error;
return 'There was an error searching Google.';
}

@ -1,108 +0,0 @@
const { Tool } = require('langchain/tools');

// class RequestsGetTool extends Tool {
// constructor(headers = {}, { maxOutputLength } = {}) {
// super();
// this.name = 'requests_get';
// this.headers = headers;
// this.maxOutputLength = maxOutputLength || 2000;
// this.description = `A portal to the internet. Use this when you need to get specific content from a website.
// - Input should be a url (i.e. https://www.google.com). The output will be the text response of the GET request.`;
// }

// async _call(input) {
// const res = await fetch(input, {
// headers: this.headers
// });
// const text = await res.text();
// return text.slice(0, this.maxOutputLength);
// }
// }

// class RequestsPostTool extends Tool {
// constructor(headers = {}, { maxOutputLength } = {}) {
// super();
// this.name = 'requests_post';
// this.headers = headers;
// this.maxOutputLength = maxOutputLength || Infinity;
// this.description = `Use this when you want to POST to a website.
// - Input should be a json string with two keys: "url" and "data".
// - The value of "url" should be a string, and the value of "data" should be a dictionary of
// - key-value pairs you want to POST to the url as a JSON body.
// - Be careful to always use double quotes for strings in the json string
// - The output will be the text response of the POST request.`;
// }

// async _call(input) {
// try {
// const { url, data } = JSON.parse(input);
// const res = await fetch(url, {
// method: 'POST',
// headers: this.headers,
// body: JSON.stringify(data)
// });
// const text = await res.text();
// return text.slice(0, this.maxOutputLength);
// } catch (error) {
// return `${error}`;
// }
// }
// }

class HttpRequestTool extends Tool {
constructor(headers = {}, { maxOutputLength = Infinity } = {}) {
super();
this.headers = headers;
this.name = 'http_request';
this.maxOutputLength = maxOutputLength;
this.description =
'Executes HTTP methods (GET, POST, PUT, DELETE, etc.). The input is an object with three keys: "url", "method", and "data". Even for GET or DELETE, include "data" key as an empty string. "method" is the HTTP method, and "url" is the desired endpoint. If POST or PUT, "data" should contain a stringified JSON representing the body to send. Only one url per use.';
}

async _call(input) {
try {
const urlPattern = /"url":\s*"([^"]*)"/;
const methodPattern = /"method":\s*"([^"]*)"/;
const dataPattern = /"data":\s*"([^"]*)"/;

const url = input.match(urlPattern)[1];
const method = input.match(methodPattern)[1];
let data = input.match(dataPattern)[1];

// Parse 'data' back to JSON if possible
try {
data = JSON.parse(data);
} catch (e) {
// If it's not a JSON string, keep it as is
}

let options = {
method: method,
headers: this.headers,
};

if (['POST', 'PUT', 'PATCH'].includes(method.toUpperCase()) && data) {
if (typeof data === 'object') {
options.body = JSON.stringify(data);
} else {
options.body = data;
}
options.headers['Content-Type'] = 'application/json';
}

const res = await fetch(url, options);

const text = await res.text();
if (text.includes('<html')) {
return 'This tool is not designed to browse web pages. Only use it for API calls.';
}

return text.slice(0, this.maxOutputLength);
} catch (error) {
console.log(error);
return `${error}`;
}
}
}

module.exports = HttpRequestTool;
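The description above doubles as the tool's input contract; a rough usage sketch under those assumptions (the endpoint and auth header are placeholders):

const HttpRequestTool = require('./HttpRequestTool');

(async () => {
  const tool = new HttpRequestTool({ Authorization: 'Bearer <token>' });
  // Per the description, "data" is included even for GET, as an empty string;
  // _call extracts url/method/data with the regex patterns shown above.
  const text = await tool._call(
    '{"url": "https://api.example.com/items", "method": "GET", "data": ""}',
  );
  // text holds the raw response body, truncated to maxOutputLength.
})();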
@ -1,9 +1,10 @@
// Generates image using stable diffusion webui's api (automatic1111)
const fs = require('fs');
const { Tool } = require('langchain/tools');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

class StableDiffusionAPI extends Tool {
constructor(fields) {
@ -81,7 +82,7 @@ Guidelines:
.toFile(this.outputPath + '/' + imageName);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
console.error('Error while saving the image:', error);
logger.error('[StableDiffusion] Error while saving the image:', error);
// this.result = theImageUrl;
}

@ -1,6 +1,7 @@
/* eslint-disable no-useless-escape */
const axios = require('axios');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

class WolframAlphaAPI extends Tool {
constructor(fields) {
@ -38,7 +39,7 @@ General guidelines:
const response = await axios.get(url, { responseType: 'text' });
return response.data;
} catch (error) {
console.error(`Error fetching raw text: ${error}`);
logger.error('[WolframAlphaAPI] Error fetching raw text:', error);
throw error;
}
}
@ -68,11 +69,10 @@ General guidelines:
return response;
} catch (error) {
if (error.response && error.response.data) {
console.log('Error data:', error.response.data);
logger.error('[WolframAlphaAPI] Error data:', error);
return error.response.data;
} else {
console.log('Error querying Wolfram Alpha', error.message);
// throw error;
logger.error('[WolframAlphaAPI] Error querying Wolfram Alpha', error);
return 'There was an error querying Wolfram Alpha.';
}
}

@ -1,11 +1,12 @@
require('dotenv').config();
const { z } = require('zod');
const fs = require('fs');
const yaml = require('js-yaml');
const { z } = require('zod');
const path = require('path');
const { DynamicStructuredTool } = require('langchain/tools');
const yaml = require('js-yaml');
const { createOpenAPIChain } = require('langchain/chains');
const { DynamicStructuredTool } = require('langchain/tools');
const { ChatPromptTemplate, HumanMessagePromptTemplate } = require('langchain/prompts');
const { logger } = require('~/config');

function addLinePrefix(text, prefix = '// ') {
return text
@ -52,7 +53,7 @@ async function readSpecFile(filePath) {
}
return yaml.load(fileContents);
} catch (e) {
console.error(e);
logger.error('[readSpecFile] error', e);
return false;
}
}
@ -83,54 +84,51 @@ async function getSpec(url) {
return ValidSpecPath.parse(url);
}

async function createOpenAPIPlugin({ data, llm, user, message, memory, signal, verbose = false }) {
async function createOpenAPIPlugin({ data, llm, user, message, memory, signal }) {
let spec;
try {
spec = await getSpec(data.api.url, verbose);
spec = await getSpec(data.api.url);
} catch (error) {
verbose && console.debug('getSpec error', error);
logger.error('[createOpenAPIPlugin] getSpec error', error);
return null;
}

if (!spec) {
verbose && console.debug('No spec found');
logger.warn('[createOpenAPIPlugin] No spec found');
return null;
}

const headers = {};
const { auth, name_for_model, description_for_model, description_for_human } = data;
if (auth && AuthDefinition.parse(auth)) {
verbose && console.debug('auth detected', auth);
logger.debug('[createOpenAPIPlugin] auth detected', auth);
const { openai } = auth.verification_tokens;
if (AuthBearer.parse(auth)) {
headers.authorization = `Bearer ${openai}`;
verbose && console.debug('added auth bearer', headers);
logger.debug('[createOpenAPIPlugin] added auth bearer', headers);
}
}

const chainOptions = {
llm,
verbose,
};
const chainOptions = { llm };

if (data.headers && data.headers['librechat_user_id']) {
verbose && console.debug('id detected', headers);
logger.debug('[createOpenAPIPlugin] id detected', headers);
headers[data.headers['librechat_user_id']] = user;
}

if (Object.keys(headers).length > 0) {
verbose && console.debug('headers detected', headers);
logger.debug('[createOpenAPIPlugin] headers detected', headers);
chainOptions.headers = headers;
}

if (data.params) {
verbose && console.debug('params detected', data.params);
logger.debug('[createOpenAPIPlugin] params detected', data.params);
chainOptions.params = data.params;
}

let history = '';
if (memory) {
verbose && console.debug('openAPI chain: memory detected', memory);
logger.debug('[createOpenAPIPlugin] openAPI chain: memory detected', memory);
const { history: chat_history } = await memory.loadMemoryVariables({});
history = chat_history?.length > 0 ? `\n\n## Chat History:\n${chat_history}\n` : '';
}

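For reference, a sketch of the data argument this function reads, limited to the fields touched above (api.url, auth.verification_tokens.openai, headers.librechat_user_id, params); every value is a placeholder and the overall shape is an assumption based on the OpenAI plugin manifest format:

// Hypothetical manifest-shaped data for createOpenAPIPlugin.
const data = {
  name_for_model: 'example_plugin',
  description_for_model: 'Query the example service.',
  description_for_human: 'Example service plugin.',
  api: { url: 'https://plugin.example.com/openapi.yaml' },
  auth: { verification_tokens: { openai: 'abc123' } },
  headers: { librechat_user_id: 'x-user-id' },
  params: { format: 'json' },
};

// const tool = await createOpenAPIPlugin({ data, llm, user, message, memory, signal });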
@ -1,6 +1,4 @@
const GoogleSearchAPI = require('./GoogleSearch');
const HttpRequestTool = require('./HttpRequestTool');
const AIPluginTool = require('./AIPluginTool');
const OpenAICreateImage = require('./DALL-E');
const DALLE3 = require('./structured/DALLE3');
const StructuredSD = require('./structured/StableDiffusion');
@ -20,8 +18,6 @@ const CodeBrew = require('./CodeBrew');
module.exports = {
availableTools,
GoogleSearchAPI,
HttpRequestTool,
AIPluginTool,
OpenAICreateImage,
DALLE3,
StableDiffusionAPI,

@ -1,6 +1,7 @@
const axios = require('axios');
const fs = require('fs');
const path = require('path');
const axios = require('axios');
const { logger } = require('~/config');

async function saveImageFromUrl(url, outputPath, outputFilename) {
try {
@ -32,7 +33,7 @@ async function saveImageFromUrl(url, outputPath, outputFilename) {
writer.on('error', reject);
});
} catch (error) {
console.error('Error while saving the image:', error);
logger.error('[saveImageFromUrl] Error while saving the image:', error);
}
}

@ -1,6 +1,7 @@
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const { StructuredTool } = require('langchain/tools');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
const { logger } = require('~/config');

class AzureAISearch extends StructuredTool {
// Constants for default values
@ -94,7 +95,7 @@ class AzureAISearch extends StructuredTool {
}
return JSON.stringify(resultDocuments);
} catch (error) {
console.error(`Azure AI Search request failed: ${error.message}`);
logger.error('Azure AI Search request failed', error);
return 'There was an error with Azure AI Search.';
}
}

@ -28,14 +28,14 @@ class RunCode extends StructuredTool {
}

async _call({ code, language = 'python' }) {
// console.log('<--------------- Running Code --------------->', { code, language });
// logger.debug('<--------------- Running Code --------------->', { code, language });
const response = await axios({
url: `${this.url}/repl`,
method: 'post',
headers: this.headers,
data: { code, language },
});
// console.log('<--------------- Sucessfully ran Code --------------->', response.data);
// logger.debug('<--------------- Sucessfully ran Code --------------->', response.data);
return response.data.result;
}
}

@ -42,14 +42,14 @@ class RunCode extends StructuredTool {
}

async _call({ code, language = 'python' }) {
// console.log('<--------------- Running Code --------------->', { code, language });
// logger.debug('<--------------- Running Code --------------->', { code, language });
const response = await axios({
url: `${this.url}/repl`,
method: 'post',
headers: this.headers,
data: { code, language },
});
// console.log('<--------------- Sucessfully ran Code --------------->', response.data);
// logger.debug('<--------------- Sucessfully ran Code --------------->', response.data);
return response.data.result;
}
}

@ -7,7 +7,9 @@ const OpenAI = require('openai');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const saveImageFromUrl = require('../saveImageFromUrl');
const extractBaseURL = require('../../../../utils/extractBaseURL');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');

const { DALLE3_SYSTEM_PROMPT, DALLE_REVERSE_PROXY, PROXY } = process.env;
class DALLE3 extends Tool {
constructor(fields = {}) {
@ -126,9 +128,12 @@ Error Message: ${error.message}`;

if (match) {
imageName = match[0];
console.log(imageName); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
logger.debug('[DALL-E-3]', { imageName }); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
} else {
console.log('No image name found in the string.');
logger.debug('[DALL-E-3] No image name found in the string.', {
theImageUrl,
data: resp.data[0],
});
}

this.outputPath = path.resolve(
@ -154,7 +159,7 @@ Error Message: ${error.message}`;
await saveImageFromUrl(theImageUrl, this.outputPath, imageName);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
console.error('Error while saving the image:', error);
logger.error('Error while saving the image:', error);
this.result = theImageUrl;
}

@ -1,9 +1,10 @@
const { z } = require('zod');
const axios = require('axios');
const { StructuredTool } = require('langchain/tools');
const { PromptTemplate } = require('langchain/prompts');
const { createExtractionChainFromZod } = require('./extractionChain');
// const { ChatOpenAI } = require('langchain/chat_models/openai');
const axios = require('axios');
const { z } = require('zod');
const { createExtractionChainFromZod } = require('./extractionChain');
const { logger } = require('~/config');

const envs = ['Nodejs', 'Go', 'Bash', 'Rust', 'Python3', 'PHP', 'Java', 'Perl', 'DotNET'];
const env = z.enum(envs);
@ -34,8 +35,8 @@ async function extractEnvFromCode(code, model) {
// const chatModel = new ChatOpenAI({ openAIApiKey, modelName: 'gpt-4-0613', temperature: 0 });
const chain = createExtractionChainFromZod(zodSchema, model, { prompt, verbose: true });
const result = await chain.run(code);
console.log('<--------------- extractEnvFromCode --------------->');
console.log(result);
logger.debug('<--------------- extractEnvFromCode --------------->');
logger.debug(result);
return result.env;
}

@ -69,7 +70,7 @@ class RunCommand extends StructuredTool {
}

async _call(data) {
console.log(`<--------------- Running ${data} --------------->`);
logger.debug(`<--------------- Running ${data} --------------->`);
const response = await axios({
url: `${this.url}/commands`,
method: 'post',
@ -96,7 +97,7 @@ class ReadFile extends StructuredTool {
}

async _call(data) {
console.log(`<--------------- Reading ${data} --------------->`);
logger.debug(`<--------------- Reading ${data} --------------->`);
const response = await axios.get(`${this.url}/files`, { params: data, headers: this.headers });
return response.data;
}
@ -121,12 +122,12 @@ class WriteFile extends StructuredTool {

async _call(data) {
let { env, path, content } = data;
console.log(`<--------------- environment ${env} typeof ${typeof env}--------------->`);
logger.debug(`<--------------- environment ${env} typeof ${typeof env}--------------->`);
if (env && !envs.includes(env)) {
console.log(`<--------------- Invalid environment ${env} --------------->`);
logger.debug(`<--------------- Invalid environment ${env} --------------->`);
env = await extractEnvFromCode(content, this.model);
} else if (!env) {
console.log('<--------------- Undefined environment --------------->');
logger.debug('<--------------- Undefined environment --------------->');
env = await extractEnvFromCode(content, this.model);
}

@ -139,7 +140,7 @@ class WriteFile extends StructuredTool {
content,
},
};
console.log('Writing to file', JSON.stringify(payload));
logger.debug('Writing to file', JSON.stringify(payload));

await axios({
url: `${this.url}/files`,

@ -1,10 +1,11 @@
// Generates image using stable diffusion webui's api (automatic1111)
const fs = require('fs');
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
const { StructuredTool } = require('langchain/tools');
const { logger } = require('~/config');

class StableDiffusionAPI extends StructuredTool {
constructor(fields) {
@ -107,7 +108,7 @@ class StableDiffusionAPI extends StructuredTool {
.toFile(this.outputPath + '/' + imageName);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
console.error('Error while saving the image:', error);
logger.error('[StableDiffusion] Error while saving the image:', error);
// this.result = theImageUrl;
}

@ -1,7 +1,8 @@
/* eslint-disable no-useless-escape */
const axios = require('axios');
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const { StructuredTool } = require('langchain/tools');
const { logger } = require('~/config');

class WolframAlphaAPI extends StructuredTool {
constructor(fields) {
@ -47,7 +48,7 @@ class WolframAlphaAPI extends StructuredTool {
const response = await axios.get(url, { responseType: 'text' });
return response.data;
} catch (error) {
console.error(`Error fetching raw text: ${error}`);
logger.error('[WolframAlphaAPI] Error fetching raw text:', error);
throw error;
}
}
@ -78,11 +79,10 @@ class WolframAlphaAPI extends StructuredTool {
return response;
} catch (error) {
if (error.response && error.response.data) {
console.log('Error data:', error.response.data);
logger.error('[WolframAlphaAPI] Error data:', error);
return error.response.data;
} else {
console.log('Error querying Wolfram Alpha', error.message);
// throw error;
logger.error('[WolframAlphaAPI] Error querying Wolfram Alpha', error);
return 'There was an error querying Wolfram Alpha.';
}
}

@ -3,6 +3,7 @@ const path = require('path');
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
const saveImageFromUrl = require('../../saveImageFromUrl');
const { logger } = require('~/config');

jest.mock('openai');

@ -145,10 +146,13 @@ describe('DALLE3', () => {
},
],
};
console.log = jest.fn(); // Mock console.log

generate.mockResolvedValue(mockResponse);
await dalle._call(mockData);
expect(console.log).toHaveBeenCalledWith('No image name found in the string.');
expect(logger.debug).toHaveBeenCalledWith('[DALL-E-3] No image name found in the string.', {
data: { url: 'http://example.com/invalid-url' },
theImageUrl: 'http://example.com/invalid-url',
});
});

it('should create the directory if it does not exist', async () => {
@ -182,9 +186,8 @@ describe('DALLE3', () => {
const error = new Error('Error while saving the image');
generate.mockResolvedValue(mockResponse);
saveImageFromUrl.mockRejectedValue(error);
console.error = jest.fn(); // Mock console.error
const result = await dalle._call(mockData);
expect(console.error).toHaveBeenCalledWith('Error while saving the image:', error);
expect(logger.error).toHaveBeenCalledWith('Error while saving the image:', error);
expect(result).toBe(mockResponse.data[0].url);
});
});

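The logger.debug and logger.error expectations above assume the central '~/config' logger is replaced with jest mocks elsewhere in this spec; a minimal sketch of such a mock:

// Assumed setup (not part of this hunk): mock the logger so its methods can be spied on.
jest.mock('~/config', () => ({
  logger: {
    debug: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
    info: jest.fn(),
  },
}));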
@ -1,4 +1,5 @@
const OpenAI = require('openai');
const { logger } = require('~/config');

/**
* Handles errors that may occur when making requests to OpenAI's API.
@ -12,14 +13,14 @@ const OpenAI = require('openai');
*/
async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
if (err instanceof OpenAI.APIError && err?.message?.includes('abort')) {
console.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
logger.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
}
if (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) {
console.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
logger.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
} else if (err instanceof OpenAI.APIError) {
console.warn(`[OpenAIClient.chatCompletion][${context}] API Error`);
logger.warn(`[OpenAIClient.chatCompletion][${context}] API error`);
} else {
console.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
}

if (errorCallback) {

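For context, a hedged sketch of a call site, assuming handleOpenAIErrors is in scope and the callback is whatever cleanup the caller wants (aborting here is only an example):

async function completeWithHandling(streamFn, abortController) {
  try {
    return await streamFn();
  } catch (err) {
    // Classify and log the failure, then abort the in-flight request.
    handleOpenAIErrors(err, () => abortController.abort(), 'stream');
    return null;
  }
}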
@ -1,17 +1,14 @@
const { getUserPluginAuthValue } = require('../../../../server/services/PluginService');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { ZapierToolKit } = require('langchain/agents');
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
const { WebBrowser } = require('langchain/tools/webbrowser');
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
AIPluginTool,
GoogleSearchAPI,
WolframAlphaAPI,
StructuredWolfram,
HttpRequestTool,
OpenAICreateImage,
StableDiffusionAPI,
DALLE3,
@ -23,8 +20,9 @@ const {
CodeSherpaTools,
CodeBrew,
} = require('../');
const { loadSpecs } = require('./loadSpecs');
const { loadToolSuite } = require('./loadToolSuite');
const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');

const getOpenAIKey = async (options, user) => {
let openAIApiKey = options.openAIApiKey ?? process.env.OPENAI_API_KEY;
@ -64,7 +62,7 @@ const validateTools = async (user, tools = []) => {

return Array.from(validToolsSet.values());
} catch (err) {
console.log('There was a problem validating tools', err);
logger.error('[validateTools] There was a problem validating tools', err);
throw new Error(err);
}
};
@ -161,15 +159,6 @@ const loadTools = async ({
const zapier = new ZapierNLAWrapper({ apiKey });
return ZapierToolKit.fromZapierNLAWrapper(zapier);
},
plugins: async () => {
return [
new HttpRequestTool(),
await AIPluginTool.fromPluginUrl(
'https://www.klarna.com/.well-known/ai-plugin.json',
new ChatOpenAI({ openAIApiKey: options.openAIApiKey, temperature: 0 }),
),
];
},
};

const requestedTools = {};

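As a usage note, a small sketch of validateTools (the tool keys are hypothetical; names that are unknown or missing credentials for the user are filtered out):

async function toolsForUser(userId) {
  // Returns only the requested tools this user may actually run.
  return await validateTools(userId, ['google', 'wolfram', 'stable-diffusion']);
}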
@ -1,7 +1,8 @@
const fs = require('fs');
const path = require('path');
const { z } = require('zod');
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
const { logger } = require('~/config');
const { createOpenAPIPlugin } = require('~/app/clients/tools/dynamic/OpenAPIPlugin');

// The minimum Manifest definition
const ManifestDefinition = z.object({
@ -26,28 +27,17 @@ const ManifestDefinition = z.object({
legal_info_url: z.string().optional(),
});

function validateJson(json, verbose = true) {
function validateJson(json) {
try {
return ManifestDefinition.parse(json);
} catch (error) {
if (verbose) {
console.debug('validateJson error', error);
}
logger.debug('[validateJson] manifest parsing error', error);
return false;
}
}

// omit the LLM to return the well known jsons as objects
async function loadSpecs({
llm,
user,
message,
tools = [],
map = false,
memory,
signal,
verbose = false,
}) {
async function loadSpecs({ llm, user, message, tools = [], map = false, memory, signal }) {
const directoryPath = path.join(__dirname, '..', '.well-known');
let files = [];

@ -60,7 +50,7 @@ async function loadSpecs({
await fs.promises.access(filePath, fs.constants.F_OK);
files.push(tools[i] + '.json');
} catch (err) {
console.error(`File ${tools[i] + '.json'} does not exist`);
logger.error(`[loadSpecs] File ${tools[i] + '.json'} does not exist`, err);
}
}

@ -73,9 +63,7 @@ async function loadSpecs({
const validJsons = [];
const constructorMap = {};

if (verbose) {
console.debug('files', files);
}
logger.debug('[validateJson] files', files);

for (const file of files) {
if (path.extname(file) === '.json') {
@ -84,7 +72,7 @@ async function loadSpecs({
const json = JSON.parse(fileContent);

if (!validateJson(json)) {
verbose && console.debug('Invalid json', json);
logger.debug('[validateJson] Invalid json', json);
continue;
}

@ -97,13 +85,12 @@ async function loadSpecs({
memory,
signal,
user,
verbose,
});
continue;
}

if (llm) {
validJsons.push(createOpenAPIPlugin({ data: json, llm, verbose }));
validJsons.push(createOpenAPIPlugin({ data: json, llm }));
continue;
}

@ -117,10 +104,8 @@ async function loadSpecs({

const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);

// if (verbose) {
// console.debug('plugins', plugins);
// console.debug(plugins[0].name);
// }
// logger.debug('[validateJson] plugins', plugins);
// logger.debug(plugins[0].name);

return plugins;
}
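To round out the picture, a hedged sketch of invoking loadSpecs (the manifest name is a placeholder; per the comment above, omitting llm returns the parsed manifest JSONs instead of plugin tools):

const { loadSpecs } = require('./loadSpecs');

async function loadWellKnownPlugins({ llm, user, message, memory, signal }) {
  return await loadSpecs({
    llm, // any LangChain chat model
    user,
    message,
    tools: ['example_plugin'], // hypothetical: resolves to .well-known/example_plugin.json
    memory,
    signal,
  });
}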