LibreChat/api/server/services/ToolService.js
Danny Avila 0e50c07e3f
🤖 feat: Model Specs & Save Tools per Convo/Preset (#2578)
* WIP: first pass ModelSpecs

* refactor(onSelectEndpoint): use `getConvoSwitchLogic`

* feat: introduce iconURL, greeting, frontend fields for conversations/presets/messages

* feat: conversation.iconURL & greeting in Landing

* feat: conversation.iconURL & greeting in New Chat button

* feat: message.iconURL

* refactor: ConversationIcon -> ConvoIconURL

* WIP: add spec as a conversation field

* refactor: useAppStartup, set spec on initial load for new chat, allow undefined spec, add localStorage keys enum, additional type fields for spec

* feat: handle `showIconInMenu`, `showIconInHeader`, undefined `iconURL` and no specs on initial load

* chore: handle undefined or empty modelSpecs

* WIP: first pass, modelSpec schema for custom config

* refactor: move default filtered tools definition to ToolService

* feat: pass modelSpecs from backend via startupConfig

* refactor: modelSpecs config, return and define list

* fix: react error and include iconURL in responseMessage

* refactor: add iconURL to responseMessage only

* refactor: getIconEndpoint

* refactor: pass TSpecsConfig

* fix(assistants): differentiate compactAssistantSchema, correctly resets shared conversation state with other endpoints

* refactor: assistant id prefix localStorage key

* refactor: add more LocalStorageKeys and replace hardcoded values

* feat: prioritize last selected modelSpec behavior on new chat (localStorage)

* feat: first pass, interface config

* chore: WIP, todo: add warnings based on config.modelSpecs settings.

* feat: enforce modelSpecs if configured

* feat: show config file yaml errors

* chore: delete unused legacy Plugins component

* refactor: set tools to localStorage from recoil store

* chore: add stable recoil setter to useEffect deps

* refactor: save tools to conversation documents

* style(MultiSelectPop): dynamic height, remove unused import

* refactor(react-query): use localstorage keys and pass config to useAvailablePluginsQuery

* feat(utils): add mapPlugins

* refactor(Convo): use conversation.tools if defined, lastSelectedTools if not

* refactor: remove unused legacy code using `useSetOptions`, remove conditional flag `isMultiChat` for using legacy settings

* refactor(PluginStoreDialog): add exhaustive-deps which are stable react state setters

* fix(HeaderOptions): pass `popover` as true

* refactor(useSetStorage): use project enums

* refactor: use LocalStorageKeys enum

* fix: prevent setConversation from setting falsy values in lastSelectedTools

* refactor: use map for availableTools state and available Plugins query

* refactor(updateLastSelectedModel): organize logic better and add note on purpose

* fix(setAgentOption): prevent resetting last model to secondary model for gptPlugins

* refactor(buildDefaultConvo): use enum

* refactor: remove `useSetStorage` and consolidate areas where conversation state is saved to localStorage

* fix: conversations retain tools on refresh

* fix(gptPlugins): prevent nullish tools from being saved

* chore: delete useServerStream

* refactor: move initial plugins logic to useAppStartup

* refactor(MultiSelectDropDown): add more pass-in className props

* feat: use tools in presets

* chore: delete unused usePresetOptions

* refactor: new agentOptions default handling

* chore: note

* feat: add label and custom instructions to agents

* chore: remove 'disabled with tools' message

* style: move plugins to 2nd column in parameters

* fix: TPreset type for agentOptions

* fix: interface controls

* refactor: add interfaceConfig, use Separator within Switcher

* refactor: hide Assistants panel if interface.parameters are disabled

* fix(Header): only show modelSpecs if list length is greater than 0

* refactor: separate MessageIcon logic from useMessageHelpers for better react rule-following

* fix(AppService): don't use reserved keyword 'interface'

* feat: set existing Icon for custom endpoints through iconURL

* fix(ci): tests passing for App Service

* docs: refactor custom_config.md for readability and better organization, also include missing values

* docs: interface section and re-organize docs

* docs: update modelSpecs info

* chore: remove unused files

* chore: remove unused files

* chore: move useSetIndexOptions

* chore: remove unused file

* chore: move useConversation(s)

* chore: move useDefaultConvo

* chore: move useNavigateToConvo

* refactor: use plugin install hook so it can be used elsewhere

* chore: import order

* update docs

* refactor(OpenAI/Plugins): allow modelLabel as an initial value for chatGptLabel

* chore: remove unused EndpointOptionsPopover and hide 'Save as Preset' button if preset UI visibility disabled

* feat(loadDefaultInterface): issue warnings based on values

* feat: changelog for custom config file

* docs: add additional changelog note

* fix: prevent unavailable tool selection from preset and update availableTools on Plugin installations

* feat: add `filteredTools` option in custom config

* chore: changelog

* fix(MessageIcon): always overwrite conversation.iconURL in messageSettings

* fix(ModelSpecsMenu): icon edge cases

* fix(NewChat): dynamic icon

* fix(PluginsClient): always include endpoint in responseMessage

* fix: always include endpoint and iconURL in responseMessage across different response methods

* feat: interchangeable keys for modelSpec enforcing
2024-04-30 22:11:48 -04:00


const fs = require('fs');
const path = require('path');
const { StructuredTool } = require('langchain/tools');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('langchain/tools/calculator');
const {
  Tools,
  ContentTypes,
  imageGenTools,
  actionDelimiter,
  ImageVisionTool,
  openapiToFunction,
  validateAndParseOpenAPISpec,
} = require('librechat-data-provider');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { loadActionSets, createActionTool, domainParser } = require('./ActionService');
const { recordUsage } = require('~/server/services/Threads');
const { loadTools } = require('~/app/clients/tools/util');
const { redactMessage } = require('~/config/parsers');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');

const filteredTools = new Set([
  'ChatTool.js',
  'CodeSherpa.js',
  'CodeSherpaTools.js',
  'E2BTools.js',
  'extractionChain.js',
]);

/**
 * Loads and formats tools from the specified tool directory.
 *
 * The directory is scanned for JavaScript files, excluding any files in the filter set.
 * For each file, it attempts to load the file as a module and instantiate a class, if it's a subclass of `StructuredTool`.
 * Each tool instance is then formatted to be compatible with the OpenAI Assistant.
 * Additionally, instances of LangChain Tools are included in the result.
 *
 * @param {object} params - The parameters for the function.
 * @param {string} params.directory - The directory path where the tools are located.
 * @param {Array<string>} [params.adminFilter=[]] - Array of admin-defined tool keys to exclude from loading.
 * @returns {Record<string, FunctionTool>} An object mapping each tool's function name to its OpenAI-formatted definition.
 */
function loadAndFormatTools({ directory, adminFilter = [] }) {
  const filter = new Set([...adminFilter, ...filteredTools]);
  const tools = [];
  /* Structured Tools Directory */
  const files = fs.readdirSync(directory);

  for (const file of files) {
    if (file.endsWith('.js') && !filter.has(file)) {
      const filePath = path.join(directory, file);
      let ToolClass = null;
      try {
        ToolClass = require(filePath);
      } catch (error) {
        logger.error(`[loadAndFormatTools] Error loading tool from ${filePath}:`, error);
        continue;
      }

      if (!ToolClass) {
        continue;
      }

      if (ToolClass.prototype instanceof StructuredTool) {
        /** @type {StructuredTool | null} */
        let toolInstance = null;
        try {
          toolInstance = new ToolClass({ override: true });
        } catch (error) {
          logger.error(
            `[loadAndFormatTools] Error initializing \`${file}\` tool; if it requires authentication, is the \`override\` field configured?`,
            error,
          );
          continue;
        }

        if (!toolInstance) {
          continue;
        }

        const formattedTool = formatToOpenAIAssistantTool(toolInstance);
        tools.push(formattedTool);
      }
    }
  }

  /**
   * Basic Tools; schema: { input: string }
   */
  const basicToolInstances = [new Calculator()];

  for (const toolInstance of basicToolInstances) {
    const formattedTool = formatToOpenAIAssistantTool(toolInstance);
    tools.push(formattedTool);
  }

  tools.push(ImageVisionTool);

  return tools.reduce((map, tool) => {
    map[tool.function.name] = tool;
    return map;
  }, {});
}
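
// Illustrative usage (a sketch, not executed by this module): building the
// tool map from the structured tools directory. The directory path and the
// admin filter entry below are assumptions for demonstration only.
//
//   const toolMap = loadAndFormatTools({
//     directory: path.join(__dirname, '..', '..', '..', 'app', 'clients', 'tools', 'structured'),
//     adminFilter: ['SomeExcludedTool.js'], // hypothetical filename
//   });
//   // The map is keyed by each tool's function name, e.g.:
//   // toolMap.calculator => { type: 'function', function: { name: 'calculator', ... } }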

/**
 * Formats a `StructuredTool` instance into a format that is compatible
 * with OpenAI's ChatCompletionFunctions. It uses the `zodToJsonSchema`
 * function to convert the schema of the `StructuredTool` into a JSON
 * schema, which is then used as the parameters for the OpenAI function.
 *
 * @param {StructuredTool} tool - The StructuredTool to format.
 * @returns {FunctionTool} The OpenAI Assistant Tool.
 */
function formatToOpenAIAssistantTool(tool) {
  return {
    type: Tools.function,
    [Tools.function]: {
      name: tool.name,
      description: tool.description,
      parameters: zodToJsonSchema(tool.schema),
    },
  };
}
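
// Example (illustrative): formatting the LangChain Calculator tool produces a
// function tool along these lines; the exact `parameters` object is whatever
// `zodToJsonSchema` emits for the tool's schema, so the shape below is an
// approximation.
//
//   formatToOpenAIAssistantTool(new Calculator());
//   // => {
//   //      type: 'function',
//   //      function: {
//   //        name: 'calculator',
//   //        description: 'Useful for getting the result of a math expression...',
//   //        parameters: { type: 'object', properties: { input: { type: 'string' } }, ... },
//   //      },
//   //    }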

/**
 * Processes a vision tool call by awaiting the client's pending vision request
 * and returning its output.
 * @param {OpenAIClient | StreamRunManager} client - OpenAI (legacy) or StreamRunManager Client.
 * @param {RequiredAction} currentAction - The current required action.
 * @returns {Promise<ToolOutput>} The output of the vision tool call.
 */
const processVisionRequest = async (client, currentAction) => {
  if (!client.visionPromise) {
    return {
      tool_call_id: currentAction.toolCallId,
      output: 'No image details found.',
    };
  }

  /** @type {ChatCompletion | undefined} */
  const completion = await client.visionPromise;
  if (completion?.usage) {
    recordUsage({
      user: client.req.user.id,
      model: client.req.body.model,
      conversationId: (client.responseMessage ?? client.finalMessage).conversationId,
      ...completion.usage,
    });
  }

  const output = completion?.choices?.[0]?.message?.content ?? 'No image details found.';
  return {
    tool_call_id: currentAction.toolCallId,
    output,
  };
};

/**
 * Processes the required actions returned from a run by calling the appropriate tools
 * and collecting their outputs for submission.
 * @param {OpenAIClient | StreamRunManager} client - OpenAI (legacy) or StreamRunManager Client.
 * @param {RequiredAction[]} requiredActions - The required actions to submit outputs for.
 * @returns {Promise<ToolOutputs>} The outputs of the tools.
 */
async function processRequiredActions(client, requiredActions) {
  logger.debug(
    `[required actions] user: ${client.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
    requiredActions,
  );
  const tools = requiredActions.map((action) => action.tool);
  const loadedTools = await loadTools({
    user: client.req.user.id,
    model: client.req.body.model ?? 'gpt-3.5-turbo-1106',
    tools,
    functions: true,
    options: {
      processFileURL,
      req: client.req,
      uploadImageBuffer,
      openAIApiKey: client.apiKey,
      fileStrategy: client.req.app.locals.fileStrategy,
      returnMetadata: true,
    },
    skipSpecs: true,
  });

  const ToolMap = loadedTools.reduce((map, tool) => {
    map[tool.name] = tool;
    return map;
  }, {});

  const promises = [];

  /** @type {Action[]} */
  let actionSets = [];
  let isActionTool = false;
  const ActionToolMap = {};
  const ActionBuildersMap = {};

  for (let i = 0; i < requiredActions.length; i++) {
    const currentAction = requiredActions[i];
    if (currentAction.tool === ImageVisionTool.function.name) {
      promises.push(processVisionRequest(client, currentAction));
      continue;
    }
    let tool = ToolMap[currentAction.tool] ?? ActionToolMap[currentAction.tool];

    const handleToolOutput = async (output) => {
      requiredActions[i].output = output;

      /** @type {FunctionToolCall & PartMetadata} */
      const toolCall = {
        function: {
          name: currentAction.tool,
          arguments: JSON.stringify(currentAction.toolInput),
          output,
        },
        id: currentAction.toolCallId,
        type: 'function',
        progress: 1,
        action: isActionTool,
      };

      const toolCallIndex = client.mappedOrder.get(toolCall.id);

      if (imageGenTools.has(currentAction.tool)) {
        const imageOutput = output;
        toolCall.function.output = `${currentAction.tool} displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.`;

        // Streams the "Finished" state of the tool call in the UI
        client.addContentData({
          [ContentTypes.TOOL_CALL]: toolCall,
          index: toolCallIndex,
          type: ContentTypes.TOOL_CALL,
        });

        await sleep(500);

        /** @type {ImageFile} */
        const imageDetails = {
          ...imageOutput,
          ...currentAction.toolInput,
        };

        const image_file = {
          [ContentTypes.IMAGE_FILE]: imageDetails,
          type: ContentTypes.IMAGE_FILE,
          // Replace the tool call output with Image file
          index: toolCallIndex,
        };

        client.addContentData(image_file);

        // Update the stored tool call
        client.seenToolCalls && client.seenToolCalls.set(toolCall.id, toolCall);

        return {
          tool_call_id: currentAction.toolCallId,
          output: toolCall.function.output,
        };
      }

      client.seenToolCalls && client.seenToolCalls.set(toolCall.id, toolCall);
      client.addContentData({
        [ContentTypes.TOOL_CALL]: toolCall,
        index: toolCallIndex,
        type: ContentTypes.TOOL_CALL,
        // TODO: to append tool properties to stream, pass metadata rest to addContentData
        // result: tool.result,
      });

      return {
        tool_call_id: currentAction.toolCallId,
        output,
      };
    };

    if (!tool) {
      // throw new Error(`Tool ${currentAction.tool} not found.`);
      if (!actionSets.length) {
        actionSets =
          (await loadActionSets({
            assistant_id: client.req.body.assistant_id,
          })) ?? [];
      }

      let actionSet = null;
      let currentDomain = '';
      for (let action of actionSets) {
        const domain = await domainParser(client.req, action.metadata.domain, true);
        if (currentAction.tool.includes(domain)) {
          currentDomain = domain;
          actionSet = action;
          break;
        }
      }

      if (!actionSet) {
        // TODO: try `function` if no action set is found
        // throw new Error(`Tool ${currentAction.tool} not found.`);
        continue;
      }

      let builders = ActionBuildersMap[actionSet.metadata.domain];
      if (!builders) {
        const validationResult = validateAndParseOpenAPISpec(actionSet.metadata.raw_spec);
        if (!validationResult.spec) {
          throw new Error(
            `Invalid spec: user: ${client.req.user.id} | thread_id: ${requiredActions[0].thread_id} | run_id: ${requiredActions[0].run_id}`,
          );
        }
        const { requestBuilders } = openapiToFunction(validationResult.spec);
        // Cache builders per domain so repeated actions skip spec re-parsing
        ActionBuildersMap[actionSet.metadata.domain] = requestBuilders;
        builders = requestBuilders;
      }

      const functionName = currentAction.tool.replace(`${actionDelimiter}${currentDomain}`, '');
      const requestBuilder = builders[functionName];

      if (!requestBuilder) {
        // throw new Error(`Tool ${currentAction.tool} not found.`);
        continue;
      }

      tool = createActionTool({ action: actionSet, requestBuilder });
      isActionTool = !!tool;
      ActionToolMap[currentAction.tool] = tool;
    }

    if (currentAction.tool === 'calculator') {
      currentAction.toolInput = currentAction.toolInput.input;
    }

    try {
      const promise = tool
        ._call(currentAction.toolInput)
        .then(handleToolOutput)
        .catch((error) => {
          logger.error(`Error processing tool ${currentAction.tool}`, error);
          return {
            tool_call_id: currentAction.toolCallId,
            output: `Error processing tool ${currentAction.tool}: ${redactMessage(error.message)}`,
          };
        });
      promises.push(promise);
    } catch (error) {
      logger.error(
        `tool_call_id: ${currentAction.toolCallId} | Error processing tool ${currentAction.tool}`,
        error,
      );
      promises.push(
        Promise.resolve({
          tool_call_id: currentAction.toolCallId,
          error: error.message,
        }),
      );
    }
  }

  return {
    tool_outputs: await Promise.all(promises),
  };
}
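
// Illustrative flow (a sketch of the caller's side, not performed here): the
// returned outputs are what a caller would submit back to the Assistants run.
// The `openai`, `thread_id`, and `run_id` identifiers below are assumptions
// for demonstration.
//
//   const { tool_outputs } = await processRequiredActions(client, requiredActions);
//   // e.g. tool_outputs: [{ tool_call_id: 'call_abc123', output: '42' }]
//   // await openai.beta.threads.runs.submitToolOutputs(thread_id, run_id, { tool_outputs });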

module.exports = {
  formatToOpenAIAssistantTool,
  loadAndFormatTools,
  processRequiredActions,
};