mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-01-18 00:15:30 +01:00
🚧 chore: merge latest dev build (#4288)
* fix: agent initialization, add `collectedUsage` handling * style: improve side panel styling * refactor(loadAgent): Optimize order agent project ID retrieval * feat: code execution * fix: typing issues * feat: ExecuteCode content part * refactor: use local state for default collapsed state of analysis content parts * fix: code parsing in ExecuteCode component * chore: bump agents package, export loadAuthValues * refactor: Update handleTools.js to use EnvVar for code execution tool authentication * WIP * feat: download code outputs * fix(useEventHandlers): type issues * feat: backend handling for code outputs * Refactor: Remove console.log statement in Part.tsx * refactor: add attachments to TMessage/messageSchema * WIP: prelim handling for code outputs * feat: attachments rendering * refactor: improve attachments rendering * fix: attachments, nullish edge case, handle attachments from event stream, bump agents package * fix filename download * fix: tool assignment for 'run code' on agent creation * fix: image handling by adding attachments * refactor: prevent agent creation without provider/model * refactor: remove unnecessary space in agent creation success message * refactor: select first model if selecting provider from empty on form * fix: Agent avatar bug * fix: `defaultAgentFormValues` causing boolean typing issue and typeerror * fix: capabilities counting as tools, causing duplication of them * fix: formatted messages edge case where consecutive content text type parts with the latter having tool_call_ids would cause consecutive AI messages to be created. 
furthermore, content could not be an array for tool_use messages (anthropic limitation) * chore: bump @librechat/agents dependency to version 1.6.9 * feat: bedrock agents * feat: new Agents icon * feat: agent titling * feat: agent landing * refactor: allow sharing agent globally only if user is admin or author * feat: initial AgentPanelSkeleton * feat: AgentPanelSkeleton * feat: collaborative agents * chore: add potential authorName as part of schema * chore: Remove unnecessary console.log statement * WIP: agent model parameters * chore: ToolsDialog typing and tool related localization chnages * refactor: update tool instance type (latest langchain class), and rename google tool to 'google' proper * chore: add back tools * feat: Agent knowledge files upload * refactor: better verbiage for disabled knowledge * chore: debug logs for file deletions * chore: debug logs for file deletions * feat: upload/delete agent knowledge/file-search files * feat: file search UI for agents * feat: first pass, file search tool * chore: update default agent capabilities and info
This commit is contained in:
parent
f33e75e2ee
commit
ad74350036
123 changed files with 3611 additions and 1541 deletions
|
|
@ -42,6 +42,8 @@ class BaseClient {
|
|||
this.conversationId;
|
||||
/** @type {string} */
|
||||
this.responseMessageId;
|
||||
/** @type {TAttachment[]} */
|
||||
this.attachments;
|
||||
/** The key for the usage object's input tokens
|
||||
* @type {string} */
|
||||
this.inputTokensKey = 'prompt_tokens';
|
||||
|
|
@ -629,6 +631,10 @@ class BaseClient {
|
|||
await this.userMessagePromise;
|
||||
}
|
||||
|
||||
if (this.artifactPromises) {
|
||||
responseMessage.attachments = (await Promise.all(this.artifactPromises)).filter((a) => a);
|
||||
}
|
||||
|
||||
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
|
||||
const messageCache = getLogStores(CacheKeys.MESSAGES);
|
||||
messageCache.set(
|
||||
|
|
|
|||
285
api/app/clients/prompts/formatAgentMessages.spec.js
Normal file
285
api/app/clients/prompts/formatAgentMessages.spec.js
Normal file
|
|
@ -0,0 +1,285 @@
|
|||
const { ToolMessage } = require('@langchain/core/messages');
|
||||
const { ContentTypes } = require('librechat-data-provider');
|
||||
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
|
||||
const { formatAgentMessages } = require('./formatMessages');
|
||||
|
||||
describe('formatAgentMessages', () => {
|
||||
it('should format simple user and AI messages', () => {
|
||||
const payload = [
|
||||
{ role: 'user', content: 'Hello' },
|
||||
{ role: 'assistant', content: 'Hi there!' },
|
||||
];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0]).toBeInstanceOf(HumanMessage);
|
||||
expect(result[1]).toBeInstanceOf(AIMessage);
|
||||
});
|
||||
|
||||
it('should handle system messages', () => {
|
||||
const payload = [{ role: 'system', content: 'You are a helpful assistant.' }];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toBeInstanceOf(SystemMessage);
|
||||
});
|
||||
|
||||
it('should format messages with content arrays', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hello' }],
|
||||
},
|
||||
];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toBeInstanceOf(HumanMessage);
|
||||
});
|
||||
|
||||
it('should handle tool calls and create ToolMessages', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: 'Let me check that for you.',
|
||||
tool_call_ids: ['123'],
|
||||
},
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: '123',
|
||||
name: 'search',
|
||||
args: '{"query":"weather"}',
|
||||
output: 'The weather is sunny.',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0]).toBeInstanceOf(AIMessage);
|
||||
expect(result[1]).toBeInstanceOf(ToolMessage);
|
||||
expect(result[0].tool_calls).toHaveLength(1);
|
||||
expect(result[1].tool_call_id).toBe('123');
|
||||
});
|
||||
|
||||
it('should handle multiple content parts in assistant messages', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 1' },
|
||||
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Part 2' },
|
||||
],
|
||||
},
|
||||
];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(1);
|
||||
expect(result[0]).toBeInstanceOf(AIMessage);
|
||||
expect(result[0].content).toHaveLength(2);
|
||||
});
|
||||
|
||||
it('should throw an error for invalid tool call structure', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: '123',
|
||||
name: 'search',
|
||||
args: '{"query":"weather"}',
|
||||
output: 'The weather is sunny.',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
expect(() => formatAgentMessages(payload)).toThrow('Invalid tool call structure');
|
||||
});
|
||||
|
||||
it('should handle tool calls with non-JSON args', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Checking...', tool_call_ids: ['123'] },
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: '123',
|
||||
name: 'search',
|
||||
args: 'non-json-string',
|
||||
output: 'Result',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
const result = formatAgentMessages(payload);
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0].tool_calls[0].args).toBe('non-json-string');
|
||||
});
|
||||
|
||||
it('should handle complex tool calls with multiple steps', () => {
|
||||
const payload = [
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: 'I\'ll search for that information.',
|
||||
tool_call_ids: ['search_1'],
|
||||
},
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: 'search_1',
|
||||
name: 'search',
|
||||
args: '{"query":"weather in New York"}',
|
||||
output: 'The weather in New York is currently sunny with a temperature of 75°F.',
|
||||
},
|
||||
},
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: 'Now, I\'ll convert the temperature.',
|
||||
tool_call_ids: ['convert_1'],
|
||||
},
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: 'convert_1',
|
||||
name: 'convert_temperature',
|
||||
args: '{"temperature": 75, "from": "F", "to": "C"}',
|
||||
output: '23.89°C',
|
||||
},
|
||||
},
|
||||
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s your answer.' },
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
const result = formatAgentMessages(payload);
|
||||
|
||||
expect(result).toHaveLength(5);
|
||||
expect(result[0]).toBeInstanceOf(AIMessage);
|
||||
expect(result[1]).toBeInstanceOf(ToolMessage);
|
||||
expect(result[2]).toBeInstanceOf(AIMessage);
|
||||
expect(result[3]).toBeInstanceOf(ToolMessage);
|
||||
expect(result[4]).toBeInstanceOf(AIMessage);
|
||||
|
||||
// Check first AIMessage
|
||||
expect(result[0].content).toBe('I\'ll search for that information.');
|
||||
expect(result[0].tool_calls).toHaveLength(1);
|
||||
expect(result[0].tool_calls[0]).toEqual({
|
||||
id: 'search_1',
|
||||
name: 'search',
|
||||
args: { query: 'weather in New York' },
|
||||
});
|
||||
|
||||
// Check first ToolMessage
|
||||
expect(result[1].tool_call_id).toBe('search_1');
|
||||
expect(result[1].name).toBe('search');
|
||||
expect(result[1].content).toBe(
|
||||
'The weather in New York is currently sunny with a temperature of 75°F.',
|
||||
);
|
||||
|
||||
// Check second AIMessage
|
||||
expect(result[2].content).toBe('Now, I\'ll convert the temperature.');
|
||||
expect(result[2].tool_calls).toHaveLength(1);
|
||||
expect(result[2].tool_calls[0]).toEqual({
|
||||
id: 'convert_1',
|
||||
name: 'convert_temperature',
|
||||
args: { temperature: 75, from: 'F', to: 'C' },
|
||||
});
|
||||
|
||||
// Check second ToolMessage
|
||||
expect(result[3].tool_call_id).toBe('convert_1');
|
||||
expect(result[3].name).toBe('convert_temperature');
|
||||
expect(result[3].content).toBe('23.89°C');
|
||||
|
||||
// Check final AIMessage
|
||||
expect(result[4].content).toStrictEqual([
|
||||
{ [ContentTypes.TEXT]: 'Here\'s your answer.', type: ContentTypes.TEXT },
|
||||
]);
|
||||
});
|
||||
|
||||
it.skip('should not produce two consecutive assistant messages and format content correctly', () => {
|
||||
const payload = [
|
||||
{ role: 'user', content: 'Hello' },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Hi there!' }],
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'How can I help you?' }],
|
||||
},
|
||||
{ role: 'user', content: 'What\'s the weather?' },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: 'Let me check that for you.',
|
||||
tool_call_ids: ['weather_1'],
|
||||
},
|
||||
{
|
||||
type: ContentTypes.TOOL_CALL,
|
||||
tool_call: {
|
||||
id: 'weather_1',
|
||||
name: 'check_weather',
|
||||
args: '{"location":"New York"}',
|
||||
output: 'Sunny, 75°F',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s the weather information.' },
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
const result = formatAgentMessages(payload);
|
||||
|
||||
// Check correct message count and types
|
||||
expect(result).toHaveLength(6);
|
||||
expect(result[0]).toBeInstanceOf(HumanMessage);
|
||||
expect(result[1]).toBeInstanceOf(AIMessage);
|
||||
expect(result[2]).toBeInstanceOf(HumanMessage);
|
||||
expect(result[3]).toBeInstanceOf(AIMessage);
|
||||
expect(result[4]).toBeInstanceOf(ToolMessage);
|
||||
expect(result[5]).toBeInstanceOf(AIMessage);
|
||||
|
||||
// Check content of messages
|
||||
expect(result[0].content).toStrictEqual([
|
||||
{ [ContentTypes.TEXT]: 'Hello', type: ContentTypes.TEXT },
|
||||
]);
|
||||
expect(result[1].content).toStrictEqual([
|
||||
{ [ContentTypes.TEXT]: 'Hi there!', type: ContentTypes.TEXT },
|
||||
{ [ContentTypes.TEXT]: 'How can I help you?', type: ContentTypes.TEXT },
|
||||
]);
|
||||
expect(result[2].content).toStrictEqual([
|
||||
{ [ContentTypes.TEXT]: 'What\'s the weather?', type: ContentTypes.TEXT },
|
||||
]);
|
||||
expect(result[3].content).toBe('Let me check that for you.');
|
||||
expect(result[4].content).toBe('Sunny, 75°F');
|
||||
expect(result[5].content).toStrictEqual([
|
||||
{ [ContentTypes.TEXT]: 'Here\'s the weather information.', type: ContentTypes.TEXT },
|
||||
]);
|
||||
|
||||
// Check that there are no consecutive AIMessages
|
||||
const messageTypes = result.map((message) => message.constructor);
|
||||
for (let i = 0; i < messageTypes.length - 1; i++) {
|
||||
expect(messageTypes[i] === AIMessage && messageTypes[i + 1] === AIMessage).toBe(false);
|
||||
}
|
||||
|
||||
// Additional check to ensure the consecutive assistant messages were combined
|
||||
expect(result[1].content).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
|
@ -155,10 +155,22 @@ const formatAgentMessages = (payload) => {
|
|||
|
||||
for (const part of message.content) {
|
||||
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
|
||||
// If there's pending content, add it as an AIMessage
|
||||
/*
|
||||
If there's pending content, it needs to be aggregated as a single string to prepare for tool calls.
|
||||
For Anthropic models, the "tool_calls" field on a message is only respected if content is a string.
|
||||
*/
|
||||
if (currentContent.length > 0) {
|
||||
messages.push(new AIMessage({ content: currentContent }));
|
||||
let content = currentContent.reduce((acc, curr) => {
|
||||
if (curr.type === ContentTypes.TEXT) {
|
||||
return `${acc}${curr[ContentTypes.TEXT]}\n`;
|
||||
}
|
||||
return acc;
|
||||
}, '');
|
||||
content = `${content}\n${part[ContentTypes.TEXT] ?? ''}`.trim();
|
||||
lastAIMessage = new AIMessage({ content });
|
||||
messages.push(lastAIMessage);
|
||||
currentContent = [];
|
||||
continue;
|
||||
}
|
||||
|
||||
// Create a new AIMessage with this text and prepare for tool calls
|
||||
|
|
|
|||
|
|
@ -25,7 +25,6 @@ module.exports = {
|
|||
// Basic Tools
|
||||
CodeBrew,
|
||||
AzureAiSearch,
|
||||
GoogleSearchAPI,
|
||||
WolframAlphaAPI,
|
||||
OpenAICreateImage,
|
||||
StableDiffusionAPI,
|
||||
|
|
@ -37,8 +36,9 @@ module.exports = {
|
|||
CodeSherpa,
|
||||
StructuredSD,
|
||||
StructuredACS,
|
||||
GoogleSearchAPI,
|
||||
CodeSherpaTools,
|
||||
TraversaalSearch,
|
||||
StructuredWolfram,
|
||||
TavilySearchResults,
|
||||
TraversaalSearch,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
const { z } = require('zod');
|
||||
const { StructuredTool } = require('langchain/tools');
|
||||
const { Tool } = require('@langchain/core/tools');
|
||||
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class AzureAISearch extends StructuredTool {
|
||||
class AzureAISearch extends Tool {
|
||||
// Constants for default values
|
||||
static DEFAULT_API_VERSION = '2023-11-01';
|
||||
static DEFAULT_QUERY_TYPE = 'simple';
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ const { z } = require('zod');
|
|||
const path = require('path');
|
||||
const OpenAI = require('openai');
|
||||
const { v4: uuidv4 } = require('uuid');
|
||||
const { Tool } = require('langchain/tools');
|
||||
const { Tool } = require('@langchain/core/tools');
|
||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||
const { FileContext } = require('librechat-data-provider');
|
||||
const { getImageBasename } = require('~/server/services/Files/images');
|
||||
|
|
|
|||
|
|
@ -4,11 +4,12 @@ const { getEnvironmentVariable } = require('@langchain/core/utils/env');
|
|||
|
||||
class GoogleSearchResults extends Tool {
|
||||
static lc_name() {
|
||||
return 'GoogleSearchResults';
|
||||
return 'google';
|
||||
}
|
||||
|
||||
constructor(fields = {}) {
|
||||
super(fields);
|
||||
this.name = 'google';
|
||||
this.envVarApiKey = 'GOOGLE_SEARCH_API_KEY';
|
||||
this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
|
||||
this.override = fields.override ?? false;
|
||||
|
|
|
|||
|
|
@ -5,12 +5,12 @@ const path = require('path');
|
|||
const axios = require('axios');
|
||||
const sharp = require('sharp');
|
||||
const { v4: uuidv4 } = require('uuid');
|
||||
const { StructuredTool } = require('langchain/tools');
|
||||
const { Tool } = require('@langchain/core/tools');
|
||||
const { FileContext } = require('librechat-data-provider');
|
||||
const paths = require('~/config/paths');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class StableDiffusionAPI extends StructuredTool {
|
||||
class StableDiffusionAPI extends Tool {
|
||||
constructor(fields) {
|
||||
super();
|
||||
/** @type {string} User ID */
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
/* eslint-disable no-useless-escape */
|
||||
const axios = require('axios');
|
||||
const { z } = require('zod');
|
||||
const { StructuredTool } = require('langchain/tools');
|
||||
const { Tool } = require('@langchain/core/tools');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class WolframAlphaAPI extends StructuredTool {
|
||||
class WolframAlphaAPI extends Tool {
|
||||
constructor(fields) {
|
||||
super();
|
||||
/* Used to initialize the Tool without necessary variables. */
|
||||
|
|
|
|||
104
api/app/clients/tools/util/createFileSearchTool.js
Normal file
104
api/app/clients/tools/util/createFileSearchTool.js
Normal file
|
|
@ -0,0 +1,104 @@
|
|||
const { z } = require('zod');
|
||||
const axios = require('axios');
|
||||
const { tool } = require('@langchain/core/tools');
|
||||
const { Tools, EToolResources } = require('librechat-data-provider');
|
||||
const { getFiles } = require('~/models/File');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {Object} options
|
||||
* @param {ServerRequest} options.req
|
||||
* @param {Agent['tool_resources']} options.tool_resources
|
||||
* @returns
|
||||
*/
|
||||
const createFileSearchTool = async (options) => {
|
||||
const { req, tool_resources } = options;
|
||||
const file_ids = tool_resources?.[EToolResources.file_search]?.file_ids ?? [];
|
||||
const files = (await getFiles({ file_id: { $in: file_ids } })).map((file) => ({
|
||||
file_id: file.file_id,
|
||||
filename: file.filename,
|
||||
}));
|
||||
|
||||
const fileList = files.map((file) => `- ${file.filename}`).join('\n');
|
||||
const toolDescription = `Performs a semantic search based on a natural language query across the following files:\n${fileList}`;
|
||||
|
||||
const FileSearch = tool(
|
||||
async ({ query }) => {
|
||||
if (files.length === 0) {
|
||||
return 'No files to search. Instruct the user to add files for the search.';
|
||||
}
|
||||
const jwtToken = req.headers.authorization.split(' ')[1];
|
||||
if (!jwtToken) {
|
||||
return 'There was an error authenticating the file search request.';
|
||||
}
|
||||
const queryPromises = files.map((file) =>
|
||||
axios
|
||||
.post(
|
||||
`${process.env.RAG_API_URL}/query`,
|
||||
{
|
||||
file_id: file.file_id,
|
||||
query,
|
||||
k: 5,
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${jwtToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
)
|
||||
.catch((error) => {
|
||||
logger.error(
|
||||
`Error encountered in \`file_search\` while querying file_id ${file._id}:`,
|
||||
error,
|
||||
);
|
||||
return null;
|
||||
}),
|
||||
);
|
||||
|
||||
const results = await Promise.all(queryPromises);
|
||||
const validResults = results.filter((result) => result !== null);
|
||||
|
||||
if (validResults.length === 0) {
|
||||
return 'No results found or errors occurred while searching the files.';
|
||||
}
|
||||
|
||||
const formattedResults = validResults
|
||||
.flatMap((result) =>
|
||||
result.data.map(([docInfo, relevanceScore]) => ({
|
||||
filename: docInfo.metadata.source.split('/').pop(),
|
||||
content: docInfo.page_content,
|
||||
relevanceScore,
|
||||
})),
|
||||
)
|
||||
.sort((a, b) => b.relevanceScore - a.relevanceScore);
|
||||
|
||||
const formattedString = formattedResults
|
||||
.map(
|
||||
(result) =>
|
||||
`File: ${result.filename}\nRelevance: ${result.relevanceScore.toFixed(4)}\nContent: ${
|
||||
result.content
|
||||
}\n`,
|
||||
)
|
||||
.join('\n---\n');
|
||||
|
||||
return formattedString;
|
||||
},
|
||||
{
|
||||
name: Tools.file_search,
|
||||
description: toolDescription,
|
||||
schema: z.object({
|
||||
query: z
|
||||
.string()
|
||||
.describe(
|
||||
'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
|
||||
),
|
||||
}),
|
||||
},
|
||||
);
|
||||
|
||||
return FileSearch;
|
||||
};
|
||||
|
||||
module.exports = createFileSearchTool;
|
||||
|
|
@ -1,8 +1,10 @@
|
|||
const { Tools } = require('librechat-data-provider');
|
||||
const { ZapierToolKit } = require('langchain/agents');
|
||||
const { Calculator } = require('langchain/tools/calculator');
|
||||
const { WebBrowser } = require('langchain/tools/webbrowser');
|
||||
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
|
||||
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
|
||||
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
|
||||
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
|
||||
const {
|
||||
availableTools,
|
||||
|
|
@ -24,6 +26,7 @@ const {
|
|||
StructuredWolfram,
|
||||
TavilySearchResults,
|
||||
} = require('../');
|
||||
const createFileSearchTool = require('./createFileSearchTool');
|
||||
const { loadToolSuite } = require('./loadToolSuite');
|
||||
const { loadSpecs } = require('./loadSpecs');
|
||||
const { logger } = require('~/config');
|
||||
|
|
@ -97,6 +100,45 @@ const validateTools = async (user, tools = []) => {
|
|||
}
|
||||
};
|
||||
|
||||
const loadAuthValues = async ({ userId, authFields }) => {
|
||||
let authValues = {};
|
||||
|
||||
/**
|
||||
* Finds the first non-empty value for the given authentication field, supporting alternate fields.
|
||||
* @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
|
||||
* @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
|
||||
*/
|
||||
const findAuthValue = async (fields) => {
|
||||
for (const field of fields) {
|
||||
let value = process.env[field];
|
||||
if (value) {
|
||||
return { authField: field, authValue: value };
|
||||
}
|
||||
try {
|
||||
value = await getUserPluginAuthValue(userId, field);
|
||||
} catch (err) {
|
||||
if (field === fields[fields.length - 1] && !value) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
if (value) {
|
||||
return { authField: field, authValue: value };
|
||||
}
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
for (let authField of authFields) {
|
||||
const fields = authField.split('||');
|
||||
const result = await findAuthValue(fields);
|
||||
if (result) {
|
||||
authValues[result.authField] = result.authValue;
|
||||
}
|
||||
}
|
||||
|
||||
return authValues;
|
||||
};
|
||||
|
||||
/**
|
||||
* Initializes a tool with authentication values for the given user, supporting alternate authentication fields.
|
||||
* Authentication fields can have alternates separated by "||", and the first defined variable will be used.
|
||||
|
|
@ -109,41 +151,7 @@ const validateTools = async (user, tools = []) => {
|
|||
*/
|
||||
const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) => {
|
||||
return async function () {
|
||||
let authValues = {};
|
||||
|
||||
/**
|
||||
* Finds the first non-empty value for the given authentication field, supporting alternate fields.
|
||||
* @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
|
||||
* @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
|
||||
*/
|
||||
const findAuthValue = async (fields) => {
|
||||
for (const field of fields) {
|
||||
let value = process.env[field];
|
||||
if (value) {
|
||||
return { authField: field, authValue: value };
|
||||
}
|
||||
try {
|
||||
value = await getUserPluginAuthValue(userId, field);
|
||||
} catch (err) {
|
||||
if (field === fields[fields.length - 1] && !value) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
if (value) {
|
||||
return { authField: field, authValue: value };
|
||||
}
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
for (let authField of authFields) {
|
||||
const fields = authField.split('||');
|
||||
const result = await findAuthValue(fields);
|
||||
if (result) {
|
||||
authValues[result.authField] = result.authValue;
|
||||
}
|
||||
}
|
||||
|
||||
const authValues = await loadAuthValues({ userId, authFields });
|
||||
return new ToolConstructor({ ...options, ...authValues, userId });
|
||||
};
|
||||
};
|
||||
|
|
@ -264,6 +272,22 @@ const loadTools = async ({
|
|||
const remainingTools = [];
|
||||
|
||||
for (const tool of tools) {
|
||||
if (tool === Tools.execute_code) {
|
||||
const authValues = await loadAuthValues({
|
||||
userId: user.id,
|
||||
authFields: [EnvVar.CODE_API_KEY],
|
||||
});
|
||||
requestedTools[tool] = () =>
|
||||
createCodeExecutionTool({
|
||||
user_id: user.id,
|
||||
...authValues,
|
||||
});
|
||||
continue;
|
||||
} else if (tool === Tools.file_search) {
|
||||
requestedTools[tool] = () => createFileSearchTool(options);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (customConstructors[tool]) {
|
||||
requestedTools[tool] = customConstructors[tool];
|
||||
continue;
|
||||
|
|
@ -331,6 +355,7 @@ const loadTools = async ({
|
|||
|
||||
module.exports = {
|
||||
loadToolWithAuth,
|
||||
loadAuthValues,
|
||||
validateTools,
|
||||
loadTools,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
const { validateTools, loadTools } = require('./handleTools');
|
||||
const { validateTools, loadTools, loadAuthValues } = require('./handleTools');
|
||||
const handleOpenAIErrors = require('./handleOpenAIErrors');
|
||||
|
||||
module.exports = {
|
||||
handleOpenAIErrors,
|
||||
loadAuthValues,
|
||||
validateTools,
|
||||
loadTools,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,11 +1,14 @@
|
|||
const mongoose = require('mongoose');
|
||||
const { SystemRoles } = require('librechat-data-provider');
|
||||
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
|
||||
const { CONFIG_STORE, STARTUP_CONFIG } = require('librechat-data-provider').CacheKeys;
|
||||
const {
|
||||
getProjectByName,
|
||||
addAgentIdsToProject,
|
||||
removeAgentIdsFromProject,
|
||||
removeAgentFromAllProjects,
|
||||
} = require('./Project');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
const agentSchema = require('./schema/agent');
|
||||
|
||||
const Agent = mongoose.model('agent', agentSchema);
|
||||
|
|
@ -30,6 +33,43 @@ const createAgent = async (agentData) => {
|
|||
*/
|
||||
const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();
|
||||
|
||||
/**
|
||||
* Load an agent based on the provided ID
|
||||
*
|
||||
* @param {Object} params
|
||||
* @param {ServerRequest} params.req
|
||||
* @param {string} params.agent_id
|
||||
* @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
|
||||
*/
|
||||
const loadAgent = async ({ req, agent_id }) => {
|
||||
const agent = await getAgent({
|
||||
id: agent_id,
|
||||
});
|
||||
|
||||
if (agent.author.toString() === req.user.id) {
|
||||
return agent;
|
||||
}
|
||||
|
||||
if (!agent.projectIds) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const cache = getLogStores(CONFIG_STORE);
|
||||
/** @type {TStartupConfig} */
|
||||
const cachedStartupConfig = await cache.get(STARTUP_CONFIG);
|
||||
let { instanceProjectId } = cachedStartupConfig ?? {};
|
||||
if (!instanceProjectId) {
|
||||
instanceProjectId = (await getProjectByName(GLOBAL_PROJECT_NAME, '_id'))._id.toString();
|
||||
}
|
||||
|
||||
for (const projectObjectId of agent.projectIds) {
|
||||
const projectId = projectObjectId.toString();
|
||||
if (projectId === instanceProjectId) {
|
||||
return agent;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Update an agent with new data without overwriting existing
|
||||
* properties, or create a new agent if it doesn't exist.
|
||||
|
|
@ -41,10 +81,76 @@ const getAgent = async (searchParameter) => await Agent.findOne(searchParameter)
|
|||
* @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
|
||||
*/
|
||||
const updateAgent = async (searchParameter, updateData) => {
|
||||
const options = { new: true, upsert: true };
|
||||
const options = { new: true, upsert: false };
|
||||
return await Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
|
||||
};
|
||||
|
||||
/**
|
||||
* Modifies an agent with the resource file id.
|
||||
* @param {object} params
|
||||
* @param {ServerRequest} params.req
|
||||
* @param {string} params.agent_id
|
||||
* @param {string} params.tool_resource
|
||||
* @param {string} params.file_id
|
||||
* @returns {Promise<Agent>} The updated agent.
|
||||
*/
|
||||
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
|
||||
const searchParameter = { id: agent_id };
|
||||
const agent = await getAgent(searchParameter);
|
||||
|
||||
if (!agent) {
|
||||
throw new Error('Agent not found for adding resource file');
|
||||
}
|
||||
|
||||
const tool_resources = agent.tool_resources || {};
|
||||
|
||||
if (!tool_resources[tool_resource]) {
|
||||
tool_resources[tool_resource] = { file_ids: [] };
|
||||
}
|
||||
|
||||
if (!tool_resources[tool_resource].file_ids.includes(file_id)) {
|
||||
tool_resources[tool_resource].file_ids.push(file_id);
|
||||
}
|
||||
|
||||
const updateData = { tool_resources };
|
||||
|
||||
return await updateAgent(searchParameter, updateData);
|
||||
};
|
||||
|
||||
/**
|
||||
* Removes a resource file id from an agent.
|
||||
* @param {object} params
|
||||
* @param {ServerRequest} params.req
|
||||
* @param {string} params.agent_id
|
||||
* @param {string} params.tool_resource
|
||||
* @param {string} params.file_id
|
||||
* @returns {Promise<Agent>} The updated agent.
|
||||
*/
|
||||
const removeAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
|
||||
const searchParameter = { id: agent_id };
|
||||
const agent = await getAgent(searchParameter);
|
||||
|
||||
if (!agent) {
|
||||
throw new Error('Agent not found for removing resource file');
|
||||
}
|
||||
|
||||
const tool_resources = agent.tool_resources || {};
|
||||
|
||||
if (tool_resources[tool_resource] && tool_resources[tool_resource].file_ids) {
|
||||
tool_resources[tool_resource].file_ids = tool_resources[tool_resource].file_ids.filter(
|
||||
(id) => id !== file_id,
|
||||
);
|
||||
|
||||
if (tool_resources[tool_resource].file_ids.length === 0) {
|
||||
delete tool_resources[tool_resource];
|
||||
}
|
||||
}
|
||||
|
||||
const updateData = { tool_resources };
|
||||
|
||||
return await updateAgent(searchParameter, updateData);
|
||||
};
|
||||
|
||||
/**
|
||||
* Deletes an agent based on the provided ID.
|
||||
*
|
||||
|
|
@ -79,12 +185,25 @@ const getListAgents = async (searchParameter) => {
|
|||
query = { $or: [globalQuery, query] };
|
||||
}
|
||||
|
||||
const agents = await Agent.find(query, {
|
||||
id: 1,
|
||||
name: 1,
|
||||
avatar: 1,
|
||||
projectIds: 1,
|
||||
}).lean();
|
||||
const agents = (
|
||||
await Agent.find(query, {
|
||||
id: 1,
|
||||
_id: 0,
|
||||
name: 1,
|
||||
avatar: 1,
|
||||
author: 1,
|
||||
projectIds: 1,
|
||||
isCollaborative: 1,
|
||||
}).lean()
|
||||
).map((agent) => {
|
||||
if (agent.author?.toString() !== author) {
|
||||
delete agent.author;
|
||||
}
|
||||
if (agent.author) {
|
||||
agent.author = agent.author.toString();
|
||||
}
|
||||
return agent;
|
||||
});
|
||||
|
||||
const hasMore = agents.length > 0;
|
||||
const firstId = agents.length > 0 ? agents[0].id : null;
|
||||
|
|
@ -102,13 +221,15 @@ const getListAgents = async (searchParameter) => {
|
|||
* Updates the projects associated with an agent, adding and removing project IDs as specified.
|
||||
* This function also updates the corresponding projects to include or exclude the agent ID.
|
||||
*
|
||||
* @param {string} agentId - The ID of the agent to update.
|
||||
* @param {string[]} [projectIds] - Array of project IDs to add to the agent.
|
||||
* @param {string[]} [removeProjectIds] - Array of project IDs to remove from the agent.
|
||||
* @param {Object} params - Parameters for updating the agent's projects.
|
||||
* @param {import('librechat-data-provider').TUser} params.user - Parameters for updating the agent's projects.
|
||||
* @param {string} params.agentId - The ID of the agent to update.
|
||||
* @param {string[]} [params.projectIds] - Array of project IDs to add to the agent.
|
||||
* @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent.
|
||||
* @returns {Promise<MongoAgent>} The updated agent document.
|
||||
* @throws {Error} If there's an error updating the agent or projects.
|
||||
*/
|
||||
const updateAgentProjects = async (agentId, projectIds, removeProjectIds) => {
|
||||
const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds }) => {
|
||||
const updateOps = {};
|
||||
|
||||
if (removeProjectIds && removeProjectIds.length > 0) {
|
||||
|
|
@ -129,14 +250,36 @@ const updateAgentProjects = async (agentId, projectIds, removeProjectIds) => {
|
|||
return await getAgent({ id: agentId });
|
||||
}
|
||||
|
||||
return await updateAgent({ id: agentId }, updateOps);
|
||||
const updateQuery = { id: agentId, author: user.id };
|
||||
if (user.role === SystemRoles.ADMIN) {
|
||||
delete updateQuery.author;
|
||||
}
|
||||
|
||||
const updatedAgent = await updateAgent(updateQuery, updateOps);
|
||||
if (updatedAgent) {
|
||||
return updatedAgent;
|
||||
}
|
||||
if (updateOps.$addToSet) {
|
||||
for (const projectId of projectIds) {
|
||||
await removeAgentIdsFromProject(projectId, [agentId]);
|
||||
}
|
||||
} else if (updateOps.$pull) {
|
||||
for (const projectId of removeProjectIds) {
|
||||
await addAgentIdsToProject(projectId, [agentId]);
|
||||
}
|
||||
}
|
||||
|
||||
return await getAgent({ id: agentId });
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
createAgent,
|
||||
getAgent,
|
||||
loadAgent,
|
||||
createAgent,
|
||||
updateAgent,
|
||||
deleteAgent,
|
||||
getListAgents,
|
||||
updateAgentProjects,
|
||||
addAgentResourceFile,
|
||||
removeAgentResourceFile,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ const agentSchema = mongoose.Schema(
|
|||
id: {
|
||||
type: String,
|
||||
index: true,
|
||||
unique: true,
|
||||
required: true,
|
||||
},
|
||||
name: {
|
||||
|
|
@ -44,10 +45,6 @@ const agentSchema = mongoose.Schema(
|
|||
tool_kwargs: {
|
||||
type: [{ type: mongoose.Schema.Types.Mixed }],
|
||||
},
|
||||
file_ids: {
|
||||
type: [String],
|
||||
default: undefined,
|
||||
},
|
||||
actions: {
|
||||
type: [String],
|
||||
default: undefined,
|
||||
|
|
@ -57,6 +54,22 @@ const agentSchema = mongoose.Schema(
|
|||
ref: 'User',
|
||||
required: true,
|
||||
},
|
||||
authorName: {
|
||||
type: String,
|
||||
default: undefined,
|
||||
},
|
||||
isCollaborative: {
|
||||
type: Boolean,
|
||||
default: undefined,
|
||||
},
|
||||
conversation_starters: {
|
||||
type: [String],
|
||||
default: [],
|
||||
},
|
||||
tool_resources: {
|
||||
type: mongoose.Schema.Types.Mixed,
|
||||
default: {},
|
||||
},
|
||||
projectIds: {
|
||||
type: [mongoose.Schema.Types.ObjectId],
|
||||
ref: 'Project',
|
||||
|
|
|
|||
|
|
@ -115,6 +115,29 @@ const messageSchema = mongoose.Schema(
|
|||
iconURL: {
|
||||
type: String,
|
||||
},
|
||||
attachments: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
|
||||
/*
|
||||
attachments: {
|
||||
type: [
|
||||
{
|
||||
file_id: String,
|
||||
filename: String,
|
||||
filepath: String,
|
||||
expiresAt: Date,
|
||||
width: Number,
|
||||
height: Number,
|
||||
type: String,
|
||||
conversationId: String,
|
||||
messageId: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
toolCallId: String,
|
||||
},
|
||||
],
|
||||
default: undefined,
|
||||
},
|
||||
*/
|
||||
},
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@
|
|||
"@langchain/core": "^0.2.18",
|
||||
"@langchain/google-genai": "^0.0.11",
|
||||
"@langchain/google-vertexai": "^0.0.17",
|
||||
"@librechat/agents": "^1.5.2",
|
||||
"@librechat/agents": "^1.6.9",
|
||||
"axios": "^1.7.7",
|
||||
"bcryptjs": "^2.4.3",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
|
|
|
|||
|
|
@ -1,8 +1,13 @@
|
|||
const { Tools } = require('librechat-data-provider');
|
||||
const { GraphEvents, ToolEndHandler, ChatModelStreamHandler } = require('@librechat/agents');
|
||||
const { processCodeOutput } = require('~/server/services/Files/Code/process');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/** @typedef {import('@librechat/agents').Graph} Graph */
|
||||
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
|
||||
/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */
|
||||
/** @typedef {import('@librechat/agents').ToolEndData} ToolEndData */
|
||||
/** @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback */
|
||||
/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
|
||||
/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
|
||||
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
|
||||
|
|
@ -58,11 +63,12 @@ class ModelEndHandler {
|
|||
* @param {Object} options - The options object.
|
||||
* @param {ServerResponse} options.res - The options object.
|
||||
* @param {ContentAggregator} options.aggregateContent - The options object.
|
||||
* @param {ToolEndCallback} options.toolEndCallback - Callback to use when tool ends.
|
||||
* @param {Array<UsageMetadata>} options.collectedUsage - The list of collected usage metadata.
|
||||
* @returns {Record<string, t.EventHandler>} The default handlers.
|
||||
* @throws {Error} If the request is not found.
|
||||
*/
|
||||
function getDefaultHandlers({ res, aggregateContent, collectedUsage }) {
|
||||
function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedUsage }) {
|
||||
if (!res || !aggregateContent) {
|
||||
throw new Error(
|
||||
`[getDefaultHandlers] Missing required options: res: ${!res}, aggregateContent: ${!aggregateContent}`,
|
||||
|
|
@ -70,7 +76,7 @@ function getDefaultHandlers({ res, aggregateContent, collectedUsage }) {
|
|||
}
|
||||
const handlers = {
|
||||
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
|
||||
[GraphEvents.TOOL_END]: new ToolEndHandler(),
|
||||
[GraphEvents.TOOL_END]: new ToolEndHandler(toolEndCallback),
|
||||
[GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
|
||||
[GraphEvents.ON_RUN_STEP]: {
|
||||
/**
|
||||
|
|
@ -121,7 +127,67 @@ function getDefaultHandlers({ res, aggregateContent, collectedUsage }) {
|
|||
return handlers;
|
||||
}
|
||||
|
||||
/**
 * Creates a `ToolEndCallback` that collects code-execution file outputs.
 * For each generated file, a processing promise is queued on
 * `artifactPromises`; when response headers are already sent, the resulting
 * file metadata is also streamed to the client as an `attachment` SSE event.
 *
 * @param {Object} params
 * @param {ServerRequest} params.req
 * @param {ServerResponse} params.res
 * @param {Promise<MongoFile | { filename: string; filepath: string; expires: number;} | null>[]} params.artifactPromises
 * @returns {ToolEndCallback} The tool end callback.
 */
function createToolEndCallback({ req, res, artifactPromises }) {
  /**
   * @type {ToolEndCallback}
   */
  return async (data, metadata) => {
    const output = data?.output;
    if (!output) {
      return;
    }

    // Only code-execution tool outputs carry downloadable artifacts.
    if (output.name !== Tools.execute_code) {
      return;
    }

    const { tool_call_id, artifact } = output;
    // `artifact` itself may be absent; guard before reading `files`
    // (previously `artifact.files` threw when artifact was undefined).
    if (!artifact?.files) {
      return;
    }

    for (const file of artifact.files) {
      const { id, name } = file;
      artifactPromises.push(
        (async () => {
          const fileMetadata = await processCodeOutput({
            req,
            id,
            name,
            toolCallId: tool_call_id,
            messageId: metadata.run_id,
            sessionId: artifact.session_id,
            conversationId: metadata.thread_id,
          });
          // If headers are not sent yet, no SSE write is possible;
          // the caller consumes `artifactPromises` instead.
          if (!res.headersSent) {
            return fileMetadata;
          }

          if (!fileMetadata) {
            return null;
          }

          // Stream the attachment metadata to the client over SSE.
          res.write(`event: attachment\ndata: ${JSON.stringify(fileMetadata)}\n\n`);
          return fileMetadata;
        })().catch((error) => {
          // Never reject the queued promise; log and resolve to null.
          logger.error('Error processing code output:', error);
          return null;
        }),
      );
    }
  };
}
|
||||
|
||||
module.exports = {
|
||||
sendEvent,
|
||||
getDefaultHandlers,
|
||||
createToolEndCallback,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -10,7 +10,9 @@
|
|||
const { Callback, createMetadataAggregator } = require('@librechat/agents');
|
||||
const {
|
||||
Constants,
|
||||
openAISchema,
|
||||
EModelEndpoint,
|
||||
anthropicSchema,
|
||||
bedrockOutputParser,
|
||||
providerEndpointMap,
|
||||
removeNullishValues,
|
||||
|
|
@ -35,11 +37,10 @@ const { logger } = require('~/config');
|
|||
|
||||
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
|
||||
|
||||
// const providerSchemas = {
|
||||
// [EModelEndpoint.bedrock]: true,
|
||||
// };
|
||||
|
||||
const providerParsers = {
|
||||
[EModelEndpoint.openAI]: openAISchema,
|
||||
[EModelEndpoint.azureOpenAI]: openAISchema,
|
||||
[EModelEndpoint.anthropic]: anthropicSchema,
|
||||
[EModelEndpoint.bedrock]: bedrockOutputParser,
|
||||
};
|
||||
|
||||
|
|
@ -57,10 +58,11 @@ class AgentClient extends BaseClient {
|
|||
this.run;
|
||||
|
||||
const {
|
||||
maxContextTokens,
|
||||
modelOptions = {},
|
||||
contentParts,
|
||||
collectedUsage,
|
||||
artifactPromises,
|
||||
maxContextTokens,
|
||||
modelOptions = {},
|
||||
...clientOptions
|
||||
} = options;
|
||||
|
||||
|
|
@ -70,6 +72,8 @@ class AgentClient extends BaseClient {
|
|||
this.contentParts = contentParts;
|
||||
/** @type {Array<UsageMetadata>} */
|
||||
this.collectedUsage = collectedUsage;
|
||||
/** @type {ArtifactPromises} */
|
||||
this.artifactPromises = artifactPromises;
|
||||
this.options = Object.assign({ endpoint: options.endpoint }, clientOptions);
|
||||
}
|
||||
|
||||
|
|
@ -477,7 +481,6 @@ class AgentClient extends BaseClient {
|
|||
provider: providerEndpointMap[this.options.agent.provider],
|
||||
thread_id: this.conversationId,
|
||||
},
|
||||
run_id: this.responseMessageId,
|
||||
signal: abortController.signal,
|
||||
streamMode: 'values',
|
||||
version: 'v2',
|
||||
|
|
|
|||
|
|
@ -45,10 +45,9 @@ async function createRun({
|
|||
);
|
||||
|
||||
const graphConfig = {
|
||||
runId,
|
||||
llmConfig,
|
||||
tools,
|
||||
toolMap,
|
||||
llmConfig,
|
||||
instructions: agent.instructions,
|
||||
additional_instructions: agent.additional_instructions,
|
||||
};
|
||||
|
|
@ -59,6 +58,7 @@ async function createRun({
|
|||
}
|
||||
|
||||
return Run.create({
|
||||
runId,
|
||||
graphConfig,
|
||||
customHandlers,
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
const { nanoid } = require('nanoid');
|
||||
const { FileContext, Constants } = require('librechat-data-provider');
|
||||
const { FileContext, Constants, Tools, SystemRoles } = require('librechat-data-provider');
|
||||
const {
|
||||
getAgent,
|
||||
createAgent,
|
||||
|
|
@ -14,6 +14,11 @@ const { updateAgentProjects } = require('~/models/Agent');
|
|||
const { deleteFileByFilter } = require('~/models/File');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const systemTools = {
|
||||
[Tools.execute_code]: true,
|
||||
[Tools.file_search]: true,
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates an Agent.
|
||||
* @route POST /Agents
|
||||
|
|
@ -27,9 +32,17 @@ const createAgentHandler = async (req, res) => {
|
|||
const { tools = [], provider, name, description, instructions, model, ...agentData } = req.body;
|
||||
const { id: userId } = req.user;
|
||||
|
||||
agentData.tools = tools
|
||||
.map((tool) => (typeof tool === 'string' ? req.app.locals.availableTools[tool] : tool))
|
||||
.filter(Boolean);
|
||||
agentData.tools = [];
|
||||
|
||||
for (const tool of tools) {
|
||||
if (req.app.locals.availableTools[tool]) {
|
||||
agentData.tools.push(tool);
|
||||
}
|
||||
|
||||
if (systemTools[tool]) {
|
||||
agentData.tools.push(tool);
|
||||
}
|
||||
}
|
||||
|
||||
Object.assign(agentData, {
|
||||
author: userId,
|
||||
|
|
@ -80,10 +93,24 @@ const getAgentHandler = async (req, res) => {
|
|||
return res.status(404).json({ error: 'Agent not found' });
|
||||
}
|
||||
|
||||
agent.author = agent.author.toString();
|
||||
agent.isCollaborative = !!agent.isCollaborative;
|
||||
|
||||
if (agent.author !== author) {
|
||||
delete agent.author;
|
||||
}
|
||||
|
||||
if (!agent.isCollaborative && agent.author !== author && req.user.role !== SystemRoles.ADMIN) {
|
||||
return res.status(200).json({
|
||||
id: agent.id,
|
||||
name: agent.name,
|
||||
avatar: agent.avatar,
|
||||
author: agent.author,
|
||||
projectIds: agent.projectIds,
|
||||
isCollaborative: agent.isCollaborative,
|
||||
});
|
||||
}
|
||||
|
||||
return res.status(200).json(agent);
|
||||
} catch (error) {
|
||||
logger.error('[/Agents/:id] Error retrieving agent', error);
|
||||
|
|
@ -106,12 +133,29 @@ const updateAgentHandler = async (req, res) => {
|
|||
const { projectIds, removeProjectIds, ...updateData } = req.body;
|
||||
|
||||
let updatedAgent;
|
||||
const query = { id, author: req.user.id };
|
||||
if (req.user.role === SystemRoles.ADMIN) {
|
||||
delete query.author;
|
||||
}
|
||||
if (Object.keys(updateData).length > 0) {
|
||||
updatedAgent = await updateAgent({ id, author: req.user.id }, updateData);
|
||||
updatedAgent = await updateAgent(query, updateData);
|
||||
}
|
||||
|
||||
if (projectIds || removeProjectIds) {
|
||||
updatedAgent = await updateAgentProjects(id, projectIds, removeProjectIds);
|
||||
updatedAgent = await updateAgentProjects({
|
||||
user: req.user,
|
||||
agentId: id,
|
||||
projectIds,
|
||||
removeProjectIds,
|
||||
});
|
||||
}
|
||||
|
||||
if (updatedAgent.author) {
|
||||
updatedAgent.author = updatedAgent.author.toString();
|
||||
}
|
||||
|
||||
if (updatedAgent.author !== req.user.id) {
|
||||
delete updatedAgent.author;
|
||||
}
|
||||
|
||||
return res.json(updatedAgent);
|
||||
|
|
|
|||
|
|
@ -149,7 +149,6 @@ const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {
|
|||
* @param {string} params.assistant_id
|
||||
* @param {string} params.tool_resource
|
||||
* @param {string} params.file_id
|
||||
* @param {AssistantUpdateParams} params.updateData
|
||||
* @returns {Promise<Assistant>} The updated assistant.
|
||||
*/
|
||||
const addResourceFileId = async ({ req, openai, assistant_id, tool_resource, file_id }) => {
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ const {
|
|||
} = require('~/server/middleware');
|
||||
const { initializeClient } = require('~/server/services/Endpoints/agents');
|
||||
const AgentController = require('~/server/controllers/agents/request');
|
||||
const addTitle = require('~/server/services/Endpoints/agents/title');
|
||||
|
||||
router.post('/abort', handleAbort());
|
||||
|
||||
|
|
@ -28,7 +29,7 @@ router.post(
|
|||
buildEndpointOption,
|
||||
setHeaders,
|
||||
async (req, res, next) => {
|
||||
await AgentController(req, res, next, initializeClient);
|
||||
await AgentController(req, res, next, initializeClient, addTitle);
|
||||
},
|
||||
);
|
||||
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ const multer = require('multer');
|
|||
const express = require('express');
|
||||
const { PermissionTypes, Permissions } = require('librechat-data-provider');
|
||||
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
|
||||
const { getAvailableTools } = require('~/server/controllers/PluginController');
|
||||
const v1 = require('~/server/controllers/agents/v1');
|
||||
const actions = require('./actions');
|
||||
|
||||
|
|
@ -36,9 +37,7 @@ router.use('/actions', actions);
|
|||
* @route GET /agents/tools
|
||||
* @returns {TPlugin[]} 200 - application/json
|
||||
*/
|
||||
router.use('/tools', (req, res) => {
|
||||
res.json([]);
|
||||
});
|
||||
router.use('/tools', getAvailableTools);
|
||||
|
||||
/**
|
||||
* Creates an agent.
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ const {
|
|||
} = require('~/server/middleware');
|
||||
const { initializeClient } = require('~/server/services/Endpoints/bedrock');
|
||||
const AgentController = require('~/server/controllers/agents/request');
|
||||
const addTitle = require('~/server/services/Endpoints/bedrock/title');
|
||||
const addTitle = require('~/server/services/Endpoints/agents/title');
|
||||
|
||||
router.post('/abort', handleAbort());
|
||||
|
||||
|
|
|
|||
|
|
@ -1,18 +1,22 @@
|
|||
const fs = require('fs').promises;
|
||||
const express = require('express');
|
||||
const { EnvVar } = require('@librechat/agents');
|
||||
const {
|
||||
isUUID,
|
||||
checkOpenAIStorage,
|
||||
FileSources,
|
||||
EModelEndpoint,
|
||||
isAgentsEndpoint,
|
||||
checkOpenAIStorage,
|
||||
} = require('librechat-data-provider');
|
||||
const {
|
||||
filterFile,
|
||||
processFileUpload,
|
||||
processDeleteRequest,
|
||||
processAgentFileUpload,
|
||||
} = require('~/server/services/Files/process');
|
||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
|
||||
const { loadAuthValues } = require('~/app/clients/tools/util');
|
||||
const { getFiles } = require('~/models/File');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
|
|
@ -64,6 +68,11 @@ router.delete('/', async (req, res) => {
|
|||
|
||||
await processDeleteRequest({ req, files });
|
||||
|
||||
logger.debug(
|
||||
`[/files] Files deleted successfully: ${files.map(
|
||||
(f, i) => `${f.file_id}${i < files.length - 1 ? ', ' : ''}`,
|
||||
)}`,
|
||||
);
|
||||
res.status(200).json({ message: 'Files deleted successfully' });
|
||||
} catch (error) {
|
||||
logger.error('[/files] Error deleting files:', error);
|
||||
|
|
@ -71,6 +80,36 @@ router.delete('/', async (req, res) => {
|
|||
}
|
||||
});
|
||||
|
||||
// Streams a code-execution output file back to the client.
// Route params: sessionId (code interpreter session), fileId (generated file id).
router.get('/code/download/:sessionId/:fileId', async (req, res) => {
  try {
    const { sessionId, fileId } = req.params;
    const logPrefix = `Session ID: ${sessionId} | File ID: ${fileId} | Code output download requested by user `;
    logger.debug(logPrefix);

    if (!sessionId || !fileId) {
      return res.status(400).send('Bad request');
    }

    // Resolve the download-stream implementation for the code-execution source;
    // bail out with 501 if this strategy has no stream support.
    const { getDownloadStream } = getStrategyFunctions(FileSources.execute_code);
    if (!getDownloadStream) {
      logger.warn(
        `${logPrefix} has no stream method implemented for ${FileSources.execute_code} source`,
      );
      return res.status(501).send('Not Implemented');
    }

    // Fetch the per-user API key for the code-execution service.
    const result = await loadAuthValues({ userId: req.user.id, authFields: [EnvVar.CODE_API_KEY] });

    /** @type {AxiosResponse<ReadableStream> | undefined} */
    const response = await getDownloadStream(`${sessionId}/${fileId}`, result[EnvVar.CODE_API_KEY]);
    // Mirror upstream headers (content type/length), then pipe the body through.
    // Order matters: headers must be set before the stream starts writing.
    res.set(response.headers);
    response.data.pipe(res);
  } catch (error) {
    logger.error('Error downloading file:', error);
    res.status(500).send('Error downloading file');
  }
});
|
||||
|
||||
router.get('/download/:userId/:file_id', async (req, res) => {
|
||||
try {
|
||||
const { userId, file_id } = req.params;
|
||||
|
|
@ -154,6 +193,10 @@ router.post('/', async (req, res) => {
|
|||
metadata.temp_file_id = metadata.file_id;
|
||||
metadata.file_id = req.file_id;
|
||||
|
||||
if (isAgentsEndpoint(metadata.endpoint)) {
|
||||
return await processAgentFileUpload({ req, res, file, metadata });
|
||||
}
|
||||
|
||||
await processFileUpload({ req, res, file, metadata });
|
||||
} catch (error) {
|
||||
let message = 'Error processing file';
|
||||
|
|
|
|||
|
|
@ -49,6 +49,10 @@ module.exports = {
|
|||
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY ?? process.env.BEDROCK_AWS_DEFAULT_REGION,
|
||||
),
|
||||
/* key will be part of separate config */
|
||||
[EModelEndpoint.agents]: generateConfig(process.env.I_AM_A_TEAPOT),
|
||||
[EModelEndpoint.agents]: generateConfig(
|
||||
process.env.EXPERIMENTAL_AGENTS,
|
||||
undefined,
|
||||
EModelEndpoint.agents,
|
||||
),
|
||||
},
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,13 +1,12 @@
|
|||
const { getAgent } = require('~/models/Agent');
|
||||
const { loadAgent } = require('~/models/Agent');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const buildOptions = (req, endpoint, parsedBody) => {
|
||||
const { agent_id, instructions, spec, ...model_parameters } = parsedBody;
|
||||
|
||||
const agentPromise = getAgent({
|
||||
id: agent_id,
|
||||
// TODO: better author handling
|
||||
author: req.user.id,
|
||||
const agentPromise = loadAgent({
|
||||
req,
|
||||
agent_id,
|
||||
}).catch((error) => {
|
||||
logger.error(`[/agents/:${agent_id}] Error retrieving agent during build options step`, error);
|
||||
return undefined;
|
||||
|
|
|
|||
|
|
@ -14,14 +14,16 @@ const { tool } = require('@langchain/core/tools');
|
|||
const { createContentAggregator } = require('@librechat/agents');
|
||||
const {
|
||||
EModelEndpoint,
|
||||
providerEndpointMap,
|
||||
getResponseSender,
|
||||
providerEndpointMap,
|
||||
} = require('librechat-data-provider');
|
||||
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
|
||||
// for testing purposes
|
||||
// const createTavilySearchTool = require('~/app/clients/tools/structured/TavilySearch');
|
||||
const {
|
||||
getDefaultHandlers,
|
||||
createToolEndCallback,
|
||||
} = require('~/server/controllers/agents/callbacks');
|
||||
const initAnthropic = require('~/server/services/Endpoints/anthropic/initializeClient');
|
||||
const initOpenAI = require('~/server/services/Endpoints/openAI/initializeClient');
|
||||
const getBedrockOptions = require('~/server/services/Endpoints/bedrock/options');
|
||||
const { loadAgentTools } = require('~/server/services/ToolService');
|
||||
const AgentClient = require('~/server/controllers/agents/client');
|
||||
const { getModelMaxTokens } = require('~/utils');
|
||||
|
|
@ -50,6 +52,7 @@ const providerConfigMap = {
|
|||
[EModelEndpoint.openAI]: initOpenAI,
|
||||
[EModelEndpoint.azureOpenAI]: initOpenAI,
|
||||
[EModelEndpoint.anthropic]: initAnthropic,
|
||||
[EModelEndpoint.bedrock]: getBedrockOptions,
|
||||
};
|
||||
|
||||
const initializeClient = async ({ req, res, endpointOption }) => {
|
||||
|
|
@ -58,34 +61,33 @@ const initializeClient = async ({ req, res, endpointOption }) => {
|
|||
}
|
||||
|
||||
// TODO: use endpointOption to determine options/modelOptions
|
||||
/** @type {Array<UsageMetadata>} */
|
||||
const collectedUsage = [];
|
||||
/** @type {ArtifactPromises} */
|
||||
const artifactPromises = [];
|
||||
const { contentParts, aggregateContent } = createContentAggregator();
|
||||
const eventHandlers = getDefaultHandlers({ res, aggregateContent });
|
||||
|
||||
// const tools = [createTavilySearchTool()];
|
||||
// const tools = [_getWeather];
|
||||
// const tool_calls = [{ name: 'getPeople_action_swapi---dev' }];
|
||||
// const tool_calls = [{ name: 'dalle' }];
|
||||
// const tool_calls = [{ name: 'getItmOptions_action_YWlhcGkzLn' }];
|
||||
// const tool_calls = [{ name: 'tavily_search_results_json' }];
|
||||
// const tool_calls = [
|
||||
// { name: 'searchListings_action_emlsbG93NT' },
|
||||
// { name: 'searchAddress_action_emlsbG93NT' },
|
||||
// { name: 'searchMLS_action_emlsbG93NT' },
|
||||
// { name: 'searchCoordinates_action_emlsbG93NT' },
|
||||
// { name: 'searchUrl_action_emlsbG93NT' },
|
||||
// { name: 'getPropertyDetails_action_emlsbG93NT' },
|
||||
// ];
|
||||
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
|
||||
const eventHandlers = getDefaultHandlers({
|
||||
res,
|
||||
aggregateContent,
|
||||
toolEndCallback,
|
||||
collectedUsage,
|
||||
});
|
||||
|
||||
if (!endpointOption.agent) {
|
||||
throw new Error('No agent promise provided');
|
||||
}
|
||||
|
||||
/** @type {Agent} */
|
||||
/** @type {Agent | null} */
|
||||
const agent = await endpointOption.agent;
|
||||
if (!agent) {
|
||||
throw new Error('Agent not found');
|
||||
}
|
||||
const { tools, toolMap } = await loadAgentTools({
|
||||
req,
|
||||
tools: agent.tools,
|
||||
agent_id: agent.id,
|
||||
tool_resources: agent.tool_resources,
|
||||
// openAIApiKey: process.env.OPENAI_API_KEY,
|
||||
});
|
||||
|
||||
|
|
@ -121,8 +123,11 @@ const initializeClient = async ({ req, res, endpointOption }) => {
|
|||
contentParts,
|
||||
modelOptions,
|
||||
eventHandlers,
|
||||
collectedUsage,
|
||||
artifactPromises,
|
||||
endpoint: EModelEndpoint.agents,
|
||||
configOptions: options.configOptions,
|
||||
attachments: endpointOption.attachments,
|
||||
maxContextTokens:
|
||||
agent.max_context_tokens ??
|
||||
getModelMaxTokens(modelOptions.model, providerEndpointMap[agent.provider]),
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ const addTitle = async (req, { text, response, client }) => {
|
|||
conversationId: response.conversationId,
|
||||
title,
|
||||
},
|
||||
{ context: 'api/server/services/Endpoints/bedrock/title.js' },
|
||||
{ context: 'api/server/services/Endpoints/agents/title.js' },
|
||||
);
|
||||
};
|
||||
|
||||
34
api/server/services/Files/Code/crud.js
Normal file
34
api/server/services/Files/Code/crud.js
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
// downloadStream.js
|
||||
|
||||
const axios = require('axios');
|
||||
const { getCodeBaseURL } = require('@librechat/agents');
|
||||
|
||||
const baseURL = getCodeBaseURL();
|
||||
|
||||
/**
 * Retrieves a download stream for a specified file.
 * @param {string} fileIdentifier - The identifier for the file (e.g., "sessionId/fileId").
 * @param {string} apiKey - The API key for authentication.
 * @returns {Promise<AxiosResponse>} A promise that resolves to a readable stream of the file content.
 * @throws {Error} If there's an error during the download process.
 */
async function getCodeOutputDownloadStream(fileIdentifier, apiKey) {
  try {
    return await axios({
      method: 'get',
      url: `${baseURL}/download/${fileIdentifier}`,
      responseType: 'stream',
      headers: {
        'User-Agent': 'LibreChat/1.0',
        'X-API-Key': apiKey,
      },
      // Fail fast rather than hanging on an unresponsive code-execution host.
      timeout: 15000,
    });
  } catch (error) {
    // Preserve the original error and stack for debugging via `cause`.
    throw new Error(`Error downloading file: ${error.message}`, { cause: error });
  }
}
|
||||
|
||||
module.exports = { getCodeOutputDownloadStream };
|
||||
5
api/server/services/Files/Code/index.js
Normal file
5
api/server/services/Files/Code/index.js
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
// Single entry point re-exporting all code-file CRUD helpers.
const crud = require('./crud');

module.exports = Object.assign({}, crud);
|
||||
87
api/server/services/Files/Code/process.js
Normal file
87
api/server/services/Files/Code/process.js
Normal file
|
|
@ -0,0 +1,87 @@
|
|||
const path = require('path');
|
||||
const { v4 } = require('uuid');
|
||||
const axios = require('axios');
|
||||
const { getCodeBaseURL, EnvVar } = require('@librechat/agents');
|
||||
const { FileContext, imageExtRegex } = require('librechat-data-provider');
|
||||
const { convertImage } = require('~/server/services/Files/images/convert');
|
||||
const { loadAuthValues } = require('~/app/clients/tools/util');
|
||||
const { createFile } = require('~/models/File');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/**
 * Process code-execution output files: image outputs are downloaded from the
 * code-execution service, converted to the configured target format, and saved;
 * non-image outputs are returned as transient download metadata.
 * @param {Object} params
 * @param {ServerRequest} params.req - The Express request object.
 * @param {string} params.id - The file ID.
 * @param {string} params.name - The filename.
 * @param {string} params.toolCallId - The tool call ID that generated the file.
 * @param {string} params.sessionId - The code execution session ID.
 * @param {string} params.conversationId - The current conversation ID.
 * @param {string} params.messageId - The current message ID.
 * @returns {Promise<MongoFile & { messageId: string, toolCallId: string } | { filename: string; filepath: string; expiresAt: number; conversationId: string; toolCallId: string; messageId: string } | undefined>} The file metadata or undefined if an error occurs.
 */
const processCodeOutput = async ({
  req,
  id,
  name,
  toolCallId,
  conversationId,
  messageId,
  sessionId,
}) => {
  const currentDate = new Date();
  const baseURL = getCodeBaseURL();
  const fileExt = path.extname(name);
  // Non-image outputs are not persisted; they are served on demand via the
  // /api/files/code/download route and expire after 24 hours.
  if (!fileExt || !imageExtRegex.test(name)) {
    return {
      filename: name,
      filepath: `/api/files/code/download/${sessionId}/${id}`,
      /** Note: expires 24 hours after creation */
      expiresAt: currentDate.getTime() + 86400000,
      conversationId,
      toolCallId,
      messageId,
    };
  }

  try {
    const formattedDate = currentDate.toISOString();
    // Per-user API key for the code-execution service.
    const result = await loadAuthValues({ userId: req.user.id, authFields: [EnvVar.CODE_API_KEY] });
    const response = await axios({
      method: 'get',
      url: `${baseURL}/download/${sessionId}/${id}`,
      responseType: 'arraybuffer',
      headers: {
        'User-Agent': 'LibreChat/1.0',
        'X-API-Key': result[EnvVar.CODE_API_KEY],
      },
      timeout: 15000,
    });

    const buffer = Buffer.from(response.data, 'binary');

    // New internal file id; the converted image is stored under it.
    const file_id = v4();
    const _file = await convertImage(req, buffer, 'high', `${file_id}${fileExt}`);
    const file = {
      ..._file,
      file_id,
      usage: 1,
      filename: name,
      conversationId,
      user: req.user.id,
      type: `image/${req.app.locals.imageOutputType}`,
      createdAt: formattedDate,
      updatedAt: formattedDate,
      source: req.app.locals.fileStrategy,
      context: FileContext.execute_code,
    };
    // Intentionally not awaited — fire-and-forget DB write; NOTE(review): confirm
    // callers do not depend on the record existing before this resolves.
    createFile(file, true);
    /** Note: `messageId` & `toolCallId` are not part of file DB schema; message object records associated file ID */
    return Object.assign(file, { messageId, toolCallId });
  } catch (error) {
    // Best-effort: log and resolve to undefined (documented in @returns).
    logger.error('Error downloading file:', error);
  }
};
|
||||
|
||||
module.exports = {
|
||||
processCodeOutput,
|
||||
};
|
||||
|
|
@ -8,6 +8,7 @@ const {
|
|||
FileSources,
|
||||
imageExtRegex,
|
||||
EModelEndpoint,
|
||||
EToolResources,
|
||||
mergeFileConfig,
|
||||
hostImageIdSuffix,
|
||||
checkOpenAIStorage,
|
||||
|
|
@ -16,6 +17,7 @@ const {
|
|||
} = require('librechat-data-provider');
|
||||
const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2');
|
||||
const { convertImage, resizeAndConvert } = require('~/server/services/Files/images');
|
||||
const { addAgentResourceFile, removeAgentResourceFile } = require('~/models/Agent');
|
||||
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
|
||||
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
|
||||
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
|
||||
|
|
@ -124,6 +126,17 @@ const processDeleteRequest = async ({ req, files }) => {
|
|||
for (const file of files) {
|
||||
const source = file.source ?? FileSources.local;
|
||||
|
||||
if (req.body.agent_id && req.body.tool_resource) {
|
||||
promises.push(
|
||||
removeAgentResourceFile({
|
||||
req,
|
||||
file_id: file.file_id,
|
||||
agent_id: req.body.agent_id,
|
||||
tool_resource: req.body.tool_resource,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
if (checkOpenAIStorage(source) && !client[source]) {
|
||||
await initializeClients();
|
||||
}
|
||||
|
|
@ -398,6 +411,95 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
|
|||
res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
|
||||
};
|
||||
|
||||
/**
 * Applies the current strategy for agent file uploads.
 * Saves file metadata to the database with an expiry TTL.
 * Files must be deleted from the server filesystem manually.
 *
 * Validates the upload (tool resource required for agent uploads; no images
 * for file search), uploads via the storage strategy selected by the tool
 * resource, registers the file on the agent when applicable, and persists the
 * file record.
 *
 * @param {Object} params - The parameters object.
 * @param {Express.Request} params.req - The Express request object.
 * @param {Express.Response} params.res - The Express response object.
 * @param {Express.Multer.File} params.file - The uploaded file.
 * @param {FileMetadata} params.metadata - Additional metadata for the file.
 * @returns {Promise<void>}
 * @throws {Error} If an agent upload has no tool resource, if an image is
 *   uploaded for the file search resource, or if a non-message-attachment
 *   upload has no agent ID.
 */
const processAgentFileUpload = async ({ req, res, file, metadata }) => {
  const { agent_id, tool_resource } = metadata;
  if (agent_id && !tool_resource) {
    throw new Error('No tool resource provided for agent file upload');
  }

  // The file search (vector DB) resource indexes documents only; images are unsupported.
  if (tool_resource === EToolResources.file_search && file.mimetype.startsWith('image')) {
    throw new Error('Image uploads are not supported for file search tool resources');
  }

  // `const`: never reassigned below.
  const messageAttachment = !!metadata.message_file;
  if (!messageAttachment && !agent_id) {
    throw new Error('No agent ID provided for agent file upload');
  }

  // File-search uploads go to the vector DB; everything else uses the
  // app-configured file strategy.
  const source =
    tool_resource === EToolResources.file_search
      ? FileSources.vectordb
      : req.app.locals.fileStrategy;
  const { handleFileUpload } = getStrategyFunctions(source);
  const { file_id, temp_file_id } = metadata;

  const {
    bytes,
    filename,
    filepath: _filepath,
    embedded,
    height,
    width,
  } = await handleFileUpload({
    req,
    file,
    file_id,
  });

  let filepath = _filepath;

  // Only register the file as an agent resource when it is not a one-off
  // message attachment.
  if (!messageAttachment && tool_resource) {
    await addAgentResourceFile({
      req,
      agent_id,
      file_id,
      tool_resource,
    });
  }

  // Images additionally get a processed copy; surface its filepath in the
  // response instead of the raw upload's.
  if (file.mimetype.startsWith('image')) {
    const result = await processImageFile({
      req,
      file,
      metadata: { file_id: v4() },
      returnFile: true,
    });
    filepath = result.filepath;
  }

  const result = await createFile(
    {
      user: req.user.id,
      file_id,
      temp_file_id,
      bytes,
      filepath,
      filename: filename ?? file.originalname,
      context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
      model: messageAttachment ? undefined : req.body.model,
      type: file.mimetype,
      embedded,
      source,
      height,
      width,
    },
    true,
  );
  res.status(200).json({ message: 'Agent file uploaded and processed successfully', ...result });
};
|
||||
|
||||
/**
|
||||
* @param {object} params - The params object.
|
||||
* @param {OpenAI} params.openai - The OpenAI client instance.
|
||||
|
|
@ -654,5 +756,6 @@ module.exports = {
|
|||
uploadImageBuffer,
|
||||
processFileUpload,
|
||||
processDeleteRequest,
|
||||
processAgentFileUpload,
|
||||
retrieveAndProcessFile,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ const {
|
|||
} = require('./Local');
|
||||
const { uploadOpenAIFile, deleteOpenAIFile, getOpenAIFileStream } = require('./OpenAI');
|
||||
const { uploadVectors, deleteVectors } = require('./VectorDB');
|
||||
const { getCodeOutputDownloadStream } = require('./Code');
|
||||
|
||||
/**
|
||||
* Firebase Storage Strategy Functions
|
||||
|
|
@ -103,6 +104,31 @@ const openAIStrategy = () => ({
|
|||
getDownloadStream: getOpenAIFileStream,
|
||||
});
|
||||
|
||||
/**
 * Code Output Strategy Functions
 *
 * Strategy for files produced by code execution; only downloading is
 * supported here.
 *
 * Note: null values mean that the strategy is not supported.
 * */
const codeOutputStrategy = () => {
  // Every operation except downloading is unsupported for code outputs.
  const unsupported = {
    /** @type {typeof saveFileFromURL | null} */
    saveURL: null,
    /** @type {typeof getLocalFileURL | null} */
    getFileURL: null,
    /** @type {typeof saveLocalBuffer | null} */
    saveBuffer: null,
    /** @type {typeof processLocalAvatar | null} */
    processAvatar: null,
    /** @type {typeof uploadLocalImage | null} */
    handleImageUpload: null,
    /** @type {typeof prepareImagesLocal | null} */
    prepareImagePayload: null,
    /** @type {typeof deleteLocalFile | null} */
    deleteFile: null,
    /** @type {typeof uploadVectors | null} */
    handleFileUpload: null,
  };
  return {
    ...unsupported,
    getDownloadStream: getCodeOutputDownloadStream,
  };
};
|
||||
|
||||
// Strategy Selector
|
||||
const getStrategyFunctions = (fileSource) => {
|
||||
if (fileSource === FileSources.firebase) {
|
||||
|
|
@ -115,6 +141,8 @@ const getStrategyFunctions = (fileSource) => {
|
|||
return openAIStrategy();
|
||||
} else if (fileSource === FileSources.vectordb) {
|
||||
return vectorStrategy();
|
||||
} else if (fileSource === FileSources.execute_code) {
|
||||
return codeOutputStrategy();
|
||||
} else {
|
||||
throw new Error('Invalid file source');
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,9 +1,8 @@
|
|||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { StructuredTool } = require('langchain/tools');
|
||||
const { tool: toolFn } = require('@langchain/core/tools');
|
||||
const { zodToJsonSchema } = require('zod-to-json-schema');
|
||||
const { Calculator } = require('langchain/tools/calculator');
|
||||
const { tool: toolFn, Tool } = require('@langchain/core/tools');
|
||||
const {
|
||||
Tools,
|
||||
ContentTypes,
|
||||
|
|
@ -70,7 +69,7 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
|
|||
continue;
|
||||
}
|
||||
|
||||
if (!ToolClass || !(ToolClass.prototype instanceof StructuredTool)) {
|
||||
if (!ToolClass || !(ToolClass.prototype instanceof Tool)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -378,11 +377,12 @@ async function processRequiredActions(client, requiredActions) {
|
|||
* @param {Object} params - Run params containing user and request information.
|
||||
* @param {ServerRequest} params.req - The request object.
|
||||
* @param {string} params.agent_id - The agent ID.
|
||||
* @param {string[]} params.tools - The agent's available tools.
|
||||
* @param {Agent['tools']} params.tools - The agent's available tools.
|
||||
* @param {Agent['tool_resources']} params.tool_resources - The agent's available tool resources.
|
||||
* @param {string | undefined} [params.openAIApiKey] - The OpenAI API key.
|
||||
* @returns {Promise<{ tools?: StructuredTool[]; toolMap?: Record<string, StructuredTool>}>} The combined toolMap.
|
||||
*/
|
||||
async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) {
|
||||
async function loadAgentTools({ req, agent_id, tools, tool_resources, openAIApiKey }) {
|
||||
if (!tools || tools.length === 0) {
|
||||
return {};
|
||||
}
|
||||
|
|
@ -394,6 +394,7 @@ async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) {
|
|||
options: {
|
||||
req,
|
||||
openAIApiKey,
|
||||
tool_resources,
|
||||
returnMetadata: true,
|
||||
processFileURL,
|
||||
uploadImageBuffer,
|
||||
|
|
@ -405,6 +406,10 @@ async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) {
|
|||
const agentTools = [];
|
||||
for (let i = 0; i < loadedTools.length; i++) {
|
||||
const tool = loadedTools[i];
|
||||
if (tool.name && (tool.name === Tools.execute_code || tool.name === Tools.file_search)) {
|
||||
agentTools.push(tool);
|
||||
continue;
|
||||
}
|
||||
|
||||
const toolInstance = toolFn(
|
||||
async (...args) => {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
const {
|
||||
Capabilities,
|
||||
EModelEndpoint,
|
||||
isAgentsEndpoint,
|
||||
AgentCapabilities,
|
||||
isAssistantsEndpoint,
|
||||
defaultRetrievalModels,
|
||||
defaultAssistantsVersion,
|
||||
|
|
@ -160,8 +162,8 @@ const isUserProvided = (value) => value === 'user_provided';
|
|||
/**
|
||||
* Generate the configuration for a given key and base URL.
|
||||
* @param {string} key
|
||||
* @param {string} baseURL
|
||||
* @param {string} endpoint
|
||||
* @param {string} [baseURL]
|
||||
* @param {string} [endpoint]
|
||||
* @returns {boolean | { userProvide: boolean, userProvideURL?: boolean }}
|
||||
*/
|
||||
function generateConfig(key, baseURL, endpoint) {
|
||||
|
|
@ -177,7 +179,7 @@ function generateConfig(key, baseURL, endpoint) {
|
|||
}
|
||||
|
||||
const assistants = isAssistantsEndpoint(endpoint);
|
||||
|
||||
const agents = isAgentsEndpoint(endpoint);
|
||||
if (assistants) {
|
||||
config.retrievalModels = defaultRetrievalModels;
|
||||
config.capabilities = [
|
||||
|
|
@ -189,6 +191,18 @@ function generateConfig(key, baseURL, endpoint) {
|
|||
];
|
||||
}
|
||||
|
||||
if (agents) {
|
||||
config.capabilities = [
|
||||
AgentCapabilities.file_search,
|
||||
AgentCapabilities.actions,
|
||||
AgentCapabilities.tools,
|
||||
];
|
||||
|
||||
if (key === 'EXPERIMENTAL_RUN_CODE') {
|
||||
config.capabilities.push(AgentCapabilities.execute_code);
|
||||
}
|
||||
}
|
||||
|
||||
if (assistants && endpoint === EModelEndpoint.azureAssistants) {
|
||||
config.version = defaultAssistantsVersion.azureAssistants;
|
||||
} else if (assistants) {
|
||||
|
|
|
|||
|
|
@ -646,9 +646,22 @@
|
|||
* @property {string} [temp_file_id] - The temporary identifier of the file.
|
||||
* @property {string} endpoint - The conversation endpoint origin for the file upload.
|
||||
* @property {string} [assistant_id] - The assistant ID if file upload is in the `knowledge` context.
|
||||
* @property {string} [tool_resource] - The relevant tool resource for the file upload.
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports FileObject
|
||||
* @typedef {{file_id: string, filepath: string, source: string, bytes?: number, width?: number, height?: number}} FileObject
|
||||
* @memberof typedefs
|
||||
*
|
||||
|
||||
/**
|
||||
* @exports ArtifactPromises
|
||||
* @typedef {Promise<MongoFile | { filename: string; filepath: string; expires: number;} | null>[]} ArtifactPromises
|
||||
* @memberof typedefs
|
||||
*
|
||||
|
||||
/**
|
||||
* @typedef {Object} ImageOnlyMetadata
|
||||
* @property {number} width - The width of the image.
|
||||
|
|
@ -706,6 +719,12 @@
|
|||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports TAttachment
|
||||
* @typedef {import('librechat-data-provider').TAttachment} TAttachment
|
||||
* @memberof typedefs
|
||||
*/
|
||||
|
||||
/**
|
||||
* @exports AssistantCreateParams
|
||||
* @typedef {import('librechat-data-provider').AssistantCreateParams} AssistantCreateParams
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue