mirror of https://github.com/danny-avila/LibreChat.git
synced 2025-09-22 06:00:56 +02:00

Merge branch 'main' into refactor/improve-user-friendly-errors

This commit is contained in: commit ffbe093068

80 changed files with 2985 additions and 307 deletions
api/cache/cacheConfig.js (25 changes, vendored)

@@ -1,4 +1,5 @@
const fs = require('fs');
const { logger } = require('@librechat/data-schemas');
const { math, isEnabled } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');

@@ -34,13 +35,35 @@ if (FORCED_IN_MEMORY_CACHE_NAMESPACES.length > 0) {
  }
}

/** Helper function to safely read Redis CA certificate from file
 * @returns {string|null} The contents of the CA certificate file, or null if not set or on error
 */
const getRedisCA = () => {
  const caPath = process.env.REDIS_CA;
  if (!caPath) {
    return null;
  }

  try {
    if (fs.existsSync(caPath)) {
      return fs.readFileSync(caPath, 'utf8');
    } else {
      logger.warn(`Redis CA certificate file not found: ${caPath}`);
      return null;
    }
  } catch (error) {
    logger.error(`Failed to read Redis CA certificate file '${caPath}':`, error);
    return null;
  }
};

const cacheConfig = {
  FORCED_IN_MEMORY_CACHE_NAMESPACES,
  USE_REDIS,
  REDIS_URI: process.env.REDIS_URI,
  REDIS_USERNAME: process.env.REDIS_USERNAME,
  REDIS_PASSWORD: process.env.REDIS_PASSWORD,
  REDIS_CA: process.env.REDIS_CA ? fs.readFileSync(process.env.REDIS_CA, 'utf8') : null,
  REDIS_CA: getRedisCA(),
  REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '',
  REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
  REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),
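The hunk above replaces the inline fs.readFileSync call with the getRedisCA() helper, so a missing or unreadable CA file is logged and treated as "no CA" instead of throwing at startup. As a rough illustration only (the Redis client wiring is not part of this diff, and the ioredis usage and import path below are assumptions), the resulting REDIS_CA string would typically feed the client's TLS options:

const Redis = require('ioredis');
// Import path assumed for illustration; the real consumer lives elsewhere in the codebase.
const { cacheConfig } = require('~/cache/cacheConfig');

// Attach TLS options only when a CA certificate was successfully read;
// getRedisCA() already returned null (with a log entry) if the file was missing or unreadable.
const client = new Redis(cacheConfig.REDIS_URI, {
  username: cacheConfig.REDIS_USERNAME,
  password: cacheConfig.REDIS_PASSWORD,
  ...(cacheConfig.REDIS_CA ? { tls: { ca: cacheConfig.REDIS_CA } } : {}),
});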
@@ -49,6 +49,14 @@ const createAgent = async (agentData) => {
 */
const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();

/**
 * Get multiple agent documents based on the provided search parameters.
 *
 * @param {Object} searchParameter - The search parameters to find agents.
 * @returns {Promise<Agent[]>} Array of agent documents as plain objects.
 */
const getAgents = async (searchParameter) => await Agent.find(searchParameter).lean();

/**
 * Load an agent based on the provided ID
 *

@@ -835,6 +843,7 @@ const countPromotedAgents = async () => {

module.exports = {
  getAgent,
  getAgents,
  loadAgent,
  createAgent,
  updateAgent,

@@ -42,7 +42,7 @@ const getToolFilesByIds = async (fileIds, toolResourceSet) => {
    $or: [],
  };

  if (toolResourceSet.has(EToolResources.ocr)) {
  if (toolResourceSet.has(EToolResources.context)) {
    filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
  }
  if (toolResourceSet.has(EToolResources.file_search)) {

@@ -158,7 +158,7 @@ describe('duplicateAgent', () => {
    });
  });

  it('should handle tool_resources.ocr correctly', async () => {
  it('should convert `tool_resources.ocr` to `tool_resources.context`', async () => {
    const mockAgent = {
      id: 'agent_123',
      name: 'Test Agent',

@@ -178,7 +178,7 @@ describe('duplicateAgent', () => {
    expect(createAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        tool_resources: {
          ocr: { enabled: true, config: 'test' },
          context: { enabled: true, config: 'test' },
        },
      }),
    );
@@ -2,7 +2,12 @@ const { z } = require('zod');
const fs = require('fs').promises;
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { agentCreateSchema, agentUpdateSchema } = require('@librechat/api');
const {
  agentCreateSchema,
  agentUpdateSchema,
  mergeAgentOcrConversion,
  convertOcrToContextInPlace,
} = require('@librechat/api');
const {
  Tools,
  Constants,

@@ -198,19 +203,32 @@ const getAgentHandler = async (req, res, expandProperties = false) => {
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
 * @param {AgentUpdateParams} req.body - The Agent update parameters.
 * @returns {Agent} 200 - success response - application/json
 * @returns {Promise<Agent>} 200 - success response - application/json
 */
const updateAgentHandler = async (req, res) => {
  try {
    const id = req.params.id;
    const validatedData = agentUpdateSchema.parse(req.body);
    const { _id, ...updateData } = removeNullishValues(validatedData);

    // Convert OCR to context in incoming updateData
    convertOcrToContextInPlace(updateData);

    const existingAgent = await getAgent({ id });

    if (!existingAgent) {
      return res.status(404).json({ error: 'Agent not found' });
    }

    // Convert legacy OCR tool resource to context format in existing agent
    const ocrConversion = mergeAgentOcrConversion(existingAgent, updateData);
    if (ocrConversion.tool_resources) {
      updateData.tool_resources = ocrConversion.tool_resources;
    }
    if (ocrConversion.tools) {
      updateData.tools = ocrConversion.tools;
    }

    let updatedAgent =
      Object.keys(updateData).length > 0
        ? await updateAgent({ id }, updateData, {

@@ -255,7 +273,7 @@ const updateAgentHandler = async (req, res) => {
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
 * @returns {Agent} 201 - success response - application/json
 * @returns {Promise<Agent>} 201 - success response - application/json
 */
const duplicateAgentHandler = async (req, res) => {
  const { id } = req.params;

@@ -288,9 +306,19 @@ const duplicateAgentHandler = async (req, res) => {
    hour12: false,
  })})`;

  if (_tool_resources?.[EToolResources.context]) {
    cloneData.tool_resources = {
      [EToolResources.context]: _tool_resources[EToolResources.context],
    };
  }

  if (_tool_resources?.[EToolResources.ocr]) {
    cloneData.tool_resources = {
      [EToolResources.ocr]: _tool_resources[EToolResources.ocr],
      /** Legacy conversion from `ocr` to `context` */
      [EToolResources.context]: {
        ...(_tool_resources[EToolResources.context] ?? {}),
        ..._tool_resources[EToolResources.ocr],
      },
    };
  }
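A recurring theme in these controller hunks is migrating the legacy `ocr` tool resource to the new `context` key while keeping existing agents working. A minimal sketch of that merge idea, simplified and not the actual implementation (the real helpers are convertOcrToContextInPlace and mergeAgentOcrConversion from @librechat/api, whose exact behavior may differ):

const { EToolResources } = require('librechat-data-provider');

/** Simplified illustration: fold a legacy `ocr` resource into `context`, preserving any existing `context` values. */
function convertOcrToContext(toolResources = {}) {
  const { [EToolResources.ocr]: ocr, ...rest } = toolResources;
  if (!ocr) {
    return toolResources;
  }
  return {
    ...rest,
    [EToolResources.context]: {
      ...(rest[EToolResources.context] ?? {}),
      ...ocr,
    },
  };
}

// Example: { ocr: { file_ids: ['a'] } } becomes { context: { file_ids: ['a'] } }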
@@ -382,7 +410,7 @@ const duplicateAgentHandler = async (req, res) => {
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
 * @returns {Agent} 200 - success response - application/json
 * @returns {Promise<Agent>} 200 - success response - application/json
 */
const deleteAgentHandler = async (req, res) => {
  try {

@@ -484,7 +512,7 @@ const getListAgentsHandler = async (req, res) => {
 * @param {Express.Multer.File} req.file - The avatar image file.
 * @param {object} req.body - Request body
 * @param {string} [req.body.avatar] - Optional avatar for the agent's avatar.
 * @returns {Object} 200 - success response - application/json
 * @returns {Promise<void>} 200 - success response - application/json
 */
const uploadAgentAvatarHandler = async (req, res) => {
  try {

@@ -512,6 +512,7 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
      mockReq.params.id = existingAgentId;
      mockReq.body = {
        tool_resources: {
          /** Legacy conversion from `ocr` to `context` */
          ocr: {
            file_ids: ['ocr1', 'ocr2'],
          },

@@ -531,7 +532,8 @@ describe('Agent Controllers - Mass Assignment Protection', () => {

      const updatedAgent = mockRes.json.mock.calls[0][0];
      expect(updatedAgent.tool_resources).toBeDefined();
      expect(updatedAgent.tool_resources.ocr).toBeDefined();
      expect(updatedAgent.tool_resources.ocr).toBeUndefined();
      expect(updatedAgent.tool_resources.context).toBeDefined();
      expect(updatedAgent.tool_resources.execute_code).toBeDefined();
      expect(updatedAgent.tool_resources.invalid_tool).toBeUndefined();
    });
@@ -1,7 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
const { getEffectivePermissions } = require('~/server/services/PermissionService');
const { getAgent } = require('~/models/Agent');
const { getAgents } = require('~/models/Agent');
const { getFiles } = require('~/models/File');

/**

@@ -10,11 +10,12 @@ const { getFiles } = require('~/models/File');
 */
const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
  try {
    // Find agents that have this file in their tool_resources
    const agentsWithFile = await getAgent({
    /** Agents that have this file in their tool_resources */
    const agentsWithFile = await getAgents({
      $or: [
        { 'tool_resources.file_search.file_ids': fileId },
        { 'tool_resources.execute_code.file_ids': fileId },
        { 'tool_resources.file_search.file_ids': fileId },
        { 'tool_resources.context.file_ids': fileId },
        { 'tool_resources.ocr.file_ids': fileId },
      ],
    });

@@ -24,7 +25,7 @@ const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
    }

    // Check if user has access to any of these agents
    for (const agent of Array.isArray(agentsWithFile) ? agentsWithFile : [agentsWithFile]) {
    for (const agent of agentsWithFile) {
      // Check if user is the agent author
      if (agent.author && agent.author.toString() === userId) {
        logger.debug(`[fileAccess] User is author of agent ${agent.id}`);

@@ -83,7 +84,6 @@ const fileAccess = async (req, res, next) => {
      });
    }

    // Get the file
    const [file] = await getFiles({ file_id: fileId });
    if (!file) {
      return res.status(404).json({

@@ -92,20 +92,18 @@ const fileAccess = async (req, res, next) => {
      });
    }

    // Check if user owns the file
    if (file.user && file.user.toString() === userId) {
      req.fileAccess = { file };
      return next();
    }

    // Check agent-based access (file inherits agent permissions)
    /** Agent-based access (file inherits agent permissions) */
    const hasAgentAccess = await checkAgentBasedFileAccess({ userId, role: userRole, fileId });
    if (hasAgentAccess) {
      req.fileAccess = { file };
      return next();
    }

    // No access
    logger.warn(`[fileAccess] User ${userId} denied access to file ${fileId}`);
    return res.status(403).json({
      error: 'Forbidden',
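The switch from getAgent (single document) to getAgents (array) is the substantive fix in this middleware: previously only the first agent containing the file was consulted, so access could be denied even when another agent the user can view references the same file. A condensed sketch of the resulting flow, with the ACL check replaced by a stand-in callback since its exact signature is not shown in this diff:

const { getAgents } = require('~/models/Agent');

/** Returns true if the user can reach the file through any agent that references it. */
async function canAccessViaAgents({ userId, role, fileId, userCanViewAgent }) {
  const agentsWithFile = await getAgents({
    $or: [
      { 'tool_resources.file_search.file_ids': fileId },
      { 'tool_resources.execute_code.file_ids': fileId },
      { 'tool_resources.context.file_ids': fileId },
      { 'tool_resources.ocr.file_ids': fileId },
    ],
  });

  for (const agent of agentsWithFile) {
    // Authors always have access to their own agent's files.
    if (agent.author && agent.author.toString() === userId) {
      return true;
    }
    // `userCanViewAgent` is a hypothetical stand-in for the ACL check the real
    // middleware performs via getEffectivePermissions/hasPermissions.
    if (await userCanViewAgent({ userId, role, agent })) {
      return true;
    }
  }
  return false;
}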
api/server/middleware/accessResources/fileAccess.spec.js (new file, 483 lines)

@@ -0,0 +1,483 @@
const mongoose = require('mongoose');
const { ResourceType, PrincipalType, PrincipalModel } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { fileAccess } = require('./fileAccess');
const { User, Role, AclEntry } = require('~/db/models');
const { createAgent } = require('~/models/Agent');
const { createFile } = require('~/models/File');

describe('fileAccess middleware', () => {
  let mongoServer;
  let req, res, next;
  let testUser, otherUser, thirdUser;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    await mongoose.connect(mongoUri);
  });

  afterAll(async () => {
    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(async () => {
    await mongoose.connection.dropDatabase();

    // Create test role
    await Role.create({
      name: 'test-role',
      permissions: {
        AGENTS: {
          USE: true,
          CREATE: true,
          SHARED_GLOBAL: false,
        },
      },
    });

    // Create test users
    testUser = await User.create({
      email: 'test@example.com',
      name: 'Test User',
      username: 'testuser',
      role: 'test-role',
    });

    otherUser = await User.create({
      email: 'other@example.com',
      name: 'Other User',
      username: 'otheruser',
      role: 'test-role',
    });

    thirdUser = await User.create({
      email: 'third@example.com',
      name: 'Third User',
      username: 'thirduser',
      role: 'test-role',
    });

    // Setup request/response objects
    req = {
      user: { id: testUser._id.toString(), role: testUser.role },
      params: {},
    };
    res = {
      status: jest.fn().mockReturnThis(),
      json: jest.fn(),
    };
    next = jest.fn();

    jest.clearAllMocks();
  });

  describe('basic file access', () => {
    test('should allow access when user owns the file', async () => {
      // Create a file owned by testUser
      await createFile({
        user: testUser._id.toString(),
        file_id: 'file_owned_by_user',
        filepath: '/test/file.txt',
        filename: 'file.txt',
        type: 'text/plain',
        size: 100,
      });

      req.params.file_id = 'file_owned_by_user';
      await fileAccess(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
      expect(req.fileAccess.file).toBeDefined();
      expect(res.status).not.toHaveBeenCalled();
    });

    test('should deny access when user does not own the file and no agent access', async () => {
      // Create a file owned by otherUser
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'file_owned_by_other',
        filepath: '/test/file.txt',
        filename: 'file.txt',
        type: 'text/plain',
        size: 100,
      });

      req.params.file_id = 'file_owned_by_other';
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(403);
      expect(res.json).toHaveBeenCalledWith({
        error: 'Forbidden',
        message: 'Insufficient permissions to access this file',
      });
    });

    test('should return 404 when file does not exist', async () => {
      req.params.file_id = 'non_existent_file';
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(404);
      expect(res.json).toHaveBeenCalledWith({
        error: 'Not Found',
        message: 'File not found',
      });
    });

    test('should return 400 when file_id is missing', async () => {
      // Don't set file_id in params
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(400);
      expect(res.json).toHaveBeenCalledWith({
        error: 'Bad Request',
        message: 'file_id is required',
      });
    });

    test('should return 401 when user is not authenticated', async () => {
      req.user = null;
      req.params.file_id = 'some_file';

      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(401);
      expect(res.json).toHaveBeenCalledWith({
        error: 'Unauthorized',
        message: 'Authentication required',
      });
    });
  });

  describe('agent-based file access', () => {
    beforeEach(async () => {
      // Create a file owned by otherUser (not testUser)
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'shared_file_via_agent',
        filepath: '/test/shared.txt',
        filename: 'shared.txt',
        type: 'text/plain',
        size: 100,
      });
    });

    test('should allow access when user is author of agent with file', async () => {
      // Create agent owned by testUser with the file
      await createAgent({
        id: `agent_${Date.now()}`,
        name: 'Test Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: testUser._id,
        tool_resources: {
          file_search: {
            file_ids: ['shared_file_via_agent'],
          },
        },
      });

      req.params.file_id = 'shared_file_via_agent';
      await fileAccess(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
      expect(req.fileAccess.file).toBeDefined();
    });

    test('should allow access when user has VIEW permission on agent with file', async () => {
      // Create agent owned by otherUser
      const agent = await createAgent({
        id: `agent_${Date.now()}`,
        name: 'Shared Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: otherUser._id,
        tool_resources: {
          execute_code: {
            file_ids: ['shared_file_via_agent'],
          },
        },
      });

      // Grant VIEW permission to testUser
      await AclEntry.create({
        principalType: PrincipalType.USER,
        principalId: testUser._id,
        principalModel: PrincipalModel.USER,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        permBits: 1, // VIEW permission
        grantedBy: otherUser._id,
      });

      req.params.file_id = 'shared_file_via_agent';
      await fileAccess(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
    });

    test('should check file in ocr tool_resources', async () => {
      await createAgent({
        id: `agent_ocr_${Date.now()}`,
        name: 'OCR Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: testUser._id,
        tool_resources: {
          ocr: {
            file_ids: ['shared_file_via_agent'],
          },
        },
      });

      req.params.file_id = 'shared_file_via_agent';
      await fileAccess(req, res, next);

      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
    });

    test('should deny access when user has no permission on agent with file', async () => {
      // Create agent owned by otherUser without granting permission to testUser
      const agent = await createAgent({
        id: `agent_${Date.now()}`,
        name: 'Private Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: otherUser._id,
        tool_resources: {
          file_search: {
            file_ids: ['shared_file_via_agent'],
          },
        },
      });

      // Create ACL entry for otherUser only (owner)
      await AclEntry.create({
        principalType: PrincipalType.USER,
        principalId: otherUser._id,
        principalModel: PrincipalModel.USER,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        permBits: 15, // All permissions
        grantedBy: otherUser._id,
      });

      req.params.file_id = 'shared_file_via_agent';
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(403);
    });
  });

  describe('multiple agents with same file', () => {
    /**
     * This test suite verifies that when multiple agents have the same file,
     * all agents are checked for permissions, not just the first one found.
     * This ensures users can access files through any agent they have permission for.
     */

    test('should check ALL agents with file, not just first one', async () => {
      // Create a file owned by someone else
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'multi_agent_file',
        filepath: '/test/multi.txt',
        filename: 'multi.txt',
        type: 'text/plain',
        size: 100,
      });

      // Create first agent (owned by otherUser, no access for testUser)
      const agent1 = await createAgent({
        id: 'agent_no_access',
        name: 'No Access Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: otherUser._id,
        tool_resources: {
          file_search: {
            file_ids: ['multi_agent_file'],
          },
        },
      });

      // Create ACL for agent1 - only otherUser has access
      await AclEntry.create({
        principalType: PrincipalType.USER,
        principalId: otherUser._id,
        principalModel: PrincipalModel.USER,
        resourceType: ResourceType.AGENT,
        resourceId: agent1._id,
        permBits: 15,
        grantedBy: otherUser._id,
      });

      // Create second agent (owned by thirdUser, but testUser has VIEW access)
      const agent2 = await createAgent({
        id: 'agent_with_access',
        name: 'Accessible Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: thirdUser._id,
        tool_resources: {
          file_search: {
            file_ids: ['multi_agent_file'],
          },
        },
      });

      // Grant testUser VIEW access to agent2
      await AclEntry.create({
        principalType: PrincipalType.USER,
        principalId: testUser._id,
        principalModel: PrincipalModel.USER,
        resourceType: ResourceType.AGENT,
        resourceId: agent2._id,
        permBits: 1, // VIEW permission
        grantedBy: thirdUser._id,
      });

      req.params.file_id = 'multi_agent_file';
      await fileAccess(req, res, next);

      /**
       * Should succeed because testUser has access to agent2,
       * even though they don't have access to agent1.
       * The fix ensures all agents are checked, not just the first one.
       */
      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
      expect(res.status).not.toHaveBeenCalled();
    });

    test('should find file in any agent tool_resources type', async () => {
      // Create a file
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'multi_tool_file',
        filepath: '/test/tool.txt',
        filename: 'tool.txt',
        type: 'text/plain',
        size: 100,
      });

      // Agent 1: file in file_search (no access for testUser)
      await createAgent({
        id: 'agent_file_search',
        name: 'File Search Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: otherUser._id,
        tool_resources: {
          file_search: {
            file_ids: ['multi_tool_file'],
          },
        },
      });

      // Agent 2: same file in execute_code (testUser has access)
      await createAgent({
        id: 'agent_execute_code',
        name: 'Execute Code Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: thirdUser._id,
        tool_resources: {
          execute_code: {
            file_ids: ['multi_tool_file'],
          },
        },
      });

      // Agent 3: same file in ocr (testUser also has access)
      await createAgent({
        id: 'agent_ocr',
        name: 'OCR Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: testUser._id, // testUser owns this one
        tool_resources: {
          ocr: {
            file_ids: ['multi_tool_file'],
          },
        },
      });

      req.params.file_id = 'multi_tool_file';
      await fileAccess(req, res, next);

      /**
       * Should succeed because testUser owns agent3,
       * even if other agents with the file are found first.
       */
      expect(next).toHaveBeenCalled();
      expect(req.fileAccess).toBeDefined();
    });
  });

  describe('edge cases', () => {
    test('should handle agent with empty tool_resources', async () => {
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'orphan_file',
        filepath: '/test/orphan.txt',
        filename: 'orphan.txt',
        type: 'text/plain',
        size: 100,
      });

      // Create agent with no files in tool_resources
      await createAgent({
        id: `agent_empty_${Date.now()}`,
        name: 'Empty Resources Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: testUser._id,
        tool_resources: {},
      });

      req.params.file_id = 'orphan_file';
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(403);
    });

    test('should handle agent with null tool_resources', async () => {
      await createFile({
        user: otherUser._id.toString(),
        file_id: 'another_orphan_file',
        filepath: '/test/orphan2.txt',
        filename: 'orphan2.txt',
        type: 'text/plain',
        size: 100,
      });

      // Create agent with null tool_resources
      await createAgent({
        id: `agent_null_${Date.now()}`,
        name: 'Null Resources Agent',
        provider: 'openai',
        model: 'gpt-4',
        author: testUser._id,
        tool_resources: null,
      });

      req.params.file_id = 'another_orphan_file';
      await fileAccess(req, res, next);

      expect(next).not.toHaveBeenCalled();
      expect(res.status).toHaveBeenCalledWith(403);
    });
  });
});
@@ -552,7 +552,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
      throw new Error('File search is not enabled for Agents');
    }
    // Note: File search processing continues to dual storage logic below
  } else if (tool_resource === EToolResources.ocr) {
  } else if (tool_resource === EToolResources.context) {
    const { file_id, temp_file_id = null } = metadata;

    /**

@@ -353,7 +353,12 @@ async function processRequiredActions(client, requiredActions) {
async function loadAgentTools({ req, res, agent, signal, tool_resources, openAIApiKey }) {
  if (!agent.tools || agent.tools.length === 0) {
    return {};
  } else if (agent.tools && agent.tools.length === 1 && agent.tools[0] === AgentCapabilities.ocr) {
  } else if (
    agent.tools &&
    agent.tools.length === 1 &&
    /** Legacy handling for `ocr` as may still exist in existing Agents */
    (agent.tools[0] === AgentCapabilities.context || agent.tools[0] === AgentCapabilities.ocr)
  ) {
    return {};
  }

@@ -44,7 +44,7 @@ async function reinitMCPServer({
  const oauthStart =
    _oauthStart ??
    (async (authURL) => {
      logger.info(`[MCP Reinitialize] OAuth URL received: ${authURL}`);
      logger.info(`[MCP Reinitialize] OAuth URL received for ${serverName}`);
      oauthUrl = authURL;
      oauthRequired = true;
    });
client/src/Providers/DragDropContext.tsx (new file, 32 lines)

@@ -0,0 +1,32 @@
import React, { createContext, useContext, useMemo } from 'react';
import { useChatContext } from './ChatContext';

interface DragDropContextValue {
  conversationId: string | null | undefined;
  agentId: string | null | undefined;
}

const DragDropContext = createContext<DragDropContextValue | undefined>(undefined);

export function DragDropProvider({ children }: { children: React.ReactNode }) {
  const { conversation } = useChatContext();

  /** Context value only created when conversation fields change */
  const contextValue = useMemo<DragDropContextValue>(
    () => ({
      conversationId: conversation?.conversationId,
      agentId: conversation?.agent_id,
    }),
    [conversation?.conversationId, conversation?.agent_id],
  );

  return <DragDropContext.Provider value={contextValue}>{children}</DragDropContext.Provider>;
}

export function useDragDropContext() {
  const context = useContext(DragDropContext);
  if (!context) {
    throw new Error('useDragDropContext must be used within DragDropProvider');
  }
  return context;
}
@@ -1,9 +1,10 @@
import React, { createContext, useContext, ReactNode, useMemo } from 'react';
import { PermissionTypes, Permissions } from 'librechat-data-provider';
import type { TPromptGroup } from 'librechat-data-provider';
import type { PromptOption } from '~/common';
import CategoryIcon from '~/components/Prompts/Groups/CategoryIcon';
import { usePromptGroupsNav, useHasAccess } from '~/hooks';
import { useGetAllPromptGroups } from '~/data-provider';
import { usePromptGroupsNav } from '~/hooks';
import { mapPromptGroups } from '~/utils';

type AllPromptGroupsData =

@@ -19,14 +20,21 @@ type PromptGroupsContextType =
        data: AllPromptGroupsData;
        isLoading: boolean;
      };
      hasAccess: boolean;
    })
  | null;

const PromptGroupsContext = createContext<PromptGroupsContextType>(null);

export const PromptGroupsProvider = ({ children }: { children: ReactNode }) => {
  const promptGroupsNav = usePromptGroupsNav();
  const hasAccess = useHasAccess({
    permissionType: PermissionTypes.PROMPTS,
    permission: Permissions.USE,
  });

  const promptGroupsNav = usePromptGroupsNav(hasAccess);
  const { data: allGroupsData, isLoading: isLoadingAll } = useGetAllPromptGroups(undefined, {
    enabled: hasAccess,
    select: (data) => {
      const mappedArray: PromptOption[] = data.map((group) => ({
        id: group._id ?? '',

@@ -55,11 +63,12 @@ export const PromptGroupsProvider = ({ children }: { children: ReactNode }) => {
    () => ({
      ...promptGroupsNav,
      allPromptGroups: {
        data: allGroupsData,
        isLoading: isLoadingAll,
        data: hasAccess ? allGroupsData : undefined,
        isLoading: hasAccess ? isLoadingAll : false,
      },
      hasAccess,
    }),
    [promptGroupsNav, allGroupsData, isLoadingAll],
    [promptGroupsNav, allGroupsData, isLoadingAll, hasAccess],
  );

  return (
@@ -23,6 +23,7 @@ export * from './SetConvoContext';
export * from './SearchContext';
export * from './BadgeRowContext';
export * from './SidePanelContext';
export * from './DragDropContext';
export * from './MCPPanelContext';
export * from './ArtifactsContext';
export * from './PromptGroupsContext';

@@ -11,9 +11,9 @@ import {
  AgentListResponse,
} from 'librechat-data-provider';
import type t from 'librechat-data-provider';
import { useLocalize, useDefaultConvo } from '~/hooks';
import { useChatContext } from '~/Providers';
import { renderAgentAvatar } from '~/utils';
import { useLocalize } from '~/hooks';

interface SupportContact {
  name?: string;

@@ -34,11 +34,11 @@ interface AgentDetailProps {
 */
const AgentDetail: React.FC<AgentDetailProps> = ({ agent, isOpen, onClose }) => {
  const localize = useLocalize();
  // const navigate = useNavigate();
  const { conversation, newConversation } = useChatContext();
  const queryClient = useQueryClient();
  const { showToast } = useToastContext();
  const dialogRef = useRef<HTMLDivElement>(null);
  const queryClient = useQueryClient();
  const getDefaultConversation = useDefaultConvo();
  const { conversation, newConversation } = useChatContext();

  /**
   * Navigate to chat with the selected agent

@@ -62,13 +62,22 @@ const AgentDetail: React.FC<AgentDetailProps> = ({ agent, isOpen, onClose }) =>
      );
      queryClient.invalidateQueries([QueryKeys.messages]);

      /** Template with agent configuration */
      const template = {
        conversationId: Constants.NEW_CONVO as string,
        endpoint: EModelEndpoint.agents,
        agent_id: agent.id,
        title: localize('com_agents_chat_with', { name: agent.name || localize('com_ui_agent') }),
      };

      const currentConvo = getDefaultConversation({
        conversation: { ...(conversation ?? {}), ...template },
        preset: template,
      });

      newConversation({
        template: {
          conversationId: Constants.NEW_CONVO as string,
          endpoint: EModelEndpoint.agents,
          agent_id: agent.id,
          title: `Chat with ${agent.name || 'Agent'}`,
        },
        template: currentConvo,
        preset: template,
      });
    }
  };

@@ -20,6 +20,7 @@ jest.mock('react-router-dom', () => ({
jest.mock('~/hooks', () => ({
  useMediaQuery: jest.fn(() => false), // Mock as desktop by default
  useLocalize: jest.fn(),
  useDefaultConvo: jest.fn(),
}));

jest.mock('@librechat/client', () => ({

@@ -47,7 +48,12 @@ const mockWriteText = jest.fn();

const mockNavigate = jest.fn();
const mockShowToast = jest.fn();
const mockLocalize = jest.fn((key: string) => key);
const mockLocalize = jest.fn((key: string, values?: Record<string, any>) => {
  if (key === 'com_agents_chat_with' && values?.name) {
    return `Chat with ${values.name}`;
  }
  return key;
});

const mockAgent: t.Agent = {
  id: 'test-agent-id',

@@ -106,8 +112,12 @@ describe('AgentDetail', () => {
    (useNavigate as jest.Mock).mockReturnValue(mockNavigate);
    const { useToastContext } = require('@librechat/client');
    (useToastContext as jest.Mock).mockReturnValue({ showToast: mockShowToast });
    const { useLocalize } = require('~/hooks');
    const { useLocalize, useDefaultConvo } = require('~/hooks');
    (useLocalize as jest.Mock).mockReturnValue(mockLocalize);
    (useDefaultConvo as jest.Mock).mockReturnValue(() => ({
      conversationId: Constants.NEW_CONVO,
      endpoint: EModelEndpoint.agents,
    }));

    // Mock useChatContext
    const { useChatContext } = require('~/Providers');

@@ -227,6 +237,10 @@ describe('AgentDetail', () => {
        template: {
          conversationId: Constants.NEW_CONVO,
          endpoint: EModelEndpoint.agents,
        },
        preset: {
          conversationId: Constants.NEW_CONVO,
          endpoint: EModelEndpoint.agents,
          agent_id: 'test-agent-id',
          title: 'Chat with Test Agent',
        },
@@ -1,6 +1,6 @@
import React, { useRef, useState, useMemo } from 'react';
import * as Ariakit from '@ariakit/react';
import { useSetRecoilState } from 'recoil';
import { useRecoilState } from 'recoil';
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { EToolResources, EModelEndpoint, defaultAgentCapabilities } from 'librechat-data-provider';
import {

@@ -42,7 +42,7 @@ const AttachFileMenu = ({
  const isUploadDisabled = disabled ?? false;
  const inputRef = useRef<HTMLInputElement>(null);
  const [isPopoverActive, setIsPopoverActive] = useState(false);
  const setEphemeralAgent = useSetRecoilState(ephemeralAgentByConvoId(conversationId));
  const [ephemeralAgent, setEphemeralAgent] = useRecoilState(
    ephemeralAgentByConvoId(conversationId),
  );
  const [toolResource, setToolResource] = useState<EToolResources | undefined>();
  const { handleFileChange } = useFileHandling({
    overrideEndpoint: EModelEndpoint.agents,

@@ -64,7 +66,10 @@ const AttachFileMenu = ({
   * */
  const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);

  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(agentId);
  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
    agentId,
    ephemeralAgent,
  );

  const handleUploadClick = (isImage?: boolean) => {
    if (!inputRef.current) {

@@ -89,11 +94,11 @@ const AttachFileMenu = ({
      },
    ];

    if (capabilities.ocrEnabled) {
    if (capabilities.contextEnabled) {
      items.push({
        label: localize('com_ui_upload_ocr_text'),
        onClick: () => {
          setToolResource(EToolResources.ocr);
          setToolResource(EToolResources.context);
          onAction();
        },
        icon: <FileType2Icon className="icon-md" />,
@@ -1,14 +1,16 @@
import React, { useMemo } from 'react';
import { useRecoilValue } from 'recoil';
import { OGDialog, OGDialogTemplate } from '@librechat/client';
import { ImageUpIcon, FileSearch, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { EToolResources, defaultAgentCapabilities } from 'librechat-data-provider';
import { ImageUpIcon, FileSearch, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import {
  useAgentToolPermissions,
  useAgentCapabilities,
  useGetAgentsConfig,
  useLocalize,
} from '~/hooks';
import { useChatContext } from '~/Providers';
import { ephemeralAgentByConvoId } from '~/store';
import { useDragDropContext } from '~/Providers';

interface DragDropModalProps {
  onOptionSelect: (option: EToolResources | undefined) => void;

@@ -32,9 +34,11 @@ const DragDropModal = ({ onOptionSelect, setShowModal, files, isVisible }: DragD
   * Use definition for agents endpoint for ephemeral agents
   * */
  const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);
  const { conversation } = useChatContext();
  const { conversationId, agentId } = useDragDropContext();
  const ephemeralAgent = useRecoilValue(ephemeralAgentByConvoId(conversationId ?? ''));
  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
    conversation?.agent_id,
    agentId,
    ephemeralAgent,
  );

  const options = useMemo(() => {

@@ -60,10 +64,10 @@ const DragDropModal = ({ onOptionSelect, setShowModal, files, isVisible }: DragD
        icon: <TerminalSquareIcon className="icon-md" />,
      });
    }
    if (capabilities.ocrEnabled) {
    if (capabilities.contextEnabled) {
      _options.push({
        label: localize('com_ui_upload_ocr_text'),
        value: EToolResources.ocr,
        value: EToolResources.context,
        icon: <FileType2Icon className="icon-md" />,
      });
    }
@@ -1,6 +1,7 @@
import { useDragHelpers } from '~/hooks';
import DragDropOverlay from '~/components/Chat/Input/Files/DragDropOverlay';
import DragDropModal from '~/components/Chat/Input/Files/DragDropModal';
import { DragDropProvider } from '~/Providers';
import { cn } from '~/utils';

interface DragDropWrapperProps {

@@ -19,12 +20,14 @@ export default function DragDropWrapper({ children, className }: DragDropWrapper
      {children}
      {/** Always render overlay to avoid mount/unmount overhead */}
      <DragDropOverlay isActive={isActive} />
      <DragDropModal
        files={draggedFiles}
        isVisible={showModal}
        setShowModal={setShowModal}
        onOptionSelect={handleOptionSelect}
      />
      <DragDropProvider>
        <DragDropModal
          files={draggedFiles}
          isVisible={showModal}
          setShowModal={setShowModal}
          onOptionSelect={handleOptionSelect}
        />
      </DragDropProvider>
    </div>
  );
}
@@ -2,14 +2,13 @@ import { useState, useRef, useEffect, useMemo, memo, useCallback } from 'react';
import { AutoSizer, List } from 'react-virtualized';
import { Spinner, useCombobox } from '@librechat/client';
import { useSetRecoilState, useRecoilValue } from 'recoil';
import { PermissionTypes, Permissions } from 'librechat-data-provider';
import type { TPromptGroup } from 'librechat-data-provider';
import type { PromptOption } from '~/common';
import { removeCharIfLast, detectVariables } from '~/utils';
import VariableDialog from '~/components/Prompts/Groups/VariableDialog';
import { usePromptGroupsContext } from '~/Providers';
import { useLocalize, useHasAccess } from '~/hooks';
import MentionItem from './MentionItem';
import { useLocalize } from '~/hooks';
import store from '~/store';

const commandChar = '/';

@@ -54,12 +53,7 @@ function PromptsCommand({
  submitPrompt: (textPrompt: string) => void;
}) {
  const localize = useLocalize();
  const hasAccess = useHasAccess({
    permissionType: PermissionTypes.PROMPTS,
    permission: Permissions.USE,
  });

  const { allPromptGroups } = usePromptGroupsContext();
  const { allPromptGroups, hasAccess } = usePromptGroupsContext();
  const { data, isLoading } = allPromptGroups;

  const [activeIndex, setActiveIndex] = useState(0);
@@ -24,35 +24,45 @@ const SearchBar = forwardRef((props: SearchBarProps, ref: React.Ref<HTMLDivEleme
  const inputRef = useRef<HTMLInputElement>(null);
  const [showClearIcon, setShowClearIcon] = useState(false);

  const { newConversation } = useNewConvo();
  const { newConversation: newConvo } = useNewConvo();
  const [search, setSearchState] = useRecoilState(store.search);

  const clearSearch = useCallback(() => {
    if (location.pathname.includes('/search')) {
      newConversation({ disableFocus: true });
      navigate('/c/new', { replace: true });
    }
  }, [newConversation, location.pathname, navigate]);
  const clearSearch = useCallback(
    (pathname?: string) => {
      if (pathname?.includes('/search') || pathname === '/c/new') {
        queryClient.removeQueries([QueryKeys.messages]);
        newConvo({ disableFocus: true });
        navigate('/c/new');
      }
    },
    [newConvo, navigate, queryClient],
  );

  const clearText = useCallback(() => {
    setShowClearIcon(false);
    setText('');
    setSearchState((prev) => ({
      ...prev,
      query: '',
      debouncedQuery: '',
      isTyping: false,
    }));
    clearSearch();
    inputRef.current?.focus();
  }, [setSearchState, clearSearch]);
  const clearText = useCallback(
    (pathname?: string) => {
      setShowClearIcon(false);
      setText('');
      setSearchState((prev) => ({
        ...prev,
        query: '',
        debouncedQuery: '',
        isTyping: false,
      }));
      clearSearch(pathname);
      inputRef.current?.focus();
    },
    [setSearchState, clearSearch],
  );

  const handleKeyUp = (e: React.KeyboardEvent<HTMLInputElement>) => {
    const { value } = e.target as HTMLInputElement;
    if (e.key === 'Backspace' && value === '') {
      clearText();
    }
  };
  const handleKeyUp = useCallback(
    (e: React.KeyboardEvent<HTMLInputElement>) => {
      const { value } = e.target as HTMLInputElement;
      if (e.key === 'Backspace' && value === '') {
        clearText(location.pathname);
      }
    },
    [clearText, location.pathname],
  );

  const sendRequest = useCallback(
    (value: string) => {

@@ -85,8 +95,6 @@ const SearchBar = forwardRef((props: SearchBarProps, ref: React.Ref<HTMLDivEleme
    debouncedSetDebouncedQuery(value);
    if (value.length > 0 && location.pathname !== '/search') {
      navigate('/search', { replace: true });
    } else if (value.length === 0 && location.pathname === '/search') {
      navigate('/c/new', { replace: true });
    }
  };

@@ -132,7 +140,7 @@ const SearchBar = forwardRef((props: SearchBarProps, ref: React.Ref<HTMLDivEleme
            showClearIcon ? 'opacity-100' : 'opacity-0',
            isSmallScreen === true ? 'right-[16px]' : '',
          )}
          onClick={clearText}
          onClick={() => clearText(location.pathname)}
          tabIndex={showClearIcon ? 0 : -1}
          disabled={!showClearIcon}
        >
@@ -6,6 +6,7 @@ import { LocalStorageKeys } from 'librechat-data-provider';
import { useFormContext, Controller } from 'react-hook-form';
import type { MenuItemProps } from '@librechat/client';
import type { ReactNode } from 'react';
import { usePromptGroupsContext } from '~/Providers';
import { useCategories } from '~/hooks';
import { cn } from '~/utils';

@@ -22,8 +23,9 @@ const CategorySelector: React.FC<CategorySelectorProps> = ({
}) => {
  const { t } = useTranslation();
  const formContext = useFormContext();
  const { categories, emptyCategory } = useCategories();
  const [isOpen, setIsOpen] = useState(false);
  const { hasAccess } = usePromptGroupsContext();
  const { categories, emptyCategory } = useCategories({ hasAccess });

  const control = formContext?.control;
  const watch = formContext?.watch;

@@ -7,6 +7,7 @@ import CategorySelector from '~/components/Prompts/Groups/CategorySelector';
import VariablesDropdown from '~/components/Prompts/VariablesDropdown';
import PromptVariables from '~/components/Prompts/PromptVariables';
import Description from '~/components/Prompts/Description';
import { usePromptGroupsContext } from '~/Providers';
import { useLocalize, useHasAccess } from '~/hooks';
import Command from '~/components/Prompts/Command';
import { useCreatePrompt } from '~/data-provider';

@@ -37,10 +38,12 @@ const CreatePromptForm = ({
}) => {
  const localize = useLocalize();
  const navigate = useNavigate();
  const hasAccess = useHasAccess({
  const { hasAccess: hasUseAccess } = usePromptGroupsContext();
  const hasCreateAccess = useHasAccess({
    permissionType: PermissionTypes.PROMPTS,
    permission: Permissions.CREATE,
  });
  const hasAccess = hasUseAccess && hasCreateAccess;

  useEffect(() => {
    let timeoutId: ReturnType<typeof setTimeout>;

@@ -11,8 +11,8 @@ import store from '~/store';

export default function FilterPrompts({ className = '' }: { className?: string }) {
  const localize = useLocalize();
  const { name, setName } = usePromptGroupsContext();
  const { categories } = useCategories('h-4 w-4');
  const { name, setName, hasAccess } = usePromptGroupsContext();
  const { categories } = useCategories({ className: 'h-4 w-4', hasAccess });
  const [displayName, setDisplayName] = useState(name || '');
  const [isSearching, setIsSearching] = useState(false);
  const [categoryFilter, setCategory] = useRecoilState(store.promptsCategory);

@@ -167,6 +167,7 @@ const PromptForm = () => {
  const params = useParams();
  const localize = useLocalize();
  const { showToast } = useToastContext();
  const { hasAccess } = usePromptGroupsContext();
  const alwaysMakeProd = useRecoilValue(store.alwaysMakeProd);
  const promptId = params.promptId || '';

@@ -179,10 +180,12 @@ const PromptForm = () => {
  const [showSidePanel, setShowSidePanel] = useState(false);
  const sidePanelWidth = '320px';

  const { data: group, isLoading: isLoadingGroup } = useGetPromptGroup(promptId);
  const { data: group, isLoading: isLoadingGroup } = useGetPromptGroup(promptId, {
    enabled: hasAccess && !!promptId,
  });
  const { data: prompts = [], isLoading: isLoadingPrompts } = useGetPrompts(
    { groupId: promptId },
    { enabled: !!promptId },
    { enabled: hasAccess && !!promptId },
  );

  const { hasPermission, isLoading: permissionsLoading } = useResourcePermissions(
@@ -79,9 +79,9 @@ export default function AgentConfig({ createMutation }: Pick<AgentPanelProps, 'c
  }, [fileMap, agentFiles]);

  const {
    ocrEnabled,
    codeEnabled,
    toolsEnabled,
    contextEnabled,
    actionsEnabled,
    artifactsEnabled,
    webSearchEnabled,

@@ -291,7 +291,7 @@ export default function AgentConfig({ createMutation }: Pick<AgentPanelProps, 'c
      {(codeEnabled ||
        fileSearchEnabled ||
        artifactsEnabled ||
        ocrEnabled ||
        contextEnabled ||
        webSearchEnabled) && (
        <div className="mb-4 flex w-full flex-col items-start gap-3">
          <label className="text-token-text-primary block font-medium">

@@ -301,8 +301,8 @@ export default function AgentConfig({ createMutation }: Pick<AgentPanelProps, 'c
          {codeEnabled && <CodeForm agent_id={agent_id} files={code_files} />}
          {/* Web Search */}
          {webSearchEnabled && <SearchForm />}
          {/* File Context (OCR) */}
          {ocrEnabled && <FileContext agent_id={agent_id} files={context_files} />}
          {/* File Context */}
          {contextEnabled && <FileContext agent_id={agent_id} files={context_files} />}
          {/* Artifacts */}
          {artifactsEnabled && <Artifacts />}
          {/* File Search */}

@@ -47,7 +47,7 @@ export default function FileContext({

  const { handleFileChange } = useFileHandling({
    overrideEndpoint: EModelEndpoint.agents,
    additionalMetadata: { agent_id, tool_resource: EToolResources.ocr },
    additionalMetadata: { agent_id, tool_resource: EToolResources.context },
    fileSetter: setFiles,
  });
  const { handleSharePointFiles, isProcessing, downloadProgress } = useSharePointFileHandling({

@@ -113,7 +113,7 @@ export default function FileContext({
        <HoverCardTrigger asChild>
          <span className="flex items-center gap-2">
            <label className="text-token-text-primary block font-medium">
              {localize('com_agents_file_context')}
              {localize('com_agents_file_context_label')}
            </label>
            <CircleHelpIcon className="h-4 w-4 text-text-tertiary" />
          </span>

@@ -122,7 +122,7 @@ export default function FileContext({
        <HoverCardContent side={ESide.Top} className="w-80">
          <div className="space-y-2">
            <p className="text-sm text-text-secondary">
              {localize('com_agents_file_context_info')}
              {localize('com_agents_file_context_description')}
            </p>
          </div>
        </HoverCardContent>

@@ -130,13 +130,13 @@ export default function FileContext({
        </div>
      </HoverCard>
      <div className="flex flex-col gap-3">
        {/* File Context (OCR) Files */}
        {/* File Context Files */}
        <FileRow
          files={files}
          setFiles={setFiles}
          setFilesLoading={setFilesLoading}
          agent_id={agent_id}
          tool_resource={EToolResources.ocr}
          tool_resource={EToolResources.context}
          Wrapper={({ children }) => <div className="flex flex-wrap gap-2">{children}</div>}
        />
        <div>
@@ -1,5 +1,6 @@
import { renderHook } from '@testing-library/react';
import { Tools, Constants } from 'librechat-data-provider';
import { Tools, Constants, EToolResources } from 'librechat-data-provider';
import type { TEphemeralAgent } from 'librechat-data-provider';
import useAgentToolPermissions from '../useAgentToolPermissions';

// Mock dependencies

@@ -15,57 +16,165 @@ jest.mock('~/Providers', () => ({
import { useGetAgentByIdQuery } from '~/data-provider';
import { useAgentsMapContext } from '~/Providers';

type HookProps = {
  agentId?: string | null;
  ephemeralAgent?: TEphemeralAgent | null;
};

describe('useAgentToolPermissions', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  describe('Ephemeral Agent Scenarios', () => {
    it('should return true for all tools when agentId is null', () => {
  describe('Ephemeral Agent Scenarios (without ephemeralAgent parameter)', () => {
    it('should return false for all tools when agentId is null and no ephemeralAgent provided', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const { result } = renderHook(() => useAgentToolPermissions(null));

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(true);
      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return true for all tools when agentId is undefined', () => {
    it('should return false for all tools when agentId is undefined and no ephemeralAgent provided', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const { result } = renderHook(() => useAgentToolPermissions(undefined));

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(true);
      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return true for all tools when agentId is empty string', () => {
    it('should return false for all tools when agentId is empty string and no ephemeralAgent provided', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const { result } = renderHook(() => useAgentToolPermissions(''));

      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return false for all tools when agentId is EPHEMERAL_AGENT_ID and no ephemeralAgent provided', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const { result } = renderHook(() => useAgentToolPermissions(Constants.EPHEMERAL_AGENT_ID));

      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });
  });

  describe('Ephemeral Agent with Tool Settings', () => {
    it('should return true for file_search when ephemeralAgent has file_search enabled', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const ephemeralAgent = {
        [EToolResources.file_search]: true,
      };

      const { result } = renderHook(() => useAgentToolPermissions(null, ephemeralAgent));

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return true for execute_code when ephemeralAgent has execute_code enabled', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const ephemeralAgent = {
        [EToolResources.execute_code]: true,
      };

      const { result } = renderHook(() => useAgentToolPermissions(undefined, ephemeralAgent));

      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(true);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return true for both tools when ephemeralAgent has both enabled', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const ephemeralAgent = {
        [EToolResources.file_search]: true,
        [EToolResources.execute_code]: true,
      };

      const { result } = renderHook(() => useAgentToolPermissions('', ephemeralAgent));

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(true);
      expect(result.current.tools).toBeUndefined();
    });

    it('should return true for all tools when agentId is EPHEMERAL_AGENT_ID', () => {
    it('should return false for tools when ephemeralAgent has them explicitly disabled', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const { result } = renderHook(() =>
        useAgentToolPermissions(Constants.EPHEMERAL_AGENT_ID)
      const ephemeralAgent = {
        [EToolResources.file_search]: false,
        [EToolResources.execute_code]: false,
      };

      const { result } = renderHook(() =>
        useAgentToolPermissions(Constants.EPHEMERAL_AGENT_ID, ephemeralAgent),
      );

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(true);
      expect(result.current.fileSearchAllowedByAgent).toBe(false);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should handle ephemeralAgent with ocr property without affecting other tools', () => {
      (useAgentsMapContext as jest.Mock).mockReturnValue({});
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const ephemeralAgent = {
        [EToolResources.file_search]: true,
      };

      const { result } = renderHook(() => useAgentToolPermissions(null, ephemeralAgent));

      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toBeUndefined();
    });

    it('should not affect regular agents when ephemeralAgent is provided', () => {
      const agentId = 'regular-agent';
      const mockAgent = {
        id: agentId,
        tools: [Tools.file_search],
      };

      (useAgentsMapContext as jest.Mock).mockReturnValue({
        [agentId]: mockAgent,
      });
      (useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });

      const ephemeralAgent = {
        [EToolResources.execute_code]: true,
      };

      const { result } = renderHook(() => useAgentToolPermissions(agentId, ephemeralAgent));

      // Should use regular agent's tools, not ephemeralAgent
      expect(result.current.fileSearchAllowedByAgent).toBe(true);
      expect(result.current.codeAllowedByAgent).toBe(false);
      expect(result.current.tools).toEqual([Tools.file_search]);
    });
|
||||
});
|
||||
|
||||
describe('Regular Agent with Tools', () => {
|
||||
|
@ -300,7 +409,7 @@ describe('useAgentToolPermissions', () => {
|
|||
expect(firstResult.codeAllowedByAgent).toBe(secondResult.codeAllowedByAgent);
|
||||
// Tools array reference should be the same since it comes from useMemo
|
||||
expect(firstResult.tools).toBe(secondResult.tools);
|
||||
|
||||
|
||||
// Verify the actual values are correct
|
||||
expect(secondResult.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(secondResult.codeAllowedByAgent).toBe(false);
|
||||
|
@ -318,10 +427,9 @@ describe('useAgentToolPermissions', () => {
|
|||
(useAgentsMapContext as jest.Mock).mockReturnValue(mockAgents);
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ agentId }) => useAgentToolPermissions(agentId),
|
||||
{ initialProps: { agentId: agentId1 } }
|
||||
);
|
||||
const { result, rerender } = renderHook(({ agentId }) => useAgentToolPermissions(agentId), {
|
||||
initialProps: { agentId: agentId1 },
|
||||
});
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
|
@ -345,24 +453,34 @@ describe('useAgentToolPermissions', () => {
|
|||
});
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({ data: undefined });
|
||||
|
||||
const ephemeralAgent = {
|
||||
[EToolResources.file_search]: true,
|
||||
[EToolResources.execute_code]: true,
|
||||
};
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ agentId }) => useAgentToolPermissions(agentId),
|
||||
{ initialProps: { agentId: null } }
|
||||
({ agentId, ephemeralAgent }) => useAgentToolPermissions(agentId, ephemeralAgent),
|
||||
{ initialProps: { agentId: null, ephemeralAgent } as HookProps },
|
||||
);
|
||||
|
||||
// Start with ephemeral agent (null)
|
||||
// Start with ephemeral agent (null) with tools enabled
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
|
||||
// Switch to regular agent
|
||||
rerender({ agentId: regularAgentId });
|
||||
rerender({ agentId: regularAgentId, ephemeralAgent });
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
|
||||
// Switch back to ephemeral
|
||||
rerender({ agentId: '' });
|
||||
rerender({ agentId: '', ephemeralAgent });
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
|
||||
// Switch to ephemeral without tools
|
||||
rerender({ agentId: null, ephemeralAgent: undefined });
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -403,9 +521,9 @@ describe('useAgentToolPermissions', () => {
|
|||
|
||||
it('should handle query loading state', () => {
|
||||
const agentId = 'loading-agent';
|
||||
|
||||
|
||||
(useAgentsMapContext as jest.Mock).mockReturnValue({});
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({
|
||||
data: undefined,
|
||||
isLoading: true,
|
||||
error: null,
|
||||
|
@ -421,9 +539,9 @@ describe('useAgentToolPermissions', () => {
|
|||
|
||||
it('should handle query error state', () => {
|
||||
const agentId = 'error-agent';
|
||||
|
||||
|
||||
(useAgentsMapContext as jest.Mock).mockReturnValue({});
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({
|
||||
(useGetAgentByIdQuery as jest.Mock).mockReturnValue({
|
||||
data: undefined,
|
||||
isLoading: false,
|
||||
error: new Error('Failed to fetch agent'),
|
|
@ -1,5 +1,5 @@
|
|||
import { renderHook } from '@testing-library/react';
|
||||
import { Tools } from 'librechat-data-provider';
|
||||
import { Tools, EToolResources } from 'librechat-data-provider';
|
||||
import useAgentToolPermissions from '../useAgentToolPermissions';
|
||||
|
||||
// Mock the dependencies
|
||||
|
@ -20,36 +20,36 @@ describe('useAgentToolPermissions', () => {
|
|||
});
|
||||
|
||||
describe('when no agentId is provided', () => {
|
||||
it('should allow all tools for ephemeral agents', () => {
|
||||
it('should disallow all tools for ephemeral agents when no ephemeralAgent settings provided', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(null));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should allow all tools when agentId is undefined', () => {
|
||||
it('should disallow all tools when agentId is undefined and no ephemeralAgent settings', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(undefined));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should allow all tools when agentId is empty string', () => {
|
||||
it('should disallow all tools when agentId is empty string and no ephemeralAgent settings', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(''));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
@ -177,4 +177,74 @@ describe('useAgentToolPermissions', () => {
|
|||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('when ephemeralAgent settings are provided', () => {
|
||||
it('should allow file_search when ephemeralAgent has file_search enabled', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const ephemeralAgent = {
|
||||
[EToolResources.file_search]: true,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(null, ephemeralAgent));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should allow execute_code when ephemeralAgent has execute_code enabled', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const ephemeralAgent = {
|
||||
[EToolResources.execute_code]: true,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(undefined, ephemeralAgent));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(false);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should allow both tools when ephemeralAgent has both enabled', () => {
|
||||
mockUseAgentsMapContext.mockReturnValue({});
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const ephemeralAgent = {
|
||||
[EToolResources.file_search]: true,
|
||||
[EToolResources.execute_code]: true,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions('', ephemeralAgent));
|
||||
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(true);
|
||||
expect(result.current.tools).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should not affect regular agents when ephemeralAgent is provided', () => {
|
||||
const agentId = 'regular-agent';
|
||||
const agent = {
|
||||
id: agentId,
|
||||
tools: [Tools.file_search],
|
||||
};
|
||||
|
||||
mockUseAgentsMapContext.mockReturnValue({ [agentId]: agent });
|
||||
mockUseGetAgentByIdQuery.mockReturnValue({ data: undefined });
|
||||
|
||||
const ephemeralAgent = {
|
||||
[EToolResources.execute_code]: true,
|
||||
};
|
||||
|
||||
const { result } = renderHook(() => useAgentToolPermissions(agentId, ephemeralAgent));
|
||||
|
||||
// Should use regular agent's tools, not ephemeralAgent
|
||||
expect(result.current.fileSearchAllowedByAgent).toBe(true);
|
||||
expect(result.current.codeAllowedByAgent).toBe(false);
|
||||
expect(result.current.tools).toEqual([Tools.file_search]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
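Editor's note: the updated tests above assume that tool permissions for ephemeral chats are driven entirely by an optional settings object keyed by EToolResources flags. A minimal sketch of that shape (not part of this commit; the exact TEphemeralAgent fields are assumed from the hook's usage in the hunks below):

import { EToolResources } from 'librechat-data-provider';
import type { TEphemeralAgent } from 'librechat-data-provider';

// No settings object: the new default is to disallow both tools for ephemeral chats.
const noSettings: TEphemeralAgent | undefined = undefined;

// Explicit settings: only the enabled resources are allowed.
const ephemeralAgent = {
  [EToolResources.file_search]: true, // fileSearchAllowedByAgent -> true
  [EToolResources.execute_code]: false, // codeAllowedByAgent -> false
} as TEphemeralAgent;

export { noSettings, ephemeralAgent };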
|
@ -6,6 +6,7 @@ interface AgentCapabilitiesResult {
|
|||
actionsEnabled: boolean;
|
||||
artifactsEnabled: boolean;
|
||||
ocrEnabled: boolean;
|
||||
contextEnabled: boolean;
|
||||
fileSearchEnabled: boolean;
|
||||
webSearchEnabled: boolean;
|
||||
codeEnabled: boolean;
|
||||
|
@ -34,6 +35,11 @@ export default function useAgentCapabilities(
|
|||
[capabilities],
|
||||
);
|
||||
|
||||
const contextEnabled = useMemo(
|
||||
() => capabilities?.includes(AgentCapabilities.context) ?? false,
|
||||
[capabilities],
|
||||
);
|
||||
|
||||
const fileSearchEnabled = useMemo(
|
||||
() => capabilities?.includes(AgentCapabilities.file_search) ?? false,
|
||||
[capabilities],
|
||||
|
@ -54,6 +60,7 @@ export default function useAgentCapabilities(
|
|||
codeEnabled,
|
||||
toolsEnabled,
|
||||
actionsEnabled,
|
||||
contextEnabled,
|
||||
artifactsEnabled,
|
||||
webSearchEnabled,
|
||||
fileSearchEnabled,
|
||||
|
|
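For illustration only (not part of the commit), the new contextEnabled flag reduces to the following check, written here as a standalone helper; the enum value comes from the hunks above.

import { AgentCapabilities } from 'librechat-data-provider';

/** Sketch of the check behind the new `contextEnabled` capability flag. */
const isContextEnabled = (capabilities?: string[]): boolean =>
  capabilities?.includes(AgentCapabilities.context) ?? false;

// An agents endpoint config that lists the new `context` capability:
isContextEnabled([AgentCapabilities.context, AgentCapabilities.file_search]); // true
isContextEnabled([AgentCapabilities.file_search]); // false

export { isContextEnabled };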
|
@ -1,5 +1,6 @@
|
|||
import { useMemo } from 'react';
|
||||
import { Tools, Constants } from 'librechat-data-provider';
|
||||
import { Tools, Constants, EToolResources } from 'librechat-data-provider';
|
||||
import type { TEphemeralAgent } from 'librechat-data-provider';
|
||||
import { useGetAgentByIdQuery } from '~/data-provider';
|
||||
import { useAgentsMapContext } from '~/Providers';
|
||||
|
||||
|
@ -16,11 +17,13 @@ function isEphemeralAgent(agentId: string | null | undefined): boolean {
|
|||
/**
|
||||
* Hook to determine whether specific tools are allowed for a given agent.
|
||||
*
|
||||
* @param agentId - The ID of the agent. If null/undefined/empty, returns true for all tools (ephemeral agent behavior)
|
||||
* @param agentId - The ID of the agent. If null/undefined/empty, checks ephemeralAgent settings
|
||||
* @param ephemeralAgent - Optional ephemeral agent settings for tool permissions
|
||||
* @returns Object with boolean flags for file_search and execute_code permissions, plus the tools array
|
||||
*/
|
||||
export default function useAgentToolPermissions(
|
||||
agentId: string | null | undefined,
|
||||
ephemeralAgent?: TEphemeralAgent | null,
|
||||
): AgentToolPermissionsResult {
|
||||
const agentsMap = useAgentsMapContext();
|
||||
|
||||
|
@ -37,22 +40,26 @@ export default function useAgentToolPermissions(
|
|||
);
|
||||
|
||||
const fileSearchAllowedByAgent = useMemo(() => {
|
||||
// Allow for ephemeral agents
|
||||
if (isEphemeralAgent(agentId)) return true;
|
||||
// Check ephemeral agent settings
|
||||
if (isEphemeralAgent(agentId)) {
|
||||
return ephemeralAgent?.[EToolResources.file_search] ?? false;
|
||||
}
|
||||
// If agentId exists but agent not found, disallow
|
||||
if (!selectedAgent) return false;
|
||||
// Check if the agent has the file_search tool
|
||||
return tools?.includes(Tools.file_search) ?? false;
|
||||
}, [agentId, selectedAgent, tools]);
|
||||
}, [agentId, selectedAgent, tools, ephemeralAgent]);
|
||||
|
||||
const codeAllowedByAgent = useMemo(() => {
|
||||
// Allow for ephemeral agents
|
||||
if (isEphemeralAgent(agentId)) return true;
|
||||
// Check ephemeral agent settings
|
||||
if (isEphemeralAgent(agentId)) {
|
||||
return ephemeralAgent?.[EToolResources.execute_code] ?? false;
|
||||
}
|
||||
// If agentId exists but agent not found, disallow
|
||||
if (!selectedAgent) return false;
|
||||
// Check if the agent has the execute_code tool
|
||||
return tools?.includes(Tools.execute_code) ?? false;
|
||||
}, [agentId, selectedAgent, tools]);
|
||||
}, [agentId, selectedAgent, tools, ephemeralAgent]);
|
||||
|
||||
return {
|
||||
fileSearchAllowedByAgent,
|
||||
|
|
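A hypothetical call site (not from the commit) for the updated hook signature; the import path and component are assumed, while the parameter and result names come from the hunks above.

import { EToolResources } from 'librechat-data-provider';
import useAgentToolPermissions from '~/hooks/Agents/useAgentToolPermissions'; // path assumed

function AttachmentOptions({ agentId }: { agentId?: string | null }) {
  // For ephemeral chats (no agent id), permissions now follow these settings
  // instead of defaulting to true.
  const ephemeralAgent = { [EToolResources.execute_code]: true };
  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
    agentId,
    ephemeralAgent,
  );
  return fileSearchAllowedByAgent || codeAllowedByAgent ? 'show upload menu' : null;
}

export default AttachmentOptions;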
|
@ -71,7 +71,7 @@ export default function useDragHelpers() {
|
|||
const capabilities = agentsConfig?.capabilities ?? defaultAgentCapabilities;
|
||||
const fileSearchEnabled = capabilities.includes(AgentCapabilities.file_search) === true;
|
||||
const codeEnabled = capabilities.includes(AgentCapabilities.execute_code) === true;
|
||||
const ocrEnabled = capabilities.includes(AgentCapabilities.ocr) === true;
|
||||
const contextEnabled = capabilities.includes(AgentCapabilities.context) === true;
|
||||
|
||||
/** Get agent permissions at drop time */
|
||||
const agentId = conversationRef.current?.agent_id;
|
||||
|
@ -99,7 +99,7 @@ export default function useDragHelpers() {
|
|||
allImages ||
|
||||
(fileSearchEnabled && fileSearchAllowedByAgent) ||
|
||||
(codeEnabled && codeAllowedByAgent) ||
|
||||
ocrEnabled;
|
||||
contextEnabled;
|
||||
|
||||
if (!shouldShowModal) {
|
||||
// Fallback: directly handle files without showing modal
|
||||
|
|
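Paraphrased as a pure function (not the actual implementation), the drop-time gate after this change reads as follows; `contextEnabled` replaces the former `ocrEnabled` condition.

interface DropGateFlags {
  allImages: boolean;
  fileSearchEnabled: boolean;
  fileSearchAllowedByAgent: boolean;
  codeEnabled: boolean;
  codeAllowedByAgent: boolean;
  contextEnabled: boolean; // replaces the former `ocrEnabled`
}

const shouldShowFileModal = (f: DropGateFlags): boolean =>
  f.allImages ||
  (f.fileSearchEnabled && f.fileSearchAllowedByAgent) ||
  (f.codeEnabled && f.codeAllowedByAgent) ||
  f.contextEnabled;

export { shouldShowFileModal };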
|
@ -1,6 +1,6 @@
|
|||
import { useGetCategories } from '~/data-provider';
|
||||
import CategoryIcon from '~/components/Prompts/Groups/CategoryIcon';
|
||||
import { useLocalize, TranslationKeys } from '~/hooks';
|
||||
import { useGetCategories } from '~/data-provider';
|
||||
|
||||
const loadingCategories: { label: TranslationKeys; value: string }[] = [
|
||||
{
|
||||
|
@ -14,9 +14,17 @@ const emptyCategory: { label: TranslationKeys; value: string } = {
|
|||
value: '',
|
||||
};
|
||||
|
||||
const useCategories = (className = '') => {
|
||||
const useCategories = ({
|
||||
className = '',
|
||||
hasAccess = true,
|
||||
}: {
|
||||
className?: string;
|
||||
hasAccess?: boolean;
|
||||
}) => {
|
||||
const localize = useLocalize();
|
||||
|
||||
const { data: categories = loadingCategories } = useGetCategories({
|
||||
enabled: hasAccess,
|
||||
select: (data) =>
|
||||
data.map((category) => ({
|
||||
label: localize(category.label as TranslationKeys),
|
||||
|
|
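A hypothetical call site (not from the commit) showing the move from a positional className argument to an options object with hasAccess; the import path is assumed.

import useCategories from '~/hooks/Prompts/useCategories'; // path assumed

function CategoryPicker() {
  const hasAccess = true; // e.g. derived from a prompts-permission check
  // Before this change: useCategories('h-4 w-4'); the query now only runs when hasAccess is true.
  useCategories({ className: 'h-4 w-4', hasAccess });
  return null;
}

export default CategoryPicker;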
|
@ -3,7 +3,7 @@ import { useRecoilState } from 'recoil';
|
|||
import { usePromptGroupsInfiniteQuery } from '~/data-provider';
|
||||
import store from '~/store';
|
||||
|
||||
export default function usePromptGroupsNav() {
|
||||
export default function usePromptGroupsNav(hasAccess = true) {
|
||||
const [pageSize] = useRecoilState(store.promptsPageSize);
|
||||
const [category] = useRecoilState(store.promptsCategory);
|
||||
const [name, setName] = useRecoilState(store.promptsName);
|
||||
|
@ -14,21 +14,26 @@ export default function usePromptGroupsNav() {
|
|||
|
||||
const prevFiltersRef = useRef({ name, category });
|
||||
|
||||
const groupsQuery = usePromptGroupsInfiniteQuery({
|
||||
name,
|
||||
pageSize,
|
||||
category,
|
||||
});
|
||||
const groupsQuery = usePromptGroupsInfiniteQuery(
|
||||
{
|
||||
name,
|
||||
pageSize,
|
||||
category,
|
||||
},
|
||||
{
|
||||
enabled: hasAccess,
|
||||
},
|
||||
);
|
||||
|
||||
// Get the current page data
|
||||
const currentPageData = useMemo(() => {
|
||||
if (!groupsQuery.data?.pages || groupsQuery.data.pages.length === 0) {
|
||||
if (!hasAccess || !groupsQuery.data?.pages || groupsQuery.data.pages.length === 0) {
|
||||
return null;
|
||||
}
|
||||
// Ensure we don't go out of bounds
|
||||
const pageIndex = Math.min(currentPageIndex, groupsQuery.data.pages.length - 1);
|
||||
return groupsQuery.data.pages[pageIndex];
|
||||
}, [groupsQuery.data?.pages, currentPageIndex]);
|
||||
}, [hasAccess, groupsQuery.data?.pages, currentPageIndex]);
|
||||
|
||||
// Get prompt groups for current page
|
||||
const promptGroups = useMemo(() => {
|
||||
|
@ -54,7 +59,7 @@ export default function usePromptGroupsNav() {
|
|||
|
||||
// Navigate to next page
|
||||
const nextPage = useCallback(async () => {
|
||||
if (!hasNextPage) return;
|
||||
if (!hasAccess || !hasNextPage) return;
|
||||
|
||||
const nextPageIndex = currentPageIndex + 1;
|
||||
|
||||
|
@ -72,16 +77,18 @@ export default function usePromptGroupsNav() {
|
|||
}
|
||||
|
||||
setCurrentPageIndex(nextPageIndex);
|
||||
}, [currentPageIndex, hasNextPage, groupsQuery]);
|
||||
}, [hasAccess, currentPageIndex, hasNextPage, groupsQuery]);
|
||||
|
||||
// Navigate to previous page
|
||||
const prevPage = useCallback(() => {
|
||||
if (!hasPreviousPage) return;
|
||||
if (!hasAccess || !hasPreviousPage) return;
|
||||
setCurrentPageIndex(currentPageIndex - 1);
|
||||
}, [currentPageIndex, hasPreviousPage]);
|
||||
}, [hasAccess, currentPageIndex, hasPreviousPage]);
|
||||
|
||||
// Reset when filters change
|
||||
useEffect(() => {
|
||||
if (!hasAccess) return;
|
||||
|
||||
const filtersChanged =
|
||||
prevFiltersRef.current.name !== name || prevFiltersRef.current.category !== category;
|
||||
|
||||
|
@ -90,18 +97,18 @@ export default function usePromptGroupsNav() {
|
|||
cursorHistoryRef.current = [null];
|
||||
prevFiltersRef.current = { name, category };
|
||||
}
|
||||
}, [name, category]);
|
||||
}, [hasAccess, name, category]);
|
||||
|
||||
return {
|
||||
promptGroups,
|
||||
promptGroups: hasAccess ? promptGroups : [],
|
||||
groupsQuery,
|
||||
currentPage,
|
||||
totalPages,
|
||||
hasNextPage,
|
||||
hasPreviousPage,
|
||||
hasNextPage: hasAccess && hasNextPage,
|
||||
hasPreviousPage: hasAccess && hasPreviousPage,
|
||||
nextPage,
|
||||
prevPage,
|
||||
isFetching: groupsQuery.isFetching,
|
||||
isFetching: hasAccess ? groupsQuery.isFetching : false,
|
||||
name,
|
||||
setName,
|
||||
};
|
||||
|
|
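A hypothetical consumer (not from the commit) of the new hasAccess parameter; the import path and component are assumed, and the returned fields are those listed in the hunk above.

import usePromptGroupsNav from '~/hooks/Prompts/usePromptGroupsNav'; // path assumed

function PromptsPanel({ hasAccess }: { hasAccess: boolean }) {
  const { promptGroups, hasNextPage, isFetching } = usePromptGroupsNav(hasAccess);
  // With hasAccess === false the query is disabled, promptGroups is [],
  // and hasNextPage / hasPreviousPage / isFetching are forced to false.
  if (isFetching) {
    return 'loading';
  }
  return promptGroups.length > 0 && hasNextPage ? 'more pages available' : null;
}

export default PromptsPanel;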
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "حدث خطأ أثناء إنشاء الوكيل الخاص بك",
|
||||
"com_agents_description_placeholder": "اختياري: اشرح عميلك هنا",
|
||||
"com_agents_enable_file_search": "تمكين البحث عن الملفات",
|
||||
"com_agents_file_context": "سياق الملف (قارئ الحروف البصري)",
|
||||
"com_agents_file_context_disabled": "يحب أولاً إنشاء الوكيل قبل رفع الملف لمحلل سياق الملف",
|
||||
"com_agents_file_context_info": "الملفات المرفوعة كـ \"سياق\" تتم معالجتها باستخدام قارئ الحروف البصري (OCR) لاستخراج النص، والذي يُضاف بعد ذلك إلى التعليمات الموجِهة للوكيل. مثالية للوثائق والصور التي تحتوي على نص أو ملفات PDF حيث تحتاج إلى المحتوى النصي الكامل للملف.",
|
||||
"com_agents_file_search_disabled": "يجب إنشاء الوكيل قبل تحميل الملفات للبحث في الملفات.",
|
||||
"com_agents_file_search_info": "عند التمكين، سيتم إعلام الوكيل بأسماء الملفات المدرجة أدناه بالضبط، مما يتيح له استرجاع السياق ذي الصلة من هذه الملفات.",
|
||||
"com_agents_instructions_placeholder": "التعليمات النظامية التي يستخدمها الوكيل",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "S'ha produït un error en crear el teu agent.",
|
||||
"com_agents_description_placeholder": "Opcional: Descriu el teu Agent aquí",
|
||||
"com_agents_enable_file_search": "Habilita la Cerca de Fitxers",
|
||||
"com_agents_file_context": "Context de Fitxer (OCR)",
|
||||
"com_agents_file_context_disabled": "Cal crear l'agent abans de pujar fitxers per al Context de Fitxer.",
|
||||
"com_agents_file_context_info": "Els fitxers pujats com a \"Context\" es processen amb OCR per extreure'n el text, que s'afegeix a les instruccions de l'Agent. Ideal per a documents, imatges amb text o PDFs on cal el contingut complet del fitxer.",
|
||||
"com_agents_file_search_disabled": "Cal crear l'agent abans de pujar fitxers per a la Cerca de Fitxers.",
|
||||
"com_agents_file_search_info": "Quan està habilitat, l'agent serà informat dels noms exactes dels fitxers llistats a continuació, i podrà recuperar-ne el context rellevant.",
|
||||
"com_agents_instructions_placeholder": "Les instruccions de sistema que utilitza l'agent",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "Der opstod en fejl ved oprettelsen af din agent.",
|
||||
"com_agents_description_placeholder": "Valgfrit: Beskriv din agent her",
|
||||
"com_agents_enable_file_search": "Aktivér filsøgning",
|
||||
"com_agents_file_context": "Filkontekst (OCR)",
|
||||
"com_agents_file_context_disabled": "Agenten skal oprettes, før der uploades filer til File Context.",
|
||||
"com_agents_file_context_info": "Filer, der uploades som \"Context\", behandles ved hjælp af OCR for at udtrække tekst, som derefter føjes til agentens instruktioner. Ideel til dokumenter, billeder med tekst eller PDF'er, hvor du har brug for det fulde tekstindhold i en fil.",
|
||||
"com_agents_file_search_disabled": "Agent skal oprettes inden uploading af filer til filsøgning.",
|
||||
"com_agents_file_search_info": "Når den er aktiveret, får agenten besked om de nøjagtige filnavne, der er anført nedenfor, så den kan hente relevant kontekst fra disse filer.",
|
||||
"com_agents_instructions_placeholder": "De systeminstruktioner, som agenten bruger",
|
||||
|
|
|
@ -59,9 +59,7 @@
|
|||
"com_agents_error_timeout_suggestion": "Bitte überprüfe deine Internetverbindung und versuche es erneut.",
|
||||
"com_agents_error_timeout_title": "Verbindungs-Timeout",
|
||||
"com_agents_error_title": "Es ist ein Fehler aufgetreten",
|
||||
"com_agents_file_context": "Datei-Kontext (OCR)",
|
||||
"com_agents_file_context_disabled": "Der Agent muss vor dem Hochladen von Dateien für den Datei-Kontext erstellt werden.",
|
||||
"com_agents_file_context_info": "Als „Kontext“ hochgeladene Dateien werden mit OCR verarbeitet, um Text zu extrahieren, der dann den Anweisungen des Agenten hinzugefügt wird. Ideal für Dokumente, Bilder mit Text oder PDFs, wenn Sie den vollständigen Textinhalt einer Datei benötigen",
|
||||
"com_agents_file_search_disabled": "Der Agent muss erstellt werden, bevor Dateien für die Dateisuche hochgeladen werden können.",
|
||||
"com_agents_file_search_info": "Wenn aktiviert, wird der Agent über die unten aufgelisteten exakten Dateinamen informiert und kann dadurch relevante Informationen aus diesen Dateien abrufen",
|
||||
"com_agents_grid_announcement": "Zeige {{count}} Agenten in der Kategorie {{category}}",
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
"com_agents_category_sales_description": "Agents focused on sales processes, customer relations",
|
||||
"com_agents_category_tab_label": "{{category}} category, {{position}} of {{total}}",
|
||||
"com_agents_category_tabs_label": "Agent Categories",
|
||||
"com_agents_chat_with": "Chat with {{name}}",
|
||||
"com_agents_clear_search": "Clear search",
|
||||
"com_agents_code_interpreter": "When enabled, allows your agent to leverage the LibreChat Code Interpreter API to run generated code, including file processing, securely. Requires a valid API key.",
|
||||
"com_agents_code_interpreter_title": "Code Interpreter API",
|
||||
|
@ -59,9 +60,9 @@
|
|||
"com_agents_error_timeout_suggestion": "Please check your internet connection and try again.",
|
||||
"com_agents_error_timeout_title": "Connection Timeout",
|
||||
"com_agents_error_title": "Something went wrong",
|
||||
"com_agents_file_context": "File Context (OCR)",
|
||||
"com_agents_file_context_description": "Files uploaded as \"Context\" are parsed as text to supplement the Agent's instructions. If OCR is available, or if configured for the uploaded filetype, the process is used to extract text. Ideal for documents, images with text, or PDFs where you need the full text content of a file",
|
||||
"com_agents_file_context_disabled": "Agent must be created before uploading files for File Context.",
|
||||
"com_agents_file_context_info": "Files uploaded as \"Context\" are processed using OCR to extract text, which is then added to the Agent's instructions. Ideal for documents, images with text, or PDFs where you need the full text content of a file",
|
||||
"com_agents_file_context_label": "File Context",
|
||||
"com_agents_file_search_disabled": "Agent must be created before uploading files for File Search.",
|
||||
"com_agents_file_search_info": "When enabled, the agent will be informed of the exact filenames listed below, allowing it to retrieve relevant context from these files.",
|
||||
"com_agents_grid_announcement": "Showing {{count}} agents in {{category}} category",
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
"com_agents_create_error": "Hubo un error al crear su agente.",
|
||||
"com_agents_description_placeholder": "Opcional: Describa su Agente aquí",
|
||||
"com_agents_enable_file_search": "Habilitar búsqueda de archivos",
|
||||
"com_agents_file_context": "Archivo de contexto (OCR)",
|
||||
"com_agents_file_context_disabled": "Es necesario crear el Agente antes de subir archivos.",
|
||||
"com_agents_file_search_disabled": "Es necesario crear el Agente antes de subir archivos para la Búsqueda de Archivos.",
|
||||
"com_agents_file_search_info": "Cuando está habilitado, se informará al agente sobre los nombres exactos de los archivos listados a continuación, permitiéndole recuperar el contexto relevante de estos archivos.",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "Agendi loomisel tekkis viga.",
|
||||
"com_agents_description_placeholder": "Valikuline: Kirjelda oma agenti siin",
|
||||
"com_agents_enable_file_search": "Luba failiotsing",
|
||||
"com_agents_file_context": "Faili kontekst (OCR)",
|
||||
"com_agents_file_context_disabled": "Agent tuleb luua enne failide üleslaadimist failikontekstiks.",
|
||||
"com_agents_file_context_info": "Failid, mis on laetud \"konteksti\", töödeldakse OCR-iga tekstiks ja lisatakse seejärel agendi juhistesse. See on eriti kasulik dokumentide, tekstiga piltide või PDF-ide puhul, kui vaja läheb kogu faili tekstilist sisu.",
|
||||
"com_agents_file_search_disabled": "Agent tuleb luua enne failide üleslaadimist failiotsinguks.",
|
||||
"com_agents_file_search_info": "Kui see on lubatud, teavitatakse agenti täpselt allpool loetletud failinimedest, mis võimaldab tal nendest failidest asjakohast konteksti hankida.",
|
||||
"com_agents_instructions_placeholder": "Süsteemijuhised, mida agent kasutab",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "در ایجاد کارگزار شما خطایی روی داد.",
|
||||
"com_agents_description_placeholder": "اختیاری: کارگزار خود را در اینجا شرح دهید",
|
||||
"com_agents_enable_file_search": "جستجوی فایل را فعال کنید",
|
||||
"com_agents_file_context": "زمینه فایل (OCR)",
|
||||
"com_agents_file_context_disabled": "کارگزار باید قبل از آپلود فایل ها برای File Context ایجاد شود.",
|
||||
"com_agents_file_context_info": "فایلهای آپلود شده بهعنوان «Context» با استفاده از OCR برای استخراج متن پردازش میشوند، که سپس به دستورالعملهای کارگزار اضافه میشود. ایده آل برای اسناد، تصاویر با متن یا PDF که در آن به محتوای متن کامل یک فایل نیاز دارید",
|
||||
"com_agents_file_search_disabled": "کارگزار باید قبل از آپلود فایل ها برای جستجوی فایل ایجاد شود.",
|
||||
"com_agents_file_search_info": "وقتی فعال باشد، کارگزار از نامهای دقیق فایلهای فهرستشده در زیر مطلع میشود و به او اجازه میدهد متن مربوطه را از این فایلها بازیابی کند.",
|
||||
"com_agents_instructions_placeholder": "دستورالعمل های سیستمی که کارگزار استفاده می کند",
|
||||
|
|
|
@ -4,9 +4,7 @@
|
|||
"com_agents_create_error": "Agentin luonnissa tapahtui virhe.",
|
||||
"com_agents_description_placeholder": "Valinnainen: Lisää tähän agentin kuvaus",
|
||||
"com_agents_enable_file_search": "Käytä Tiedostohakua",
|
||||
"com_agents_file_context": "Tiedostokonteksti (OCR)",
|
||||
"com_agents_file_context_disabled": "Agentti täytyy luoda ennen tiedostojen lataamista Tiedostokontekstiin",
|
||||
"com_agents_file_context_info": "\"Kontekstiksi\" ladatuista tiedostoista luetaan sisältö tekstintunnistuksen (OCR) avulla agentin ohjeisiin lisättäväksi. Soveltuu erityisesti asiakirjojen, tekstiä sisältävien kuvien tai PDF-tiedostojen käsittelyyn, kun haluat hyödyntää tiedoston koko tekstisisällön.",
|
||||
"com_agents_file_search_disabled": "Agentti täytyy luoda ennen tiedostojen lataamista Tiedostohakuun",
|
||||
"com_agents_file_search_info": "Asetuksen ollessa päällä agentti saa tiedoksi alla luetellut tiedostonimet, jolloin se voi hakea vastausten pohjaksi asiayhteyksiä tiedostojen sisällöistä.",
|
||||
"com_agents_instructions_placeholder": "Agentin käyttämät järjestelmäohjeet",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "Une erreur s'est produite lors de la création de votre agent.",
|
||||
"com_agents_description_placeholder": "Décrivez votre Agent ici (facultatif)",
|
||||
"com_agents_enable_file_search": "Activer la recherche de fichiers",
|
||||
"com_agents_file_context": "Contexte du fichier (OCR)",
|
||||
"com_agents_file_context_disabled": "L'agent doit être créé avant de charger des fichiers pour le contexte de fichiers.",
|
||||
"com_agents_file_context_info": "Les fichiers téléchargés en tant que \"Contexte\" sont traités à l'aide de l'OCR pour en extraire le texte, qui est ensuite ajouté aux instructions de l'agent. Idéal pour les documents, les images contenant du texte ou les PDF pour lesquels vous avez besoin du contenu textuel complet d'un fichier.",
|
||||
"com_agents_file_search_disabled": "L'agent doit être créé avant de pouvoir télécharger des fichiers pour la Recherche de Fichiers.",
|
||||
"com_agents_file_search_info": "Lorsque cette option est activée, l'agent sera informé des noms exacts des fichiers listés ci-dessous, lui permettant d'extraire le contexte pertinent de ces fichiers.",
|
||||
"com_agents_instructions_placeholder": "Les instructions système que l'agent utilise",
|
||||
|
|
|
@ -57,9 +57,7 @@
|
|||
"com_agents_error_timeout_suggestion": "אנא בדוק את חיבור האינטרנט שלך ונסה שוב",
|
||||
"com_agents_error_timeout_title": "זמן התפוגה של החיבור",
|
||||
"com_agents_error_title": "משהו השתבש",
|
||||
"com_agents_file_context": "קבצי הקשר (OCR)",
|
||||
"com_agents_file_context_disabled": "יש ליצור סוכן לפני שמעלים קבצים עבור הקשר קבצים",
|
||||
"com_agents_file_context_info": "קבצים שהועלו כ\"הקשר\" מעובדים באמצעות OCR (זיהוי אופטי של תווים) כדי להפיק טקסט אשר לאחר מכן מתווסף להוראות הסוכן. אידיאלי עבור מסמכים, תמונות עם טקסט או קובצי PDF בהם אתה צריך את התוכן הטקסטואלי המלא של הקובץ.",
|
||||
"com_agents_file_search_disabled": "יש ליצור את הסוכן לפני העלאת קבצים לחיפוש",
|
||||
"com_agents_file_search_info": "כאשר הסוכן מופעל הוא יקבל מידע על שמות הקבצים המפורטים להלן, כדי שהוא יוכל לאחזר את הקשר רלוונטי.",
|
||||
"com_agents_grid_announcement": "מציג {{count}} סוכנים מהקטגוריה {{category}}",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "Hiba történt az ügynök létrehozása során.",
|
||||
"com_agents_description_placeholder": "Opcionális: Itt írja le az ügynökét",
|
||||
"com_agents_enable_file_search": "Fájlkeresés engedélyezése",
|
||||
"com_agents_file_context": "Fájlkontextus (OCR)",
|
||||
"com_agents_file_context_disabled": "Az ügynököt először létre kell hozni, mielőtt fájlokat tölthet fel a fájlkontextushoz.",
|
||||
"com_agents_file_context_info": "A „Kontextusként” feltöltött fájlokat OCR-rel dolgozzuk fel a szöveg kinyeréséhez, amelyet aztán az ügynök utasításaihoz adunk. Ideális dokumentumokhoz, szöveges képekhez vagy PDF-ekhez, ahol a teljes szövegtartalomra szükség van.",
|
||||
"com_agents_file_search_disabled": "Az ügynököt először létre kell hozni, mielőtt fájlokat tölthet fel a fájlkereséshez.",
|
||||
"com_agents_file_search_info": "Ha engedélyezve van, az ügynök értesül az alább felsorolt pontos fájlnevekről, lehetővé téve számára, hogy releváns kontextust szerezzen ezekből a fájlokból.",
|
||||
"com_agents_instructions_placeholder": "Az ügynök által használt rendszerutasítások",
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
"com_agents_create_error": "Ձեր գործակալը ստեղծելիս սխալ է տեղի ունեցել։",
|
||||
"com_agents_description_placeholder": "Կամընտրական: Այստեղ նկարագրեք ձեր գործակալին",
|
||||
"com_agents_enable_file_search": "Միացնել ֆայլերի որոնումը",
|
||||
"com_agents_file_context": "Ֆայլի ճանաչում (OCR)",
|
||||
"com_agents_file_context_disabled": "Գործակալը պետք է ստեղծվի ֆայլերը վերբեռնելուց առաջ ֆայլերի ճանաչման համար:",
|
||||
"com_agents_mcp_icon_size": "Նվազագույն չափը՝ 128 x 128 px",
|
||||
"com_agents_mcp_name_placeholder": "Անհատական գործիք",
|
||||
|
|
|
@ -8,9 +8,7 @@
|
|||
"com_agents_create_error": "Si è verificato un errore durante la creazione del tuo agente.",
|
||||
"com_agents_description_placeholder": "Opzionale: Descrivi qui il tuo Agente",
|
||||
"com_agents_enable_file_search": "Abilita Ricerca File",
|
||||
"com_agents_file_context": "Contesto del File (OCR)",
|
||||
"com_agents_file_context_disabled": "L'agente deve essere creato prima di caricare i file per il Contesto del File.",
|
||||
"com_agents_file_context_info": "I file caricati come \"Contesto\" vengono elaborati tramite OCR per estrarre il testo, che viene poi aggiunto alle istruzioni dell'Agente. Ideale per documenti, immagini con testo o PDF in cui è necessario il contenuto di testo completo di un file",
|
||||
"com_agents_file_search_disabled": "L'Agente deve essere creato prima di caricare file per la Ricerca File.",
|
||||
"com_agents_file_search_info": "Quando abilitato, l'agente verrà informato dei nomi esatti dei file elencati di seguito, permettendogli di recuperare il contesto pertinente da questi file.",
|
||||
"com_agents_instructions_placeholder": "Le istruzioni di sistema utilizzate dall'agente",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "エージェントの作成中にエラーが発生しました。",
|
||||
"com_agents_description_placeholder": "オプション: エージェントの説明を入力してください",
|
||||
"com_agents_enable_file_search": "ファイル検索を有効にする",
|
||||
"com_agents_file_context": "ファイルコンテキスト(OCR)",
|
||||
"com_agents_file_context_disabled": "ファイル検索用のファイルをアップロードする前に、エージェントを作成する必要があります。",
|
||||
"com_agents_file_context_info": "「コンテキスト」としてアップロードされたファイルは、OCR処理によってテキストが抽出され、エージェントの指示に追加されます。ファイルの全文コンテンツが必要な文書、テキストを含む画像、PDFに最適です。",
|
||||
"com_agents_file_search_disabled": "ファイル検索用のファイルをアップロードする前に、エージェントを作成する必要があります。",
|
||||
"com_agents_file_search_info": "有効にすると、エージェントは以下に表示されているファイル名を正確に認識し、それらのファイルから関連する情報を取得することができます。",
|
||||
"com_agents_instructions_placeholder": "エージェントが使用するシステムの指示",
|
||||
|
|
|
@ -9,9 +9,7 @@
|
|||
"com_agents_create_error": "თქვენი აგენტის შექმნისასდაფიქსირდა შეცდომა",
|
||||
"com_agents_description_placeholder": "არასავალდებულო: აღწერეთ თქვენი აგენტი",
|
||||
"com_agents_enable_file_search": "ფაილების ძიების ჩართვა",
|
||||
"com_agents_file_context": "ფაილის კონტექსტი (OCR)",
|
||||
"com_agents_file_context_disabled": "ფაილის კონტექსტისთვის, ატვირთვამდე უნდა შეიქმნას აგენტი.",
|
||||
"com_agents_file_context_info": "„კონტექსტის“ სახით ატვირთული ფაილები დამუშავდება OCR-ის გამოყენებით ტექსტის ამოსაღებად, რომელიც შემდეგ დაემატება აგენტის ინსტრუქციებს. იდეალურია დოკუმენტებისთვის, ტექსტიანი სურათებისთვის ან PDF ფაილებისთვის, სადაც გჭირდებათ ფაილის შინაარსი.",
|
||||
"com_agents_instructions_placeholder": "სისტემის ინსტრუქციები, რომლებსაც გამოიყენებს აგენტი",
|
||||
"com_agents_mcp_description_placeholder": "რამდენიმე სიტყვით ახსენით დავალება",
|
||||
"com_agents_mcp_icon_size": "მინიმალური ზომა 128 x 128 პიქსელი",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "에이전트 생성 중 오류가 발생했습니다",
|
||||
"com_agents_description_placeholder": "선택 사항: 여기에 에이전트를 설명하세요",
|
||||
"com_agents_enable_file_search": "파일 검색 활성화",
|
||||
"com_agents_file_context": "파일 컨텍스트 (OCR)",
|
||||
"com_agents_file_context_disabled": "파일 컨텍스트를 위해 파일을 업로드하기 전에, 에이전트가 먼저 생성되어야 합니다.",
|
||||
"com_agents_file_context_info": "컨텍스트(Context)로 업로드 된 파일은 OCR을 사용하여 텍스트를 추출하고, 이 텍스트는 에이전트의 지시사항에 추가됩니다. 문서, 텍스트가 포함된 이미지, 또는 파일의 전체 내용이 필요한 PDF에 이상적입니다.",
|
||||
"com_agents_file_search_disabled": "파일 검색을 위해 파일을 업로드하기 전에 에이전트를 먼저 생성해야 합니다",
|
||||
"com_agents_file_search_info": "활성화하면 에이전트가 아래 나열된 파일명들을 정확히 인식하여 해당 파일들에서 관련 내용을 검색할 수 있습니다.",
|
||||
"com_agents_instructions_placeholder": "에이전트가 사용하는 시스템 지침",
|
||||
|
|
|
@ -59,9 +59,7 @@
|
|||
"com_agents_error_timeout_suggestion": "Lūdzu, pārbaudiet interneta savienojumu un mēģiniet vēlreiz.",
|
||||
"com_agents_error_timeout_title": "Savienojumu neizdevās izveidot",
|
||||
"com_agents_error_title": "Kaut kas nogāja greizi",
|
||||
"com_agents_file_context": "Failu konteksts (OCR)",
|
||||
"com_agents_file_context_disabled": "Pirms failu augšupielādes failu kontekstam ir jāizveido aģents.",
|
||||
"com_agents_file_context_info": "Faili, kas augšupielādēti kā “Konteksts”, tiek apstrādāti, izmantojot OCR, lai iegūtu tekstu, kas pēc tam tiek pievienots aģenta norādījumiem. Ideāli piemērots dokumentiem, attēliem ar tekstu vai PDF failiem, kuriem nepieciešams pilns faila teksta saturs.",
|
||||
"com_agents_file_search_disabled": "Lai varētu iespējot faila augšupielādi informācijas iegūšanai no tā ir jāizveido aģents.",
|
||||
"com_agents_file_search_info": "Kad šī opcija ir iespējota, aģents tiks informēts par precīziem tālāk norādītajiem failu nosaukumiem, ļaujot tam izgūt atbilstošu kontekstu no šiem failiem.",
|
||||
"com_agents_grid_announcement": "Rādu {{count}} aģentus {{category}} kategorijā",
|
||||
|
@ -654,7 +652,7 @@
|
|||
"com_ui_agent_chain_info": "Ļauj izveidot aģentu secību ķēdes. Katrs aģents var piekļūt iepriekšējo ķēdē esošo aģentu izvades datiem. Balstīts uz \"Aģentu sajaukuma\" arhitektūru, kurā aģenti izmanto iepriekšējos izvades datus kā palīginformāciju.",
|
||||
"com_ui_agent_chain_max": "Jūs esat sasniedzis maksimālo skaitu {{0}} aģentu.",
|
||||
"com_ui_agent_delete_error": "Dzēšot aģentu, radās kļūda.",
|
||||
"com_ui_agent_deleted": "Aģents veiksmīgi izdzēsts",
|
||||
"com_ui_agent_deleted": "Aģents veiksmīgi dzēsts",
|
||||
"com_ui_agent_duplicate_error": "Dublējot aģentu, radās kļūda.",
|
||||
"com_ui_agent_duplicated": "Aģents veiksmīgi dublēts",
|
||||
"com_ui_agent_name_is_required": "Obligāti jānorāda aģenta nosaukums",
|
||||
|
@ -695,7 +693,7 @@
|
|||
"com_ui_ascending": "Augošā",
|
||||
"com_ui_assistant": "Asistents",
|
||||
"com_ui_assistant_delete_error": "Dzēšot asistentu, radās kļūda.",
|
||||
"com_ui_assistant_deleted": "Asistents ir veiksmīgi izdzēsts.",
|
||||
"com_ui_assistant_deleted": "Asistents ir veiksmīgi dzēsts.",
|
||||
"com_ui_assistants": "Asistenti",
|
||||
"com_ui_assistants_output": "Asistentu izvade",
|
||||
"com_ui_at_least_one_owner_required": "Nepieciešams vismaz viens īpašnieks",
|
||||
|
@ -738,7 +736,7 @@
|
|||
"com_ui_bookmarks_create_success": "Grāmatzīme veiksmīgi izveidota",
|
||||
"com_ui_bookmarks_delete": "Dzēst grāmatzīmi",
|
||||
"com_ui_bookmarks_delete_error": "Dzēšot grāmatzīmi, radās kļūda.",
|
||||
"com_ui_bookmarks_delete_success": "Grāmatzīme veiksmīgi izdzēsta",
|
||||
"com_ui_bookmarks_delete_success": "Grāmatzīme veiksmīgi dzēsta",
|
||||
"com_ui_bookmarks_description": "Apraksts",
|
||||
"com_ui_bookmarks_edit": "Rediģēt grāmatzīmi",
|
||||
"com_ui_bookmarks_filter": "Filtrēt grāmatzīmes...",
|
||||
|
@ -825,7 +823,7 @@
|
|||
"com_ui_delete_mcp": "Dzēst MCP",
|
||||
"com_ui_delete_mcp_confirm": "Vai tiešām vēlaties dzēst šo MCP serveri?",
|
||||
"com_ui_delete_mcp_error": "Neizdevās izdzēst MCP serveri.",
|
||||
"com_ui_delete_mcp_success": "MCP serveris veiksmīgi izdzēsts",
|
||||
"com_ui_delete_mcp_success": "MCP serveris veiksmīgi dzēsts",
|
||||
"com_ui_delete_memory": "Dzēst atmiņu",
|
||||
"com_ui_delete_not_allowed": "Dzēšanas darbība nav atļauta",
|
||||
"com_ui_delete_prompt": "Vai dzēst uzvedni?",
|
||||
|
@ -847,7 +845,7 @@
|
|||
"com_ui_download_artifact": "Lejupielādēt artefaktu",
|
||||
"com_ui_download_backup": "Lejupielādēt rezerves kodus",
|
||||
"com_ui_download_backup_tooltip": "Pirms turpināt, lejupielādējiet rezerves kodus. Tie būs nepieciešami, lai atgūtu piekļuvi, ja pazaudēsiet autentifikatora ierīci.",
|
||||
"com_ui_download_error": "Kļūda, lejupielādējot failu. Iespējams, fails ir izdzēsts.",
|
||||
"com_ui_download_error": "Kļūda, lejupielādējot failu. Iespējams, fails ir dzēsts.",
|
||||
"com_ui_drag_drop": "Ievietojiet šeit jebkuru failu, lai pievienotu to sarunai",
|
||||
"com_ui_dropdown_variables": "Nolaižamās izvēlnes mainīgie:",
|
||||
"com_ui_dropdown_variables_info": "Izveidojiet pielāgotas nolaižamās izvēlnes savām uzvednēm:{{variable_name:option1|option2|option3}}` (mainīgā_nosakums:opcija1|opcija2|opcija3)",
|
||||
|
@ -1162,8 +1160,8 @@
|
|||
"com_ui_share_link_to_chat": "Kopīgot saiti sarunai",
|
||||
"com_ui_share_update_message": "Jūsu vārds, pielāgotie norādījumi un visas ziņas, ko pievienojat pēc kopīgošanas, paliek privātas.",
|
||||
"com_ui_share_var": "Kopīgot {{0}}",
|
||||
"com_ui_shared_link_bulk_delete_success": "Koplietotās saites ir veiksmīgi izdzēstas.",
|
||||
"com_ui_shared_link_delete_success": "Koplietotā saite ir veiksmīgi izdzēsta.",
|
||||
"com_ui_shared_link_bulk_delete_success": "Koplietotās saites ir veiksmīgi dzēstas.",
|
||||
"com_ui_shared_link_delete_success": "Koplietotā saite ir veiksmīgi dzēsta.",
|
||||
"com_ui_shared_link_not_found": "Kopīgotā saite nav atrasta",
|
||||
"com_ui_shared_prompts": "Koplietotas uzvednes",
|
||||
"com_ui_shop": "Iepirkšanās",
|
||||
|
|
|
@ -59,9 +59,7 @@
|
|||
"com_agents_error_timeout_suggestion": "Sjekk internettforbindelsen din og prøv igjen.",
|
||||
"com_agents_error_timeout_title": "Tidsavbrudd for tilkobling",
|
||||
"com_agents_error_title": "Noe gikk galt",
|
||||
"com_agents_file_context": "Filkontekst (OCR)",
|
||||
"com_agents_file_context_disabled": "Agenten må være opprettet før du kan laste opp filer for filkontekst.",
|
||||
"com_agents_file_context_info": "Filer lastet opp som \"kontekst\" behandles med OCR for å trekke ut tekst, som deretter legges til i agentens instruksjoner. Dette er ideelt for dokumenter, bilder med tekst eller PDF-er der du trenger hele tekstinnholdet.",
|
||||
"com_agents_file_search_disabled": "Agenten må være opprettet før du kan laste opp filer for filsøk.",
|
||||
"com_agents_file_search_info": "Når dette er aktivert, vil agenten bruke de eksakte filnavnene listet nedenfor for å hente relevant kontekst fra disse filene.",
|
||||
"com_agents_grid_announcement": "Viser {{count}} agenter i kategorien {{category}}.",
|
||||
|
|
|
@ -21,9 +21,7 @@
|
|||
"com_agents_error_bad_request_message": "De aanvraag kon niet worden verwerkt.",
|
||||
"com_agents_error_bad_request_suggestion": "Controleer uw invoer en probeer het opnieuw.",
|
||||
"com_agents_error_invalid_request": "Ongeldige aanvraag",
|
||||
"com_agents_file_context": "File Context (OCR)",
|
||||
"com_agents_file_context_disabled": "Agent moet worden aangemaakt voordat bestanden worden geüpload voor File Context",
|
||||
"com_agents_file_context_info": "Bestanden die als \"Context\" worden geüpload, worden verwerkt met OCR voor tekstherkenning. De tekst wordt daarna toegevoegd aan de instructies van de Agent. Ideaal voor documenten, afbeeldingen met tekst of PDF's waarvan je de volledige tekstinhoud nodig hebt.\"",
|
||||
"com_agents_file_search_disabled": "Maak eerst een Agent aan voordat je bestanden uploadt voor File Search.",
|
||||
"com_agents_file_search_info": "Als deze functie is ingeschakeld, krijgt de agent informatie over de exacte bestandsnamen die hieronder staan vermeld, zodat deze relevante context uit deze bestanden kan ophalen.",
|
||||
"com_agents_instructions_placeholder": "De systeeminstructies die de agent gebruikt",
|
||||
|
|
|
@ -9,9 +9,7 @@
|
|||
"com_agents_create_error": "Wystąpił błąd podczas tworzenia agenta.",
|
||||
"com_agents_description_placeholder": "Opcjonalnie: Opisz swojego agenta tutaj",
|
||||
"com_agents_enable_file_search": "Włącz wyszukiwanie plików",
|
||||
"com_agents_file_context": "Kontest Pliku (OCR)",
|
||||
"com_agents_file_context_disabled": "Agent musi zostać utworzony przed przesłaniem plików dla Kontekstu Plików",
|
||||
"com_agents_file_context_info": "Pliki przesłane jako \"Kontekst\" są przetworzone przez OCR by wydobyć tekst, który potem jest dodany do instrukcji Agenta. Jest to idealne dla dokumentów, obrazów z tekstem oraz plików PDF, gdzie potrzebujesz całego tekstu z pliku.",
|
||||
"com_agents_file_search_disabled": "Agent musi zostać utworzony przed przesłaniem plików do wyszukiwania.",
|
||||
"com_agents_file_search_info": "Po włączeniu agent zostanie poinformowany o dokładnych nazwach plików wymienionych poniżej, co pozwoli mu na pobranie odpowiedniego kontekstu z tych plików.",
|
||||
"com_agents_instructions_placeholder": "Instrukcje systemowe używane przez agenta",
|
||||
|
|
|
@ -37,9 +37,7 @@
|
|||
"com_agents_error_server_title": "Erro no servidor",
|
||||
"com_agents_error_timeout_title": "A conexão expirou",
|
||||
"com_agents_error_title": "Algo deu errado",
|
||||
"com_agents_file_context": "Contexto de arquivo (OCR)",
|
||||
"com_agents_file_context_disabled": "O agente deve ser criado antes de carregar arquivos para o Contexto de Arquivo.",
|
||||
"com_agents_file_context_info": "Os arquivos carregados como \"Contexto\" são processados usando OCR para extrair texto, que é então adicionado às instruções do Agente. Ideal para documentos, imagens com texto ou PDFs onde você precisa do conteúdo de texto completo de um arquivo",
|
||||
"com_agents_file_search_disabled": "O agente deve ser criado antes de carregar arquivos para Pesquisa de Arquivos.",
|
||||
"com_agents_file_search_info": "Quando ativado, o agente será informado dos nomes exatos dos arquivos listados abaixo, permitindo que ele recupere o contexto relevante desses arquivos.",
|
||||
"com_agents_instructions_placeholder": "As instruções do sistema que o agente usa",
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
"com_agents_create_error": "Houve um erro ao criar seu agente.",
|
||||
"com_agents_description_placeholder": "Opcional: Descreva seu Agente aqui",
|
||||
"com_agents_enable_file_search": "Permitir Pesquisa de Ficheiros.",
|
||||
"com_agents_file_context": "Contexto de Ficheiro (OCR)",
|
||||
"com_agents_file_context_disabled": "Um agente deve ser criado antes de tentar fazer upload para contexto.",
|
||||
"com_agents_file_search_disabled": "O Agente deve ser criado antes carregar ficheiros para Pesquisar.",
|
||||
"com_agents_file_search_info": "Quando ativo, os agentes serão informados dos nomes de ficheiros listados abaixo, permitindo aos mesmos a extração de contexto relevante.",
|
||||
|
|
|
@ -9,9 +9,7 @@
|
|||
"com_agents_create_error": "Произошла ошибка при создании вашего агента",
|
||||
"com_agents_description_placeholder": "Необязательно: описание вашего агента",
|
||||
"com_agents_enable_file_search": "Включить поиск файлов",
|
||||
"com_agents_file_context": "Контекст файла (OCR)",
|
||||
"com_agents_file_context_disabled": "Агент должен быть создан перед загрузкой файлов для контекста файла",
|
||||
"com_agents_file_context_info": "Файлы, загруженные как «Контекст», обрабатываются с использованием OCR для извлечения текста, который затем добавляется в инструкции агента. Идеально подходит для документов, изображений с текстом или PDF-файлов, где требуется полный текстовый контент.",
|
||||
"com_agents_file_search_disabled": "Для загрузки файлов в Поиск необходимо сначала создать агента",
|
||||
"com_agents_file_search_info": "При включении агент получит доступ к точным названиям файлов, перечисленным ниже, что позволит ему извлекать из них релевантный контекст.",
|
||||
"com_agents_instructions_placeholder": "Системные инструкции, используемые агентом",
|
||||
|
|
|
@ -53,9 +53,7 @@
|
|||
"com_agents_error_suggestion_generic": "Försök att uppdatera sidan eller försök igen senare.",
|
||||
"com_agents_error_timeout_suggestion": "Kontrollera din internetanslutning och försök igen.",
|
||||
"com_agents_error_title": "Något gick fel",
|
||||
"com_agents_file_context": "Filkontext (OCR)",
|
||||
"com_agents_file_context_disabled": "Agent måste skapas innan filer laddas upp för filkontext.",
|
||||
"com_agents_file_context_info": "Filer som laddas upp som kontext bearbetas med OCR för att extrahera text, som sedan läggs till i agentens instruktioner. Lämpligt för dokument, bilder med text eller PDF-filer där du behöver hela textinnehållet i en fil",
|
||||
"com_agents_file_search_disabled": "Agenten måste skapas innan du laddar upp filer.",
|
||||
"com_agents_file_search_info": "När detta är aktiverat kommer agenten se och hämta relevant information från filnamnen nedan. ",
|
||||
"com_agents_grid_announcement": "Visar {{count}} agenter i {{category}} kategorin",
|
||||
|
|
|
@ -10,9 +10,7 @@
|
|||
"com_agents_create_error": "เกิดข้อผิดพลาดในการสร้างเอเจนต์ของคุณ",
|
||||
"com_agents_description_placeholder": "ตัวเลือกเพิ่มเติม: อธิบายเอเจนต์ของคุณที่นี่",
|
||||
"com_agents_enable_file_search": "เปิดใช้งานการค้นหาไฟล์",
|
||||
"com_agents_file_context": "ข้อความจากไฟล์ (OCR)",
|
||||
"com_agents_file_context_disabled": "ต้องสร้าง เอเจนท์ ก่อนอัปโหลดไฟล์",
|
||||
"com_agents_file_context_info": "ไฟล์ที่อัปโหลดเป็น “Context” (บริบท) จะถูกประมวลผลด้วยเทคโนโลยี OCR เพื่อดึงข้อความออกมา แล้วนำไปเพิ่มในคำสั่งของเอเจนต์ เหมาะอย่างยิ่งสำหรับเอกสาร ภาพที่มีข้อความ หรือไฟล์ PDF ที่คุณต้องการข้อความทั้งหมดของไฟล์",
|
||||
"com_agents_file_search_disabled": "ต้องสร้างเอเจนต์ก่อนที่จะอัปโหลดไฟล์สำหรับใช้ในการค้นหาไฟล์",
|
||||
"com_agents_file_search_info": "เมื่อเปิดใช้งาน เอเจนต์จะได้รับข้อมูลเกี่ยวกับชื่อไฟล์ที่ระบุไว้ด้านล่างอย่างถูกต้อง ทำให้สามารถดึงข้อมูลที่เกี่ยวข้องจากไฟล์เหล่านี้ได้",
|
||||
"com_agents_instructions_placeholder": "คำสั่งของระบบที่เอเจนต์ใช้งาน",
|
||||
|
|
|
@ -59,9 +59,7 @@
|
|||
"com_agents_error_timeout_suggestion": "Перевірте підключення до інтернету та спробуйте ще раз.",
|
||||
"com_agents_error_timeout_title": "Час очікування з'єднання вийшов",
|
||||
"com_agents_error_title": "Щось пішло не так",
|
||||
"com_agents_file_context": "Контекст файлу (OCR)",
|
||||
"com_agents_file_context_disabled": "Спочатку потрібно створити агента перед завантаженням файлів для контексту файлу",
|
||||
"com_agents_file_context_info": "Файли, завантажені як «Контекст», обробляються за допомогою OCR для вилучення тексту, який потім додається до інструкцій агента. Ідеально підходить для документів, зображень з текстом або PDF-файлів, де потрібен повний текстовий вміст.",
|
||||
"com_agents_file_search_disabled": "Для завантаження файлів у Пошук спочатку потрібно створити агента",
|
||||
"com_agents_file_search_info": "При ввімкненні агент отримає доступ до точних назв файлів, перелічених нижче, що дозволить йому отримувати з них відповідний контекст.",
|
||||
"com_agents_grid_announcement": "Показано {{count}} агентів у категорії {{category}}",
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
"com_agents_description_placeholder": "Tùy chọn: Mô tả Agent của bạn ở đây",
|
||||
"com_agents_enable_file_search": "Bật Tìm kiếm Tệp",
|
||||
"com_agents_file_context_disabled": "Phải tạo Agent trước khi tải tệp lên cho File Context.",
|
||||
"com_agents_file_context_info": "Các tệp được tải lên dưới dạng \"Context\" được xử lý bằng OCR để trích xuất văn bản, sau đó được thêm vào hướng dẫn của Agent. Lý tưởng cho các tài liệu, hình ảnh có văn bản hoặc PDF khi bạn cần nội dung văn bản đầy đủ của tệp",
|
||||
"com_agents_file_search_disabled": "Phải tạo Agent trước khi tải tệp lên cho Tìm kiếm Tệp.",
|
||||
"com_agents_file_search_info": "Khi được bật, Agent sẽ được thông báo về tên tệp chính xác được liệt kê bên dưới, cho phép agent truy xuất ngữ cảnh có liên quan từ các tệp này.",
|
||||
"com_agents_instructions_placeholder": "\nCác hướng dẫn hệ thống mà agent sử dụng",
|
||||
|
|
|
@ -59,9 +59,7 @@
|
|||
"com_agents_error_timeout_suggestion": "请检查您的互联网连接并重试。",
|
||||
"com_agents_error_timeout_title": "连接超时",
|
||||
"com_agents_error_title": "出了点问题",
|
||||
"com_agents_file_context": "文件上下文(OCR)",
|
||||
"com_agents_file_context_disabled": "必须先创建智能体,才能上传文件用于文件上下文。",
|
||||
"com_agents_file_context_info": "作为 ”上下文“ 上传的文件会通过 OCR 处理以提取文本,然后将其添加到智能体的指令中。这非常适合文档、带有文本的图片或 PDF 文件等需要文件完整文本内容的场景。",
|
||||
"com_agents_file_search_disabled": "必须先创建智能体,才能上传文件用于文件搜索。",
|
||||
"com_agents_file_search_info": "启用后,系统会告知代理以下列出的具体文件名,使其能够从这些文件中检索相关内容。",
|
||||
"com_agents_grid_announcement": "显示 {{category}} 类别中的 {{count}} 个智能体",
|
||||
|
|
|
@ -35,9 +35,7 @@
|
|||
"com_agents_enable_file_search": "啟用檔案搜尋",
|
||||
"com_agents_error_bad_request_message": "無法處理該請求",
|
||||
"com_agents_error_bad_request_suggestion": "請檢查您的輸入並再試一次。",
|
||||
"com_agents_file_context": "文件內容 (OCR)",
|
||||
"com_agents_file_context_disabled": "在為檔案上下文上傳檔案之前,必須先建立 Agent。",
|
||||
"com_agents_file_context_info": "以「Context」標記上傳的檔案會使用 OCR 擷取文字,擷取後的文字會被加入到 Agent 的指示中。適合用於文件、含文字的圖片或需要取得檔案完整文字內容的 PDF。",
|
||||
"com_agents_file_search_disabled": "必須先建立代理才能上傳檔案進行檔案搜尋。",
|
||||
"com_agents_file_search_info": "啟用後,代理將會被告知以下列出的確切檔案名稱,使其能夠從這些檔案中擷取相關內容。",
|
||||
"com_agents_instructions_placeholder": "代理程式使用的系統指令",
|
||||
|
|
|
@ -253,7 +253,7 @@ export const validateFiles = ({
|
|||
}
|
||||
|
||||
let mimeTypesToCheck = supportedMimeTypes;
|
||||
if (toolResource === EToolResources.ocr) {
|
||||
if (toolResource === EToolResources.context) {
|
||||
mimeTypesToCheck = [
|
||||
...(fileConfig?.text?.supportedMimeTypes || []),
|
||||
...(fileConfig?.ocr?.supportedMimeTypes || []),
|
||||
|
|
|
@ -62,14 +62,19 @@ export const processAgentOption = ({
|
|||
fileMap?: Record<string, TFile | undefined>;
|
||||
}): TAgentOption => {
|
||||
const isGlobal = _agent?.isPublic ?? false;
|
||||
|
||||
const context_files = _agent?.tool_resources?.context?.file_ids ?? [];
|
||||
if (_agent?.tool_resources?.ocr?.file_ids) {
|
||||
/** Backwards-compatibility */
|
||||
context_files.push(..._agent.tool_resources.ocr.file_ids);
|
||||
}
|
||||
|
||||
const agent: TAgentOption = {
|
||||
...(_agent ?? ({} as Agent)),
|
||||
label: _agent?.name ?? '',
|
||||
value: _agent?.id ?? '',
|
||||
icon: isGlobal ? <EarthIcon className="icon-md text-green-400" /> : null,
|
||||
context_files: _agent?.tool_resources?.ocr?.file_ids
|
||||
? ([] as Array<[string, ExtendedFile]>)
|
||||
: undefined,
|
||||
context_files: context_files.length > 0 ? ([] as Array<[string, ExtendedFile]>) : undefined,
|
||||
knowledge_files: _agent?.tool_resources?.file_search?.file_ids
|
||||
? ([] as Array<[string, ExtendedFile]>)
|
||||
: undefined,
|
||||
|
@ -130,12 +135,12 @@ export const processAgentOption = ({
|
|||
}
|
||||
};
|
||||
|
||||
if (agent.context_files && _agent?.tool_resources?.ocr?.file_ids) {
|
||||
_agent.tool_resources.ocr.file_ids.forEach((file_id) =>
|
||||
if (agent.context_files && context_files.length > 0) {
|
||||
context_files.forEach((file_id) =>
|
||||
handleFile({
|
||||
file_id,
|
||||
list: agent.context_files,
|
||||
tool_resource: EToolResources.ocr,
|
||||
tool_resource: EToolResources.context,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
export * from './config';
|
||||
export * from './memory';
|
||||
export * from './migration';
|
||||
export * from './legacy';
|
||||
export * from './resources';
|
||||
export * from './run';
|
||||
export * from './validation';
|
||||
|
|
697
packages/api/src/agents/legacy.test.ts
Normal file
697
packages/api/src/agents/legacy.test.ts
Normal file
|
@ -0,0 +1,697 @@
|
|||
import { EToolResources } from 'librechat-data-provider';
|
||||
import { convertOcrToContextInPlace, mergeAgentOcrConversion } from './legacy';
|
||||
import type { AgentToolResources, TFile } from 'librechat-data-provider';
|
||||
|
||||
describe('OCR to Context Conversion for updateAgentHandler', () => {
|
||||
describe('convertOcrToContextInPlace', () => {
|
||||
it('should do nothing when no OCR resource exists', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['file1'],
|
||||
},
|
||||
},
|
||||
tools: ['execute_code'],
|
||||
};
|
||||
|
||||
const originalCopy = JSON.parse(JSON.stringify(data));
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data).toEqual(originalCopy);
|
||||
});
|
||||
|
||||
it('should convert OCR to context when context does not exist', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'doc.pdf',
|
||||
filepath: '/doc.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
} as AgentToolResources,
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(data.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'doc.pdf',
|
||||
filepath: '/doc.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file',
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
});
|
||||
});
|
||||
|
||||
it('should merge OCR into existing context', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context1'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'context1',
|
||||
filename: 'existing.txt',
|
||||
filepath: '/existing.txt',
|
||||
type: 'text/plain',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 256,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'scan.pdf',
|
||||
filepath: '/scan.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(data.tool_resources?.[EToolResources.context]?.file_ids).toEqual([
|
||||
'context1',
|
||||
'ocr1',
|
||||
'ocr2',
|
||||
]);
|
||||
expect(data.tool_resources?.[EToolResources.context]?.files).toHaveLength(2);
|
||||
expect(data.tool_resources?.[EToolResources.context]?.files?.map((f) => f.file_id)).toEqual([
|
||||
'context1',
|
||||
'ocr1',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should deduplicate file_ids when merging', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['file1', 'file2'],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['file2', 'file3'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.context]?.file_ids).toEqual([
|
||||
'file1',
|
||||
'file2',
|
||||
'file3',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should deduplicate files by file_id when merging', () => {
|
||||
const sharedFile: TFile = {
|
||||
file_id: 'shared',
|
||||
filename: 'shared.txt',
|
||||
filepath: '/shared.txt',
|
||||
type: 'text/plain',
|
||||
user: 'user1',
|
||||
object: 'file',
|
||||
bytes: 256,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
};
|
||||
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
files: [sharedFile],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
files: [
|
||||
sharedFile,
|
||||
{
|
||||
file_id: 'unique',
|
||||
filename: 'unique.pdf',
|
||||
filepath: '/unique.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.context]?.files).toHaveLength(2);
|
||||
expect(
|
||||
data.tool_resources?.[EToolResources.context]?.files?.map((f) => f.file_id).sort(),
|
||||
).toEqual(['shared', 'unique']);
|
||||
});
|
||||
|
||||
it('should replace OCR with context in tools array', () => {
|
||||
const data = {
|
||||
tools: ['execute_code', 'ocr', 'file_search'],
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tools).toEqual(['execute_code', 'context', 'file_search']);
|
||||
});
|
||||
|
||||
it('should remove duplicates when context already exists in tools', () => {
|
||||
const data = {
|
||||
tools: ['context', 'ocr', 'execute_code'],
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tools).toEqual(['context', 'execute_code']);
|
||||
});
|
||||
|
||||
it('should handle both tool_resources and tools conversion', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
} as AgentToolResources,
|
||||
tools: ['ocr', 'execute_code'],
|
||||
};
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(data.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1'],
|
||||
});
|
||||
expect(data.tools).toEqual(['context', 'execute_code']);
|
||||
});
|
||||
|
||||
it('should preserve other tool resources during OCR conversion', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['exec1', 'exec2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'exec1',
|
||||
filename: 'script.py',
|
||||
filepath: '/script.py',
|
||||
type: 'text/x-python',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 512,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.file_search]: {
|
||||
file_ids: ['search1'],
|
||||
vector_store_ids: ['vector1', 'vector2'],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
} as AgentToolResources,
|
||||
tools: ['execute_code', 'file_search', 'ocr'],
|
||||
};
|
||||
|
||||
const originalExecuteCode = JSON.parse(JSON.stringify(data.tool_resources.execute_code));
|
||||
const originalFileSearch = JSON.parse(JSON.stringify(data.tool_resources.file_search));
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
// OCR should be converted to context
|
||||
expect(data.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(data.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1'],
|
||||
});
|
||||
|
||||
// Other resources should remain unchanged
|
||||
expect(data.tool_resources?.[EToolResources.execute_code]).toEqual(originalExecuteCode);
|
||||
expect(data.tool_resources?.[EToolResources.file_search]).toEqual(originalFileSearch);
|
||||
|
||||
// Tools array should have ocr replaced with context
|
||||
expect(data.tools).toEqual(['execute_code', 'file_search', 'context']);
|
||||
});
|
||||
|
||||
it('should preserve image_edit resource during OCR conversion', () => {
|
||||
const data = {
|
||||
tool_resources: {
|
||||
[EToolResources.image_edit]: {
|
||||
file_ids: ['image1'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'image1',
|
||||
filename: 'photo.png',
|
||||
filepath: '/photo.png',
|
||||
type: 'image/png',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 2048,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
width: 800,
|
||||
height: 600,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
} as AgentToolResources,
|
||||
};
|
||||
|
||||
const originalImageEdit = JSON.parse(JSON.stringify(data.tool_resources.image_edit));
|
||||
|
||||
convertOcrToContextInPlace(data);
|
||||
|
||||
expect(data.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(data.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1'],
|
||||
});
|
||||
expect(data.tool_resources?.[EToolResources.image_edit]).toEqual(originalImageEdit);
|
||||
});
|
||||
});
|
||||
|
||||
describe('mergeAgentOcrConversion', () => {
|
||||
it('should return empty object when existing agent has no OCR', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['file1'],
|
||||
},
|
||||
},
|
||||
tools: ['execute_code'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(result).toEqual({});
|
||||
});
|
||||
|
||||
it('should convert existing OCR to context when no context exists', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'doc.pdf',
|
||||
filepath: '/doc.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
tools: ['ocr', 'execute_code'],
|
||||
};
|
||||
|
||||
const updateData = {};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'doc.pdf',
|
||||
filepath: '/doc.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file',
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(result.tools).toEqual(['context', 'execute_code']);
|
||||
});
|
||||
|
||||
it('should merge existing OCR with existing context', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context1'],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const updateData = {};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]?.file_ids).toEqual([
|
||||
'context1',
|
||||
'ocr1',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge converted context with updateData context', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['update-context1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]?.file_ids?.sort()).toEqual([
|
||||
'ocr1',
|
||||
'update-context1',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle complex merge with files and file_ids', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context1'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'context1',
|
||||
filename: 'existing.txt',
|
||||
filepath: '/existing.txt',
|
||||
type: 'text/plain',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 256,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'ocr1',
|
||||
filename: 'scan.pdf',
|
||||
filepath: '/scan.pdf',
|
||||
type: 'application/pdf',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
tools: ['context', 'ocr'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['update1'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'update1',
|
||||
filename: 'update.txt',
|
||||
filepath: '/update.txt',
|
||||
type: 'text/plain',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 512,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]?.file_ids?.sort()).toEqual([
|
||||
'context1',
|
||||
'ocr1',
|
||||
'ocr2',
|
||||
'update1',
|
||||
]);
|
||||
expect(result.tool_resources?.[EToolResources.context]?.files).toHaveLength(3);
|
||||
expect(result.tools).toEqual(['context']);
|
||||
});
|
||||
|
||||
it('should not mutate original objects', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
},
|
||||
tools: ['ocr'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const existingCopy = JSON.parse(JSON.stringify(existingAgent));
|
||||
const updateCopy = JSON.parse(JSON.stringify(updateData));
|
||||
|
||||
mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
expect(existingAgent).toEqual(existingCopy);
|
||||
expect(updateData).toEqual(updateCopy);
|
||||
});
|
||||
|
||||
it('should preserve other tool resources in existing agent during merge', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['exec1', 'exec2'],
|
||||
files: [
|
||||
{
|
||||
file_id: 'exec1',
|
||||
filename: 'script.py',
|
||||
filepath: '/script.py',
|
||||
type: 'text/x-python',
|
||||
user: 'user1',
|
||||
object: 'file' as const,
|
||||
bytes: 512,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.file_search]: {
|
||||
file_ids: ['search1'],
|
||||
vector_store_ids: ['vector1', 'vector2'],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
},
|
||||
tools: ['execute_code', 'file_search', 'ocr'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['new-context1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const originalExecuteCode = JSON.parse(
|
||||
JSON.stringify(existingAgent.tool_resources.execute_code),
|
||||
);
|
||||
const originalFileSearch = JSON.parse(
|
||||
JSON.stringify(existingAgent.tool_resources.file_search),
|
||||
);
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
// OCR should be converted to context and merged with updateData context
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]?.file_ids?.sort()).toEqual([
|
||||
'new-context1',
|
||||
'ocr1',
|
||||
]);
|
||||
|
||||
// Other resources should be preserved
|
||||
expect(result.tool_resources?.[EToolResources.execute_code]).toEqual(originalExecuteCode);
|
||||
expect(result.tool_resources?.[EToolResources.file_search]).toEqual(originalFileSearch);
|
||||
|
||||
// Tools should have ocr replaced with context
|
||||
expect(result.tools).toEqual(['execute_code', 'file_search', 'context']);
|
||||
});
|
||||
|
||||
it('should not affect updateData tool resources that are not context', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1'],
|
||||
},
|
||||
},
|
||||
tools: ['ocr'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['update-exec1'],
|
||||
},
|
||||
[EToolResources.file_search]: {
|
||||
file_ids: ['update-search1'],
|
||||
vector_store_ids: ['update-vector1'],
|
||||
},
|
||||
},
|
||||
tools: ['execute_code', 'file_search'],
|
||||
};
|
||||
|
||||
const originalUpdateData = JSON.parse(JSON.stringify(updateData));
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
// OCR should be converted to context
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['ocr1'],
|
||||
});
|
||||
|
||||
// UpdateData's other resources should not be affected
|
||||
expect(updateData.tool_resources?.[EToolResources.execute_code]).toEqual(
|
||||
originalUpdateData.tool_resources.execute_code,
|
||||
);
|
||||
expect(updateData.tool_resources?.[EToolResources.file_search]).toEqual(
|
||||
originalUpdateData.tool_resources.file_search,
|
||||
);
|
||||
|
||||
// Result should only have the converted OCR resources and tools
|
||||
expect(result.tools).toEqual(['context']);
|
||||
});
|
||||
|
||||
it('should handle all tool resources together', () => {
|
||||
const existingAgent = {
|
||||
tool_resources: {
|
||||
[EToolResources.execute_code]: {
|
||||
file_ids: ['exec1'],
|
||||
},
|
||||
[EToolResources.file_search]: {
|
||||
file_ids: ['search1'],
|
||||
vector_store_ids: ['vector1'],
|
||||
},
|
||||
[EToolResources.image_edit]: {
|
||||
file_ids: ['image1'],
|
||||
},
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['existing-context1'],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr1', 'ocr2'],
|
||||
},
|
||||
},
|
||||
tools: ['execute_code', 'file_search', 'image_edit', 'context', 'ocr'],
|
||||
};
|
||||
|
||||
const updateData = {
|
||||
tool_resources: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['update-context1'],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = mergeAgentOcrConversion(existingAgent, updateData);
|
||||
|
||||
// OCR should be merged with existing context and update context
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]?.file_ids?.sort()).toEqual([
|
||||
'existing-context1',
|
||||
'ocr1',
|
||||
'ocr2',
|
||||
'update-context1',
|
||||
]);
|
||||
|
||||
// All other resources should be preserved
|
||||
expect(result.tool_resources?.[EToolResources.execute_code]).toEqual({
|
||||
file_ids: ['exec1'],
|
||||
});
|
||||
expect(result.tool_resources?.[EToolResources.file_search]).toEqual({
|
||||
file_ids: ['search1'],
|
||||
vector_store_ids: ['vector1'],
|
||||
});
|
||||
expect(result.tool_resources?.[EToolResources.image_edit]).toEqual({
|
||||
file_ids: ['image1'],
|
||||
});
|
||||
|
||||
// Tools should have ocr replaced with context (no duplicates)
|
||||
expect(result.tools).toEqual(['execute_code', 'file_search', 'image_edit', 'context']);
|
||||
});
|
||||
});
|
||||
});
|
141
packages/api/src/agents/legacy.ts
Normal file
141
packages/api/src/agents/legacy.ts
Normal file
|
@ -0,0 +1,141 @@
|
|||
import { EToolResources } from 'librechat-data-provider';
|
||||
import type { AgentToolResources, TFile } from 'librechat-data-provider';
|
||||
|
||||
/**
|
||||
* Converts OCR tool resource to context tool resource in place.
|
||||
* This modifies the input object directly (used for updateData in the handler).
|
||||
*
|
||||
* @param data - Object containing tool_resources and/or tools to convert
|
||||
* @returns void - modifies the input object directly
|
||||
*/
|
||||
export function convertOcrToContextInPlace(data: {
|
||||
tool_resources?: AgentToolResources;
|
||||
tools?: string[];
|
||||
}): void {
|
||||
// Convert OCR to context in tool_resources
|
||||
if (data.tool_resources?.ocr) {
|
||||
if (!data.tool_resources.context) {
|
||||
data.tool_resources.context = data.tool_resources.ocr;
|
||||
} else {
|
||||
// Merge OCR into existing context
|
||||
if (data.tool_resources.ocr?.file_ids?.length) {
|
||||
const existingFileIds = data.tool_resources.context.file_ids || [];
|
||||
const ocrFileIds = data.tool_resources.ocr.file_ids || [];
|
||||
data.tool_resources.context.file_ids = [...new Set([...existingFileIds, ...ocrFileIds])];
|
||||
}
|
||||
if (data.tool_resources.ocr?.files?.length) {
|
||||
const existingFiles = data.tool_resources.context.files || [];
|
||||
const ocrFiles = data.tool_resources.ocr.files || [];
|
||||
const filesMap = new Map<string, TFile>();
|
||||
[...existingFiles, ...ocrFiles].forEach((file) => {
|
||||
if (file?.file_id) {
|
||||
filesMap.set(file.file_id, file);
|
||||
}
|
||||
});
|
||||
data.tool_resources.context.files = Array.from(filesMap.values());
|
||||
}
|
||||
}
|
||||
delete data.tool_resources.ocr;
|
||||
}
|
||||
|
||||
// Convert OCR to context in tools array
|
||||
if (data.tools?.includes(EToolResources.ocr)) {
|
||||
data.tools = data.tools.map((tool) =>
|
||||
tool === EToolResources.ocr ? EToolResources.context : tool,
|
||||
);
|
||||
data.tools = [...new Set(data.tools)];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Merges tool resources from existing agent with incoming update data,
|
||||
* converting OCR to context and handling deduplication.
|
||||
* Used when existing agent has OCR that needs to be converted and merged with updateData.
|
||||
*
|
||||
* @param existingAgent - The existing agent data
|
||||
* @param updateData - The incoming update data
|
||||
* @returns Object with merged tool_resources and tools
|
||||
*/
|
||||
export function mergeAgentOcrConversion(
|
||||
existingAgent: { tool_resources?: AgentToolResources; tools?: string[] },
|
||||
updateData: { tool_resources?: AgentToolResources; tools?: string[] },
|
||||
): { tool_resources?: AgentToolResources; tools?: string[] } {
|
||||
if (!existingAgent.tool_resources?.ocr) {
|
||||
return {};
|
||||
}
|
||||
|
||||
const result: { tool_resources?: AgentToolResources; tools?: string[] } = {};
|
||||
|
||||
// Convert existing agent's OCR to context
|
||||
result.tool_resources = { ...existingAgent.tool_resources };
|
||||
|
||||
if (!result.tool_resources.context) {
|
||||
// Simple case: no context exists, just move ocr to context
|
||||
result.tool_resources.context = result.tool_resources.ocr;
|
||||
} else {
|
||||
// Merge case: context already exists, merge both file_ids and files arrays
|
||||
|
||||
// Merge file_ids if they exist
|
||||
if (result.tool_resources.ocr?.file_ids?.length) {
|
||||
const existingFileIds = result.tool_resources.context.file_ids || [];
|
||||
const ocrFileIds = result.tool_resources.ocr.file_ids || [];
|
||||
result.tool_resources.context.file_ids = [...new Set([...existingFileIds, ...ocrFileIds])];
|
||||
}
|
||||
|
||||
// Merge files array if it exists (already fetched files)
|
||||
if (result.tool_resources.ocr?.files?.length) {
|
||||
const existingFiles = result.tool_resources.context.files || [];
|
||||
const ocrFiles = result.tool_resources.ocr?.files || [];
|
||||
// Merge and deduplicate by file_id
|
||||
const filesMap = new Map<string, TFile>();
|
||||
[...existingFiles, ...ocrFiles].forEach((file) => {
|
||||
if (file?.file_id) {
|
||||
filesMap.set(file.file_id, file);
|
||||
}
|
||||
});
|
||||
result.tool_resources.context.files = Array.from(filesMap.values());
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the deprecated ocr resource
|
||||
delete result.tool_resources.ocr;
|
||||
|
||||
// Update tools array: replace 'ocr' with 'context'
|
||||
if (existingAgent.tools?.includes(EToolResources.ocr)) {
|
||||
result.tools = existingAgent.tools.map((tool) =>
|
||||
tool === EToolResources.ocr ? EToolResources.context : tool,
|
||||
);
|
||||
// Remove duplicates if context already existed
|
||||
result.tools = [...new Set(result.tools)];
|
||||
}
|
||||
|
||||
// Merge with any context that might already be in updateData (from incoming OCR conversion)
|
||||
if (updateData.tool_resources?.context && result.tool_resources.context) {
|
||||
// Merge the contexts
|
||||
const mergedContext = { ...result.tool_resources.context };
|
||||
|
||||
// Merge file_ids
|
||||
if (updateData.tool_resources.context.file_ids?.length) {
|
||||
const existingIds = mergedContext.file_ids || [];
|
||||
const newIds = updateData.tool_resources.context.file_ids || [];
|
||||
mergedContext.file_ids = [...new Set([...existingIds, ...newIds])];
|
||||
}
|
||||
|
||||
// Merge files
|
||||
if (updateData.tool_resources.context.files?.length) {
|
||||
const existingFiles = mergedContext.files || [];
|
||||
const newFiles = updateData.tool_resources.context.files || [];
|
||||
const filesMap = new Map<string, TFile>();
|
||||
[...existingFiles, ...newFiles].forEach((file) => {
|
||||
if (file?.file_id) {
|
||||
filesMap.set(file.file_id, file);
|
||||
}
|
||||
});
|
||||
mergedContext.files = Array.from(filesMap.values());
|
||||
}
|
||||
|
||||
result.tool_resources.context = mergedContext;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
|
@ -31,7 +31,7 @@ describe('primeResources', () => {
|
|||
mockAppConfig = {
|
||||
endpoints: {
|
||||
[EModelEndpoint.agents]: {
|
||||
capabilities: [AgentCapabilities.ocr],
|
||||
capabilities: [AgentCapabilities.context],
|
||||
} as TAgentsEndpoint,
|
||||
},
|
||||
} as AppConfig;
|
||||
|
@ -43,8 +43,8 @@ describe('primeResources', () => {
|
|||
requestFileSet = new Set(['file1', 'file2', 'file3']);
|
||||
});
|
||||
|
||||
describe('when OCR is enabled and tool_resources has OCR file_ids', () => {
|
||||
it('should fetch OCR files and include them in attachments', async () => {
|
||||
describe('when `context` capability is enabled and tool_resources has "context" file_ids', () => {
|
||||
it('should fetch context files and include them in attachments', async () => {
|
||||
const mockOcrFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
|
@ -62,7 +62,7 @@ describe('primeResources', () => {
|
|||
mockGetFiles.mockResolvedValue(mockOcrFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
};
|
||||
|
@ -83,16 +83,18 @@ describe('primeResources', () => {
|
|||
{ userId: undefined, agentId: undefined },
|
||||
);
|
||||
expect(result.attachments).toEqual(mockOcrFiles);
|
||||
expect(result.tool_resources).toEqual(tool_resources);
|
||||
// Context field is deleted after files are fetched and re-categorized
|
||||
// Since the file is not embedded and has no special properties, it won't be categorized
|
||||
expect(result.tool_resources).toEqual({});
|
||||
});
|
||||
});
|
||||
|
||||
describe('when OCR is disabled', () => {
|
||||
it('should not fetch OCR files even if tool_resources has OCR file_ids', async () => {
|
||||
describe('when `context` capability is disabled', () => {
|
||||
it('should not fetch context files even if tool_resources has context file_ids', async () => {
|
||||
(mockAppConfig.endpoints![EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
};
|
||||
|
@ -371,8 +373,60 @@ describe('primeResources', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('when both OCR and attachments are provided', () => {
|
||||
it('should include both OCR files and attachment files', async () => {
|
||||
describe('when both "context" files and "attachments" are provided', () => {
|
||||
it('should include both context files and attachment files', async () => {
|
||||
const mockOcrFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'ocr-file-1',
|
||||
filename: 'document.pdf',
|
||||
filepath: '/uploads/document.pdf',
|
||||
object: 'file',
|
||||
type: 'application/pdf',
|
||||
bytes: 1024,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
];
|
||||
|
||||
const mockAttachmentFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'file1',
|
||||
filename: 'attachment.txt',
|
||||
filepath: '/uploads/attachment.txt',
|
||||
object: 'file',
|
||||
type: 'text/plain',
|
||||
bytes: 256,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
];
|
||||
|
||||
mockGetFiles.mockResolvedValue(mockOcrFiles);
|
||||
const attachments = Promise.resolve(mockAttachmentFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
};
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments,
|
||||
tool_resources,
|
||||
});
|
||||
|
||||
expect(result.attachments).toHaveLength(2);
|
||||
expect(result.attachments?.[0]?.file_id).toBe('ocr-file-1');
|
||||
expect(result.attachments?.[1]?.file_id).toBe('file1');
|
||||
});
|
||||
|
||||
it('should include both context (as `ocr` resource) files and attachment files', async () => {
|
||||
const mockOcrFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
|
@ -424,7 +478,7 @@ describe('primeResources', () => {
|
|||
expect(result.attachments?.[1]?.file_id).toBe('file1');
|
||||
});
|
||||
|
||||
it('should prevent duplicate files when same file exists in OCR and attachments', async () => {
|
||||
it('should prevent duplicate files when same file exists in context tool_resource and attachments', async () => {
|
||||
const sharedFile: TFile = {
|
||||
user: 'user1',
|
||||
file_id: 'shared-file-id',
|
||||
|
@ -457,7 +511,7 @@ describe('primeResources', () => {
|
|||
const attachments = Promise.resolve(mockAttachmentFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['shared-file-id'],
|
||||
},
|
||||
};
|
||||
|
@ -500,7 +554,7 @@ describe('primeResources', () => {
|
|||
const attachments = Promise.resolve(mockAttachmentFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['shared-file-id'],
|
||||
},
|
||||
};
|
||||
|
@ -569,7 +623,7 @@ describe('primeResources', () => {
|
|||
const attachments = Promise.resolve(mockAttachmentFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['file-1', 'file-2'],
|
||||
},
|
||||
};
|
||||
|
@ -583,7 +637,7 @@ describe('primeResources', () => {
|
|||
tool_resources,
|
||||
});
|
||||
|
||||
// Should have 3 files total (2 from OCR + 1 unique from attachments)
|
||||
// Should have 3 files total (2 from context files + 1 unique from attachments)
|
||||
expect(result.attachments).toHaveLength(3);
|
||||
|
||||
// Each file should appear only once
|
||||
|
@ -628,7 +682,7 @@ describe('primeResources', () => {
|
|||
const attachments = Promise.resolve(mockAttachmentFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['normal-file'],
|
||||
},
|
||||
};
|
||||
|
@ -801,7 +855,7 @@ describe('primeResources', () => {
|
|||
);
|
||||
});
|
||||
|
||||
it('should handle complex scenario with OCR, existing tool_resources, and attachments', async () => {
|
||||
it('should handle complex scenario with context files, existing tool_resources, and attachments', async () => {
|
||||
const ocrFile: TFile = {
|
||||
user: 'user1',
|
||||
file_id: 'ocr-file',
|
||||
|
@ -843,11 +897,11 @@ describe('primeResources', () => {
|
|||
width: 600,
|
||||
};
|
||||
|
||||
mockGetFiles.mockResolvedValue([ocrFile, existingFile]); // OCR returns both files
|
||||
mockGetFiles.mockResolvedValue([ocrFile, existingFile]); // context returns both files
|
||||
const attachments = Promise.resolve([existingFile, ocrFile, newFile]); // Attachments has duplicates
|
||||
|
||||
const existingToolResources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file', 'existing-file'],
|
||||
},
|
||||
[EToolResources.execute_code]: {
|
||||
|
@ -899,11 +953,11 @@ describe('primeResources', () => {
|
|||
const attachments = Promise.resolve(mockFiles);
|
||||
const error = new Error('Test error');
|
||||
|
||||
// Mock getFiles to throw an error when called for OCR
|
||||
// Mock getFiles to throw an error when called for context
|
||||
mockGetFiles.mockRejectedValue(error);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
};
|
||||
|
@ -949,6 +1003,245 @@ describe('primeResources', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('tool_resources field deletion behavior', () => {
|
||||
it('should not mutate the original tool_resources object', async () => {
|
||||
const originalToolResources = {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context-file-1'],
|
||||
files: [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'context-file-1',
|
||||
filename: 'original.txt',
|
||||
filepath: '/uploads/original.txt',
|
||||
object: 'file' as const,
|
||||
type: 'text/plain',
|
||||
bytes: 256,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
},
|
||||
],
|
||||
},
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
};
|
||||
|
||||
// Create a deep copy to compare later
|
||||
const originalCopy = JSON.parse(JSON.stringify(originalToolResources));
|
||||
|
||||
const mockOcrFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'ocr-file-1',
|
||||
filename: 'document.pdf',
|
||||
filepath: '/uploads/document.pdf',
|
||||
object: 'file',
|
||||
type: 'application/pdf',
|
||||
bytes: 1024,
|
||||
embedded: true,
|
||||
usage: 0,
|
||||
},
|
||||
];
|
||||
|
||||
mockGetFiles.mockResolvedValue(mockOcrFiles);
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources: originalToolResources,
|
||||
});
|
||||
|
||||
// Original object should remain unchanged
|
||||
expect(originalToolResources).toEqual(originalCopy);
|
||||
|
||||
// Result should have modifications
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.context]).toBeUndefined();
|
||||
expect(result.tool_resources?.[EToolResources.file_search]).toBeDefined();
|
||||
});
|
||||
|
||||
it('should delete ocr field after merging file_ids with context', async () => {
|
||||
const mockOcrFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'ocr-file-1',
|
||||
filename: 'document.pdf',
|
||||
filepath: '/uploads/document.pdf',
|
||||
object: 'file',
|
||||
type: 'application/pdf',
|
||||
bytes: 1024,
|
||||
embedded: true, // Will be categorized as file_search
|
||||
usage: 0,
|
||||
},
|
||||
];
|
||||
|
||||
mockGetFiles.mockResolvedValue(mockOcrFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context-file-1'],
|
||||
},
|
||||
};
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources,
|
||||
});
|
||||
|
||||
// OCR field should be deleted after merging
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
// Context field should also be deleted since files were fetched and re-categorized
|
||||
expect(result.tool_resources?.[EToolResources.context]).toBeUndefined();
|
||||
// File should be categorized as file_search based on embedded=true
|
||||
expect(result.tool_resources?.[EToolResources.file_search]?.files).toHaveLength(1);
|
||||
expect(result.tool_resources?.[EToolResources.file_search]?.files?.[0]?.file_id).toBe(
|
||||
'ocr-file-1',
|
||||
);
|
||||
|
||||
// Verify getFiles was called with merged file_ids
|
||||
expect(mockGetFiles).toHaveBeenCalledWith(
|
||||
{ file_id: { $in: ['context-file-1', 'ocr-file-1'] } },
|
||||
{},
|
||||
{},
|
||||
{ userId: undefined, agentId: undefined },
|
||||
);
|
||||
});
|
||||
|
||||
it('should delete context field when fetching and re-categorizing files', async () => {
|
||||
const mockContextFiles: TFile[] = [
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'context-file-1',
|
||||
filename: 'script.py',
|
||||
filepath: '/uploads/script.py',
|
||||
object: 'file',
|
||||
type: 'text/x-python',
|
||||
bytes: 512,
|
||||
embedded: false,
|
||||
usage: 0,
|
||||
metadata: {
|
||||
fileIdentifier: 'python-script',
|
||||
},
|
||||
},
|
||||
{
|
||||
user: 'user1',
|
||||
file_id: 'context-file-2',
|
||||
filename: 'data.txt',
|
||||
filepath: '/uploads/data.txt',
|
||||
object: 'file',
|
||||
type: 'text/plain',
|
||||
bytes: 256,
|
||||
embedded: true,
|
||||
usage: 0,
|
||||
},
|
||||
];
|
||||
|
||||
mockGetFiles.mockResolvedValue(mockContextFiles);
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context-file-1', 'context-file-2'],
|
||||
},
|
||||
};
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources,
|
||||
});
|
||||
|
||||
// Context field should be deleted after fetching files
|
||||
expect(result.tool_resources?.[EToolResources.context]).toBeUndefined();
|
||||
|
||||
// Files should be re-categorized based on their properties
|
||||
expect(result.tool_resources?.[EToolResources.execute_code]?.files).toHaveLength(1);
|
||||
expect(result.tool_resources?.[EToolResources.execute_code]?.files?.[0]?.file_id).toBe(
|
||||
'context-file-1',
|
||||
);
|
||||
|
||||
expect(result.tool_resources?.[EToolResources.file_search]?.files).toHaveLength(1);
|
||||
expect(result.tool_resources?.[EToolResources.file_search]?.files?.[0]?.file_id).toBe(
|
||||
'context-file-2',
|
||||
);
|
||||
});
|
||||
|
||||
it('should preserve context field when context capability is disabled', async () => {
|
||||
// Disable context capability
|
||||
(mockAppConfig.endpoints![EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context-file-1'],
|
||||
},
|
||||
};
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources,
|
||||
});
|
||||
|
||||
// Context field should be preserved when capability is disabled
|
||||
expect(result.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['context-file-1'],
|
||||
});
|
||||
|
||||
// getFiles should not have been called
|
||||
expect(mockGetFiles).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should still delete ocr field even when context capability is disabled', async () => {
|
||||
// Disable context capability
|
||||
(mockAppConfig.endpoints![EModelEndpoint.agents] as TAgentsEndpoint).capabilities = [];
|
||||
|
||||
const tool_resources = {
|
||||
[EToolResources.ocr]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['context-file-1'],
|
||||
},
|
||||
};
|
||||
|
||||
const result = await primeResources({
|
||||
req: mockReq,
|
||||
appConfig: mockAppConfig,
|
||||
getFiles: mockGetFiles,
|
||||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources,
|
||||
});
|
||||
|
||||
// OCR field should still be deleted (merged into context)
|
||||
expect(result.tool_resources?.[EToolResources.ocr]).toBeUndefined();
|
||||
|
||||
// Context field should contain merged file_ids but not be processed
|
||||
expect(result.tool_resources?.[EToolResources.context]).toEqual({
|
||||
file_ids: ['context-file-1', 'ocr-file-1'],
|
||||
});
|
||||
|
||||
// getFiles should not have been called since context is disabled
|
||||
expect(mockGetFiles).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle missing appConfig agents endpoint gracefully', async () => {
|
||||
const reqWithoutLocals = {} as ServerRequest & { user?: IUser };
|
||||
|
@ -961,14 +1254,14 @@ describe('primeResources', () => {
|
|||
requestFileSet,
|
||||
attachments: undefined,
|
||||
tool_resources: {
|
||||
[EToolResources.ocr]: {
|
||||
[EToolResources.context]: {
|
||||
file_ids: ['ocr-file-1'],
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(mockGetFiles).not.toHaveBeenCalled();
|
||||
// When appConfig agents endpoint is missing, OCR is disabled
|
||||
// When appConfig agents endpoint is missing, context is disabled
|
||||
// and no attachments are provided, the function returns undefined
|
||||
expect(result.attachments).toBeUndefined();
|
||||
});
|
||||
|
|
|
@ -183,18 +183,32 @@ export const primeResources = async ({
|
|||
const processedResourceFiles = new Set<string>();
|
||||
/**
|
||||
* The agent's tool resources object that will be updated with categorized files
|
||||
* Initialized from input parameter or empty object if not provided
|
||||
* Create a shallow copy first to avoid mutating the original
|
||||
*/
|
||||
const tool_resources = _tool_resources ?? {};
|
||||
const tool_resources: AgentToolResources = { ...(_tool_resources ?? {}) };
|
||||
|
||||
// Track existing files in tool_resources to prevent duplicates within resources
|
||||
// Deep copy each resource to avoid mutating nested objects/arrays
|
||||
for (const [resourceType, resource] of Object.entries(tool_resources)) {
|
||||
if (resource?.files && Array.isArray(resource.files)) {
|
||||
if (!resource) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Deep copy the resource to avoid mutations
|
||||
tool_resources[resourceType as keyof AgentToolResources] = {
|
||||
...resource,
|
||||
// Deep copy arrays to prevent mutations
|
||||
...(resource.files && { files: [...resource.files] }),
|
||||
...(resource.file_ids && { file_ids: [...resource.file_ids] }),
|
||||
...(resource.vector_store_ids && { vector_store_ids: [...resource.vector_store_ids] }),
|
||||
} as AgentBaseResource;
|
||||
|
||||
// Now track existing files
|
||||
if (resource.files && Array.isArray(resource.files)) {
|
||||
for (const file of resource.files) {
|
||||
if (file?.file_id) {
|
||||
processedResourceFiles.add(`${resourceType}:${file.file_id}`);
|
||||
// Files from non-OCR resources should not be added to attachments from _attachments
|
||||
if (resourceType !== EToolResources.ocr) {
|
||||
// Files from non-context resources should not be added to attachments from _attachments
|
||||
if (resourceType !== EToolResources.context && resourceType !== EToolResources.ocr) {
|
||||
attachmentFileIds.add(file.file_id);
|
||||
}
|
||||
}
|
||||
|
@ -202,14 +216,22 @@ export const primeResources = async ({
|
|||
}
|
||||
}
|
||||
|
||||
const isOCREnabled = (
|
||||
const isContextEnabled = (
|
||||
appConfig?.endpoints?.[EModelEndpoint.agents]?.capabilities ?? []
|
||||
).includes(AgentCapabilities.ocr);
|
||||
).includes(AgentCapabilities.context);
|
||||
|
||||
if (tool_resources[EToolResources.ocr]?.file_ids && isOCREnabled) {
|
||||
const fileIds = tool_resources[EToolResources.context]?.file_ids ?? [];
|
||||
const ocrFileIds = tool_resources[EToolResources.ocr]?.file_ids;
|
||||
if (ocrFileIds != null) {
|
||||
fileIds.push(...ocrFileIds);
|
||||
delete tool_resources[EToolResources.ocr];
|
||||
}
|
||||
|
||||
if (fileIds.length > 0 && isContextEnabled) {
|
||||
delete tool_resources[EToolResources.context];
|
||||
const context = await getFiles(
|
||||
{
|
||||
file_id: { $in: tool_resources.ocr.file_ids },
|
||||
file_id: { $in: fileIds },
|
||||
},
|
||||
{},
|
||||
{},
|
||||
|
|
|
@ -26,6 +26,8 @@ export const agentToolResourcesSchema = z
|
|||
image_edit: agentBaseResourceSchema.optional(),
|
||||
execute_code: agentBaseResourceSchema.optional(),
|
||||
file_search: agentFileResourceSchema.optional(),
|
||||
context: agentBaseResourceSchema.optional(),
|
||||
/** @deprecated Use context instead */
|
||||
ocr: agentBaseResourceSchema.optional(),
|
||||
})
|
||||
.optional();
|
||||
|
|
|
@ -1311,6 +1311,142 @@ describe('updateInterfacePermissions - permissions', () => {
|
|||
});
|
||||
});
|
||||
|
||||
it('should re-enable memory permissions when memory.disabled changes from true to false', async () => {
|
||||
// Mock existing memory permissions that are disabled
|
||||
mockGetRoleByName.mockResolvedValue({
|
||||
permissions: {
|
||||
[PermissionTypes.MEMORIES]: {
|
||||
[Permissions.USE]: false,
|
||||
[Permissions.CREATE]: false,
|
||||
[Permissions.READ]: false,
|
||||
[Permissions.UPDATE]: false,
|
||||
[Permissions.OPT_OUT]: false,
|
||||
},
|
||||
// Other existing permissions
|
||||
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
|
||||
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: true },
|
||||
},
|
||||
});
|
||||
|
||||
const config = {
|
||||
interface: {
|
||||
// Not explicitly configuring memories in interface
|
||||
prompts: true,
|
||||
bookmarks: true,
|
||||
},
|
||||
memory: {
|
||||
disabled: false, // Memory is explicitly enabled (changed from true to false)
|
||||
agent: {
|
||||
id: 'test-agent-id',
|
||||
},
|
||||
personalize: true,
|
||||
} as unknown as TCustomConfig['memory'],
|
||||
};
|
||||
const configDefaults = {
|
||||
interface: {
|
||||
memories: true,
|
||||
prompts: true,
|
||||
bookmarks: true,
|
||||
},
|
||||
} as TConfigDefaults;
|
||||
const interfaceConfig = await loadDefaultInterface({ config, configDefaults });
|
||||
const appConfig = { config, interfaceConfig } as unknown as AppConfig;
|
||||
|
||||
await updateInterfacePermissions({
|
||||
appConfig,
|
||||
getRoleByName: mockGetRoleByName,
|
||||
updateAccessPermissions: mockUpdateAccessPermissions,
|
||||
});
|
||||
|
||||
// Check USER role call
|
||||
const userCall = mockUpdateAccessPermissions.mock.calls.find(
|
||||
(call) => call[0] === SystemRoles.USER,
|
||||
);
|
||||
// Memory permissions should be re-enabled
|
||||
expect(userCall[1][PermissionTypes.MEMORIES]).toEqual({
|
||||
[Permissions.USE]: true,
|
||||
[Permissions.CREATE]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.CREATE],
|
||||
[Permissions.READ]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.READ],
|
||||
[Permissions.UPDATE]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.UPDATE],
|
||||
[Permissions.OPT_OUT]: true, // Should be true when personalize is enabled
|
||||
});
|
||||
|
||||
// Check ADMIN role call
|
||||
const adminCall = mockUpdateAccessPermissions.mock.calls.find(
|
||||
(call) => call[0] === SystemRoles.ADMIN,
|
||||
);
|
||||
expect(adminCall[1][PermissionTypes.MEMORIES]).toEqual({
|
||||
[Permissions.USE]: true,
|
||||
[Permissions.CREATE]:
|
||||
roleDefaults[SystemRoles.ADMIN].permissions[PermissionTypes.MEMORIES]?.[Permissions.CREATE],
|
||||
[Permissions.READ]:
|
||||
roleDefaults[SystemRoles.ADMIN].permissions[PermissionTypes.MEMORIES]?.[Permissions.READ],
|
||||
[Permissions.UPDATE]:
|
||||
roleDefaults[SystemRoles.ADMIN].permissions[PermissionTypes.MEMORIES]?.[Permissions.UPDATE],
|
||||
[Permissions.OPT_OUT]: true, // Should be true when personalize is enabled
|
||||
});
|
||||
|
||||
// Verify the existing role data was passed to updateAccessPermissions
|
||||
expect(userCall[2]).toMatchObject({
|
||||
permissions: expect.objectContaining({
|
||||
[PermissionTypes.MEMORIES]: expect.any(Object),
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
it('should re-enable memory permissions when valid memory config exists without disabled field', async () => {
|
||||
// Mock existing memory permissions that are disabled
|
||||
mockGetRoleByName.mockResolvedValue({
|
||||
permissions: {
|
||||
[PermissionTypes.MEMORIES]: {
|
||||
[Permissions.USE]: false,
|
||||
[Permissions.CREATE]: false,
|
||||
[Permissions.READ]: false,
|
||||
[Permissions.UPDATE]: false,
|
||||
[Permissions.OPT_OUT]: false,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const config = {
|
||||
memory: {
|
||||
// No disabled field, but valid config
|
||||
agent: {
|
||||
id: 'test-agent-id',
|
||||
provider: 'openai',
|
||||
},
|
||||
personalize: false,
|
||||
} as unknown as TCustomConfig['memory'],
|
||||
};
|
||||
const configDefaults = { interface: {} } as TConfigDefaults;
|
||||
const interfaceConfig = await loadDefaultInterface({ config, configDefaults });
|
||||
const appConfig = { config, interfaceConfig } as unknown as AppConfig;
|
||||
|
||||
await updateInterfacePermissions({
|
||||
appConfig,
|
||||
getRoleByName: mockGetRoleByName,
|
||||
updateAccessPermissions: mockUpdateAccessPermissions,
|
||||
});
|
||||
|
||||
// Check USER role call - memory should be re-enabled
|
||||
const userCall = mockUpdateAccessPermissions.mock.calls.find(
|
||||
(call) => call[0] === SystemRoles.USER,
|
||||
);
|
||||
expect(userCall[1][PermissionTypes.MEMORIES]).toEqual({
|
||||
[Permissions.USE]: true,
|
||||
[Permissions.CREATE]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.CREATE],
|
||||
[Permissions.READ]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.READ],
|
||||
[Permissions.UPDATE]:
|
||||
roleDefaults[SystemRoles.USER].permissions[PermissionTypes.MEMORIES]?.[Permissions.UPDATE],
|
||||
[Permissions.OPT_OUT]: undefined, // Should be undefined when personalize is false
|
||||
});
|
||||
});
|
||||
|
||||
it('should override existing memory permissions when memory.disabled is true', async () => {
|
||||
// Mock existing memory permissions that are enabled
|
||||
mockGetRoleByName.mockResolvedValue({
|
||||
|
|
|
@ -69,8 +69,12 @@ export async function updateInterfacePermissions({
|
|||
const interfaceConfig = appConfig?.config?.interface;
|
||||
const memoryConfig = appConfig?.config?.memory;
const memoryEnabled = isMemoryEnabled(memoryConfig);
/** Check if memory is explicitly disabled */
const isMemoryExplicitlyDisabled = memoryConfig && !memoryEnabled;
/** Check if memory is explicitly disabled (memory.disabled === true) */
const isMemoryExplicitlyDisabled = memoryConfig?.disabled === true;
/** Check if memory should be enabled (explicitly enabled or valid config) */
const shouldEnableMemory =
memoryConfig?.disabled === false ||
(memoryConfig && memoryEnabled && memoryConfig.disabled === undefined);
/** Check if personalization is enabled (defaults to true if memory is configured and enabled) */
const isPersonalizationEnabled =
memoryConfig && memoryEnabled && memoryConfig.personalize !== false;
@@ -111,19 +115,24 @@ export async function updateInterfacePermissions({
const permTypeExists = existingPermissions?.[permType];
const isExplicitlyConfigured =
interfaceConfig && hasExplicitConfig(interfaceConfig, permType);
const isMemoryDisabled =
permType === PermissionTypes.MEMORIES && isMemoryExplicitlyDisabled === true;
const isMemoryDisabled = permType === PermissionTypes.MEMORIES && isMemoryExplicitlyDisabled;
const isMemoryReenabling =
permType === PermissionTypes.MEMORIES &&
shouldEnableMemory &&
existingPermissions?.[PermissionTypes.MEMORIES]?.[Permissions.USE] === false;

// Only update if: doesn't exist OR explicitly configured
if (!permTypeExists || isExplicitlyConfigured || isMemoryDisabled) {
// Only update if: doesn't exist OR explicitly configured OR memory state change
if (!permTypeExists || isExplicitlyConfigured || isMemoryDisabled || isMemoryReenabling) {
permissionsToUpdate[permType] = permissions;
if (!permTypeExists) {
logger.debug(`Role '${roleName}': Setting up default permissions for '${permType}'`);
} else if (isExplicitlyConfigured) {
logger.debug(`Role '${roleName}': Applying explicit config for '${permType}'`);
} else if (isMemoryDisabled) {
logger.debug(`Role '${roleName}': Disabling memories as memory.disabled is true`);
} else if (isMemoryReenabling) {
logger.debug(
`Role '${roleName}': Disabling memories as it is explicitly disabled in config`,
`Role '${roleName}': Re-enabling memories due to valid memory configuration`,
);
}
} else {
@@ -147,13 +156,15 @@ export async function updateInterfacePermissions({
),
},
[PermissionTypes.MEMORIES]: {
[Permissions.USE]: isMemoryExplicitlyDisabled
? false
: getPermissionValue(
loadedInterface.memories,
defaultPerms[PermissionTypes.MEMORIES]?.[Permissions.USE],
defaults.memories,
),
[Permissions.USE]: (() => {
if (isMemoryExplicitlyDisabled) return false;
if (shouldEnableMemory) return true;
return getPermissionValue(
loadedInterface.memories,
defaultPerms[PermissionTypes.MEMORIES]?.[Permissions.USE],
defaults.memories,
);
})(),
...(defaultPerms[PermissionTypes.MEMORIES]?.[Permissions.CREATE] !== undefined && {
[Permissions.CREATE]: isMemoryExplicitlyDisabled
? false
@@ -169,7 +180,9 @@ export async function updateInterfacePermissions({
? false
: defaultPerms[PermissionTypes.MEMORIES][Permissions.UPDATE],
}),
[Permissions.OPT_OUT]: isPersonalizationEnabled,
[Permissions.OPT_OUT]: isMemoryExplicitlyDisabled
? false
: isPersonalizationEnabled || undefined,
},
[PermissionTypes.MULTI_CONVO]: {
[Permissions.USE]: getPermissionValue(
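A minimal sketch of how these flags combine, assuming a simplified config shape (the MemoryConfig type and the isMemoryEnabled stub below are illustrative stand-ins, not the project's actual definitions):

interface MemoryConfig {
  disabled?: boolean;
  personalize?: boolean;
}

// Illustrative stand-in: the real isMemoryEnabled validates the full memory config.
const isMemoryEnabled = (config?: MemoryConfig): boolean =>
  config != null && config.disabled !== true;

function resolveMemoryUsePermission(memoryConfig?: MemoryConfig): boolean | undefined {
  const memoryEnabled = isMemoryEnabled(memoryConfig);
  const isMemoryExplicitlyDisabled = memoryConfig?.disabled === true;
  const shouldEnableMemory =
    memoryConfig?.disabled === false ||
    (memoryConfig != null && memoryEnabled && memoryConfig.disabled === undefined);

  if (isMemoryExplicitlyDisabled) return false; // memory.disabled: true always wins
  if (shouldEnableMemory) return true; // a valid config re-enables the permission
  return undefined; // otherwise fall through to interface/default permission resolution
}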
@@ -8,7 +8,7 @@ import type * as t from '~/mcp/types';
import { ConnectionsRepository } from '~/mcp/ConnectionsRepository';
import { detectOAuthRequirement } from '~/mcp/oauth';
import { sanitizeUrlForLogging } from '~/mcp/utils';
import { processMCPEnv } from '~/utils';
import { processMCPEnv, isEnabled } from '~/utils';
import { CONSTANTS } from '~/mcp/enum';

/**
@@ -158,8 +158,13 @@ export class MCPServersRegistry {
private async fetchServerInstructions(serverName: string): Promise<void> {
const config = this.parsedConfigs[serverName];
if (!config.serverInstructions) return;
if (typeof config.serverInstructions === 'string') return;

// If it's a string that's not "true", it's a custom instruction
if (typeof config.serverInstructions === 'string' && !isEnabled(config.serverInstructions)) {
return;
}

// Fetch from server if true (boolean) or "true" (string)
const conn = await this.connections.get(serverName);
config.serverInstructions = conn.client.getInstructions();
if (!config.serverInstructions) {
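In plain terms, the branch above treats boolean true and the YAML-parsed string "true" as "fetch from server", while any other non-empty string is kept as a custom instruction. A rough sketch of the three cases, assuming isEnabled accepts truthy strings such as "true"/"1" (its exact accepted values are not shown in this diff):

type ServerInstructions = boolean | string | undefined;

// Assumed behavior of isEnabled, inferred from its usage above.
const isEnabled = (value: string): boolean =>
  ['true', '1', 'yes'].includes(value.trim().toLowerCase());

function instructionSource(value: ServerInstructions): 'skip' | 'custom' | 'fetch' {
  if (!value) return 'skip'; // undefined / false / '' -> leave instructions unset
  if (typeof value === 'string' && !isEnabled(value)) return 'custom'; // keep string as-is
  return 'fetch'; // true or "true" -> fetch instructions from the MCP server
}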
@@ -288,5 +288,74 @@ describe('MCPServersRegistry - Initialize Function', () => {
// Compare the actual parsedConfigs against the expected fixture
expect(registry.parsedConfigs).toEqual(expectedParsedConfigs);
});

it('should handle serverInstructions as string "true" correctly and fetch from server', async () => {
// Create test config with serverInstructions as string "true"
const testConfig: t.MCPServers = {
test_server_string_true: {
type: 'stdio',
args: [],
command: 'test-command',
serverInstructions: 'true', // Simulating string "true" from YAML parsing
},
test_server_custom_string: {
type: 'stdio',
args: [],
command: 'test-command',
serverInstructions: 'Custom instructions here',
},
test_server_bool_true: {
type: 'stdio',
args: [],
command: 'test-command',
serverInstructions: true,
},
};

const registry = new MCPServersRegistry(testConfig);

// Setup mock connection for servers that should fetch
const mockClient = {
listTools: jest.fn().mockResolvedValue({ tools: [] }),
getInstructions: jest.fn().mockReturnValue('Fetched instructions from server'),
getServerCapabilities: jest.fn().mockReturnValue({ tools: {} }),
};
const mockConnection = {
client: mockClient,
} as unknown as jest.Mocked<MCPConnection>;

mockConnectionsRepo.get.mockResolvedValue(mockConnection);
mockConnectionsRepo.getLoaded.mockResolvedValue(
new Map([
['test_server_string_true', mockConnection],
['test_server_bool_true', mockConnection],
]),
);
mockDetectOAuthRequirement.mockResolvedValue({
requiresOAuth: false,
method: 'no-metadata-found',
metadata: null,
});

await registry.initialize();

// Verify that string "true" was treated as fetch-from-server
expect(registry.parsedConfigs['test_server_string_true'].serverInstructions).toBe(
'Fetched instructions from server',
);

// Verify that custom string was kept as-is
expect(registry.parsedConfigs['test_server_custom_string'].serverInstructions).toBe(
'Custom instructions here',
);

// Verify that boolean true also fetched from server
expect(registry.parsedConfigs['test_server_bool_true'].serverInstructions).toBe(
'Fetched instructions from server',
);

// Verify getInstructions was called for both "true" cases
expect(mockClient.getInstructions).toHaveBeenCalledTimes(2);
});
});
});
@@ -1,4 +1,5 @@
import type { MCPOptions } from 'librechat-data-provider';
import type { AuthorizationServerMetadata } from '@modelcontextprotocol/sdk/shared/auth.js';
import { MCPOAuthHandler } from '~/mcp/oauth';

jest.mock('@librechat/data-schemas', () => ({
@@ -12,11 +13,19 @@ jest.mock('@librechat/data-schemas', () => ({

jest.mock('@modelcontextprotocol/sdk/client/auth.js', () => ({
startAuthorization: jest.fn(),
discoverAuthorizationServerMetadata: jest.fn(),
}));

import { startAuthorization } from '@modelcontextprotocol/sdk/client/auth.js';
import {
startAuthorization,
discoverAuthorizationServerMetadata,
} from '@modelcontextprotocol/sdk/client/auth.js';

const mockStartAuthorization = startAuthorization as jest.MockedFunction<typeof startAuthorization>;
const mockDiscoverAuthorizationServerMetadata =
discoverAuthorizationServerMetadata as jest.MockedFunction<
typeof discoverAuthorizationServerMetadata
>;

describe('MCPOAuthHandler - Configurable OAuth Metadata', () => {
const mockServerName = 'test-server';
@@ -188,6 +197,432 @@ describe('MCPOAuthHandler - Configurable OAuth Metadata', () => {
});
});

describe('refreshOAuthTokens', () => {
const mockRefreshToken = 'refresh-token-12345';
const originalFetch = global.fetch;
const mockFetch = jest.fn() as unknown as jest.MockedFunction<typeof fetch>;

beforeEach(() => {
jest.clearAllMocks();
global.fetch = mockFetch;
});

afterEach(() => {
mockFetch.mockClear();
});

afterAll(() => {
global.fetch = originalFetch;
});

describe('with stored metadata', () => {
it('should use client_secret_post when server only supports that method', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
grant_types: ['authorization_code', 'refresh_token'],
scope: 'read write',
},
};

// Mock OAuth metadata discovery
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
issuer: 'https://auth.example.com',
authorization_endpoint: 'https://auth.example.com/oauth/authorize',
token_endpoint: 'https://auth.example.com/oauth/token',
token_endpoint_auth_methods_supported: ['client_secret_post'],
response_types_supported: ['code'],
jwks_uri: 'https://auth.example.com/.well-known/jwks.json',
subject_types_supported: ['public'],
id_token_signing_alg_values_supported: ['RS256'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

const result = await MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata);

// Verify the call was made without Authorization header
expect(mockFetch).toHaveBeenCalledWith(
'https://auth.example.com/oauth/token',
expect.objectContaining({
method: 'POST',
headers: expect.not.objectContaining({
Authorization: expect.any(String),
}),
}),
);

// Verify the body contains client_id and client_secret
const callArgs = mockFetch.mock.calls[0];
const body = callArgs[1]?.body as URLSearchParams;
expect(body.toString()).toContain('client_id=test-client-id');
expect(body.toString()).toContain('client_secret=test-client-secret');

expect(result).toEqual({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
obtained_at: expect.any(Number),
expires_at: expect.any(Number),
});
});

it('should use client_secret_basic when server only supports that method', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
grant_types: ['authorization_code', 'refresh_token'],
scope: 'read write',
},
};

// Mock OAuth metadata discovery
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
issuer: 'https://auth.example.com',
authorization_endpoint: 'https://auth.example.com/oauth/authorize',
token_endpoint: 'https://auth.example.com/oauth/token',
token_endpoint_auth_methods_supported: ['client_secret_basic'],
response_types_supported: ['code'],
jwks_uri: 'https://auth.example.com/.well-known/jwks.json',
subject_types_supported: ['public'],
id_token_signing_alg_values_supported: ['RS256'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata);

const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
'https://auth.example.com/oauth/token',
expect.objectContaining({
method: 'POST',
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
body: expect.not.stringContaining('client_id='),
}),
);
});

it('should prefer client_secret_basic when both methods are supported', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
grant_types: ['authorization_code', 'refresh_token'],
},
};

// Mock OAuth metadata discovery
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
issuer: 'https://auth.example.com',
authorization_endpoint: 'https://auth.example.com/oauth/authorize',
token_endpoint: 'https://auth.example.com/oauth/token',
token_endpoint_auth_methods_supported: ['client_secret_post', 'client_secret_basic'],
response_types_supported: ['code'],
jwks_uri: 'https://auth.example.com/.well-known/jwks.json',
subject_types_supported: ['public'],
id_token_signing_alg_values_supported: ['RS256'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata);

const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
'https://auth.example.com/oauth/token',
expect.objectContaining({
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
}),
);
});

it('should default to client_secret_basic when no methods are advertised', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
grant_types: ['authorization_code', 'refresh_token'],
},
};

// Mock OAuth metadata discovery with no auth methods specified
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
issuer: 'https://auth.example.com',
authorization_endpoint: 'https://auth.example.com/oauth/authorize',
token_endpoint: 'https://auth.example.com/oauth/token',
// No token_endpoint_auth_methods_supported field
response_types_supported: ['code'],
jwks_uri: 'https://auth.example.com/.well-known/jwks.json',
subject_types_supported: ['public'],
id_token_signing_alg_values_supported: ['RS256'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata);

const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
'https://auth.example.com/oauth/token',
expect.objectContaining({
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
}),
);
});

it('should include client_id in body for public clients (no secret)', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
// No client_secret - public client
grant_types: ['authorization_code', 'refresh_token'],
},
};

// Mock OAuth metadata discovery
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
issuer: 'https://auth.example.com',
authorization_endpoint: 'https://auth.example.com/oauth/authorize',
token_endpoint: 'https://auth.example.com/oauth/token',
token_endpoint_auth_methods_supported: ['none'],
response_types_supported: ['code'],
jwks_uri: 'https://auth.example.com/.well-known/jwks.json',
subject_types_supported: ['public'],
id_token_signing_alg_values_supported: ['RS256'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata);

// Verify the call was made without Authorization header
expect(mockFetch).toHaveBeenCalledWith(
'https://auth.example.com/oauth/token',
expect.objectContaining({
method: 'POST',
headers: expect.not.objectContaining({
Authorization: expect.any(String),
}),
}),
);

// Verify the body contains client_id (public client)
const callArgs = mockFetch.mock.calls[0];
const body = callArgs[1]?.body as URLSearchParams;
expect(body.toString()).toContain('client_id=test-client-id');
});
});

describe('with pre-configured OAuth settings', () => {
it('should use client_secret_post when configured to only support that method', async () => {
const config = {
token_url: 'https://auth.example.com/oauth/token',
client_id: 'test-client-id',
client_secret: 'test-client-secret',
token_endpoint_auth_methods_supported: ['client_secret_post'],
};

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(
mockRefreshToken,
{ serverName: 'test-server' },
config,
);

// Verify the call was made without Authorization header
expect(mockFetch).toHaveBeenCalledWith(
new URL('https://auth.example.com/oauth/token'),
expect.objectContaining({
method: 'POST',
headers: expect.not.objectContaining({
Authorization: expect.any(String),
}),
}),
);

// Verify the body contains client_id and client_secret
const callArgs = mockFetch.mock.calls[0];
const body = callArgs[1]?.body as URLSearchParams;
expect(body.toString()).toContain('client_id=test-client-id');
expect(body.toString()).toContain('client_secret=test-client-secret');
});

it('should use client_secret_basic when configured to support that method', async () => {
const config = {
token_url: 'https://auth.example.com/oauth/token',
client_id: 'test-client-id',
client_secret: 'test-client-secret',
token_endpoint_auth_methods_supported: ['client_secret_basic'],
};

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(
mockRefreshToken,
{ serverName: 'test-server' },
config,
);

const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
new URL('https://auth.example.com/oauth/token'),
expect.objectContaining({
method: 'POST',
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
body: expect.not.stringContaining('client_id='),
}),
);
});

it('should default to client_secret_basic when no auth methods configured', async () => {
const config = {
token_url: 'https://auth.example.com/oauth/token',
client_id: 'test-client-id',
client_secret: 'test-client-secret',
// No token_endpoint_auth_methods_supported field
};

mockFetch.mockResolvedValueOnce({
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
expires_in: 3600,
}),
} as Response);

await MCPOAuthHandler.refreshOAuthTokens(
mockRefreshToken,
{ serverName: 'test-server' },
config,
);

const expectedAuth = `Basic ${Buffer.from('test-client-id:test-client-secret').toString('base64')}`;
expect(mockFetch).toHaveBeenCalledWith(
new URL('https://auth.example.com/oauth/token'),
expect.objectContaining({
headers: expect.objectContaining({
Authorization: expectedAuth,
}),
}),
);
});
});

it('should throw error when refresh fails', async () => {
const metadata = {
serverName: 'test-server',
userId: 'user-123',
serverUrl: 'https://auth.example.com',
state: 'state-123',
clientInfo: {
client_id: 'test-client-id',
client_secret: 'test-client-secret',
grant_types: ['authorization_code', 'refresh_token'],
},
};

// Mock OAuth metadata discovery
mockDiscoverAuthorizationServerMetadata.mockResolvedValueOnce({
token_endpoint: 'https://auth.example.com/oauth/token',
token_endpoint_auth_methods_supported: ['client_secret_post'],
} as AuthorizationServerMetadata);

mockFetch.mockResolvedValueOnce({
ok: false,
status: 400,
statusText: 'Bad Request',
text: async () =>
'{"error":"invalid_request","error_description":"refresh_token.client_id: Field required"}',
} as Response);

await expect(MCPOAuthHandler.refreshOAuthTokens(mockRefreshToken, metadata)).rejects.toThrow(
'Token refresh failed: 400 Bad Request - {"error":"invalid_request","error_description":"refresh_token.client_id: Field required"}',
);
});
});

describe('revokeOAuthToken', () => {
const mockServerName = 'test-server';
const mockToken = 'test-token-12345';
@@ -501,6 +501,7 @@ export class MCPOAuthHandler {

/** Use the stored client information and metadata to determine the token URL */
let tokenUrl: string;
let authMethods: string[] | undefined;
if (config?.token_url) {
tokenUrl = config.token_url;
} else if (!metadata.serverUrl) {
@@ -515,6 +516,7 @@ export class MCPOAuthHandler {
throw new Error('No token endpoint found in OAuth metadata');
}
tokenUrl = oauthMetadata.token_endpoint;
authMethods = oauthMetadata.token_endpoint_auth_methods_supported;
}

const body = new URLSearchParams({
@@ -532,14 +534,36 @@ export class MCPOAuthHandler {
Accept: 'application/json',
};

/** Use client_secret for authentication if available */
/** Handle authentication based on server's advertised methods */
if (metadata.clientInfo.client_secret) {
const clientAuth = Buffer.from(
`${metadata.clientInfo.client_id}:${metadata.clientInfo.client_secret}`,
).toString('base64');
headers['Authorization'] = `Basic ${clientAuth}`;
/** Default to client_secret_basic if no methods specified (per RFC 8414) */
const tokenAuthMethods = authMethods ?? ['client_secret_basic'];
const usesBasicAuth = tokenAuthMethods.includes('client_secret_basic');
const usesClientSecretPost = tokenAuthMethods.includes('client_secret_post');

if (usesBasicAuth) {
/** Use Basic auth */
logger.debug('[MCPOAuth] Using client_secret_basic authentication method');
const clientAuth = Buffer.from(
`${metadata.clientInfo.client_id}:${metadata.clientInfo.client_secret}`,
).toString('base64');
headers['Authorization'] = `Basic ${clientAuth}`;
} else if (usesClientSecretPost) {
/** Use client_secret_post */
logger.debug('[MCPOAuth] Using client_secret_post authentication method');
body.append('client_id', metadata.clientInfo.client_id);
body.append('client_secret', metadata.clientInfo.client_secret);
} else {
/** No recognized method, default to Basic auth per RFC */
logger.debug('[MCPOAuth] No recognized auth method, defaulting to client_secret_basic');
const clientAuth = Buffer.from(
`${metadata.clientInfo.client_id}:${metadata.clientInfo.client_secret}`,
).toString('base64');
headers['Authorization'] = `Basic ${clientAuth}`;
}
} else {
/** For public clients, client_id must be in the body */
logger.debug('[MCPOAuth] Using public client authentication (no secret)');
body.append('client_id', metadata.clientInfo.client_id);
}

@@ -575,9 +599,6 @@ export class MCPOAuthHandler {
logger.debug(`[MCPOAuth] Using pre-configured OAuth settings for token refresh`);

const tokenUrl = new URL(config.token_url);
const clientAuth = config.client_secret
? Buffer.from(`${config.client_id}:${config.client_secret}`).toString('base64')
: null;

const body = new URLSearchParams({
grant_type: 'refresh_token',
@@ -593,10 +614,44 @@ export class MCPOAuthHandler {
Accept: 'application/json',
};

if (clientAuth) {
headers['Authorization'] = `Basic ${clientAuth}`;
/** Handle authentication based on configured methods */
if (config.client_secret) {
/** Default to client_secret_basic if no methods specified (per RFC 8414) */
const tokenAuthMethods = config.token_endpoint_auth_methods_supported ?? [
'client_secret_basic',
];
const usesBasicAuth = tokenAuthMethods.includes('client_secret_basic');
const usesClientSecretPost = tokenAuthMethods.includes('client_secret_post');

if (usesBasicAuth) {
/** Use Basic auth */
logger.debug(
'[MCPOAuth] Using client_secret_basic authentication method (pre-configured)',
);
const clientAuth = Buffer.from(`${config.client_id}:${config.client_secret}`).toString(
'base64',
);
headers['Authorization'] = `Basic ${clientAuth}`;
} else if (usesClientSecretPost) {
/** Use client_secret_post */
logger.debug(
'[MCPOAuth] Using client_secret_post authentication method (pre-configured)',
);
body.append('client_id', config.client_id);
body.append('client_secret', config.client_secret);
} else {
/** No recognized method, default to Basic auth per RFC */
logger.debug(
'[MCPOAuth] No recognized auth method, defaulting to client_secret_basic (pre-configured)',
);
const clientAuth = Buffer.from(`${config.client_id}:${config.client_secret}`).toString(
'base64',
);
headers['Authorization'] = `Basic ${clientAuth}`;
}
} else {
// Use client_id in body for public clients
/** For public clients, client_id must be in the body */
logger.debug('[MCPOAuth] Using public client authentication (no secret, pre-configured)');
body.append('client_id', config.client_id);
}

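Both refresh paths above differ only in where the client credentials travel: in a Basic Authorization header (client_secret_basic) or in the form body (client_secret_post), with public clients sending only client_id. A minimal sketch of that selection, using illustrative names and applying the same client_secret_basic default when the server advertises nothing:

function applyClientAuth(
  headers: Record<string, string>,
  body: URLSearchParams,
  clientId: string,
  clientSecret: string | undefined,
  methods: string[] = ['client_secret_basic'], // assumed default when unadvertised
): void {
  if (!clientSecret) {
    // Public client: client_id goes in the form body, no Authorization header.
    body.append('client_id', clientId);
    return;
  }
  if (methods.includes('client_secret_basic') || !methods.includes('client_secret_post')) {
    // client_secret_basic: credentials base64-encoded into a Basic Authorization header.
    headers['Authorization'] = `Basic ${Buffer.from(`${clientId}:${clientSecret}`).toString('base64')}`;
    return;
  }
  // client_secret_post: credentials sent as form fields.
  body.append('client_id', clientId);
  body.append('client_secret', clientSecret);
}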
@@ -180,6 +180,7 @@ export enum AgentCapabilities {
web_search = 'web_search',
artifacts = 'artifacts',
actions = 'actions',
context = 'context',
tools = 'tools',
chain = 'chain',
ocr = 'ocr',
@@ -253,6 +254,7 @@ export const defaultAgentCapabilities = [
AgentCapabilities.web_search,
AgentCapabilities.artifacts,
AgentCapabilities.actions,
AgentCapabilities.context,
AgentCapabilities.tools,
AgentCapabilities.chain,
AgentCapabilities.ocr,

@@ -31,6 +31,7 @@ export enum EToolResources {
execute_code = 'execute_code',
file_search = 'file_search',
image_edit = 'image_edit',
context = 'context',
ocr = 'ocr',
}

@@ -182,6 +183,8 @@ export interface AgentToolResources {
[EToolResources.image_edit]?: AgentBaseResource;
[EToolResources.execute_code]?: ExecuteCodeResource;
[EToolResources.file_search]?: AgentFileResource;
[EToolResources.context]?: AgentBaseResource;
/** @deprecated Use context instead */
[EToolResources.ocr]?: AgentBaseResource;
}
/**
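Since ocr stays on the interface only for backward compatibility, a consumer can resolve context files with a small fallback. A sketch using simplified local types rather than the actual AgentToolResources interface:

type ToolResource = { file_ids?: string[] };
type ToolResources = { context?: ToolResource; ocr?: ToolResource };

// Prefer the new `context` resource; fall back to a deprecated `ocr` entry on older agents.
function getContextFileIds(resources: ToolResources = {}): string[] {
  const resource = resources.context ?? resources.ocr;
  return resource?.file_ids ?? [];
}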