diff --git a/api/models/Agent.js b/api/models/Agent.js index 11789ca63b..663285183a 100644 --- a/api/models/Agent.js +++ b/api/models/Agent.js @@ -589,10 +589,16 @@ const deleteAgent = async (searchParameter) => { const agent = await Agent.findOneAndDelete(searchParameter); if (agent) { await removeAgentFromAllProjects(agent.id); - await removeAllPermissions({ - resourceType: ResourceType.AGENT, - resourceId: agent._id, - }); + await Promise.all([ + removeAllPermissions({ + resourceType: ResourceType.AGENT, + resourceId: agent._id, + }), + removeAllPermissions({ + resourceType: ResourceType.REMOTE_AGENT, + resourceId: agent._id, + }), + ]); try { await Agent.updateMany({ 'edges.to': agent.id }, { $pull: { edges: { to: agent.id } } }); } catch (error) { @@ -631,7 +637,7 @@ const deleteUserAgents = async (userId) => { } await AclEntry.deleteMany({ - resourceType: ResourceType.AGENT, + resourceType: { $in: [ResourceType.AGENT, ResourceType.REMOTE_AGENT] }, resourceId: { $in: agentObjectIds }, }); diff --git a/api/server/controllers/PermissionsController.js b/api/server/controllers/PermissionsController.js index e22e9532c9..51993d083c 100644 --- a/api/server/controllers/PermissionsController.js +++ b/api/server/controllers/PermissionsController.js @@ -5,6 +5,7 @@ const mongoose = require('mongoose'); const { logger } = require('@librechat/data-schemas'); const { ResourceType, PrincipalType, PermissionBits } = require('librechat-data-provider'); +const { enrichRemoteAgentPrincipals, backfillRemoteAgentPermissions } = require('@librechat/api'); const { bulkUpdateResourcePermissions, ensureGroupPrincipalExists, @@ -14,7 +15,6 @@ const { findAccessibleResources, getResourcePermissionsMap, } = require('~/server/services/PermissionService'); -const { AclEntry } = require('~/db/models'); const { searchPrincipals: searchLocalPrincipals, sortPrincipalsByRelevance, @@ -24,6 +24,7 @@ const { entraIdPrincipalFeatureEnabled, searchEntraIdPrincipals, } = require('~/server/services/GraphApiService'); +const { AclEntry, AccessRole } = require('~/db/models'); /** * Generic controller for resource permission endpoints @@ -234,7 +235,7 @@ const getResourcePermissions = async (req, res) => { }, ]); - const principals = []; + let principals = []; let publicPermission = null; // Process aggregation results @@ -280,6 +281,13 @@ const getResourcePermissions = async (req, res) => { } } + if (resourceType === ResourceType.REMOTE_AGENT) { + const enricherDeps = { AclEntry, AccessRole, logger }; + const enrichResult = await enrichRemoteAgentPrincipals(enricherDeps, resourceId, principals); + principals = enrichResult.principals; + backfillRemoteAgentPermissions(enricherDeps, resourceId, enrichResult.entriesToBackfill); + } + // Return response in format expected by frontend const response = { resourceType, diff --git a/api/server/controllers/UserController.js b/api/server/controllers/UserController.js index b0cfd7ede2..0f17b4d3a9 100644 --- a/api/server/controllers/UserController.js +++ b/api/server/controllers/UserController.js @@ -22,6 +22,7 @@ const { } = require('~/models'); const { ConversationTag, + AgentApiKey, Transaction, MemoryEntry, Assistant, @@ -256,6 +257,7 @@ const deleteUserController = async (req, res) => { await deleteFiles(null, user.id); // delete database files in case of orphaned files from previous steps await deleteToolCalls(user.id); // delete user tool calls await deleteUserAgents(user.id); // delete user agents + await AgentApiKey.deleteMany({ user: user._id }); // delete user agent API keys 
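Note (illustrative, not part of the diff): the deleteAgent change above now clears ACL entries for both the AGENT and REMOTE_AGENT resource types, and deleteUserAgents mirrors this with a `$in` query. A minimal Jest-style sketch of how that behavior could be verified is shown below; the test scaffolding and the `createTestAgent` fixture are assumptions, while `deleteAgent`, `AclEntry`, and `ResourceType` come from the modules referenced in the hunks above.

```js
// Sketch only: assumes a Jest environment with a connected test database
// and a hypothetical createTestAgent() fixture that grants owner permissions.
const { deleteAgent } = require('~/models/Agent');
const { AclEntry } = require('~/db/models');
const { ResourceType } = require('librechat-data-provider');

it('removes AGENT and REMOTE_AGENT ACL entries when an agent is deleted', async () => {
  const agent = await createTestAgent(); // hypothetical fixture
  await deleteAgent({ id: agent.id });

  // Both resource types should be cleaned up by the Promise.all in deleteAgent
  const remaining = await AclEntry.find({
    resourceType: { $in: [ResourceType.AGENT, ResourceType.REMOTE_AGENT] },
    resourceId: agent._id,
  });
  expect(remaining).toHaveLength(0);
});
```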
await Assistant.deleteMany({ user: user.id }); // delete user assistants await ConversationTag.deleteMany({ user: user.id }); // delete user conversation tags await MemoryEntry.deleteMany({ userId: user.id }); // delete user memory entries diff --git a/api/server/controllers/agents/callbacks.js b/api/server/controllers/agents/callbacks.js index 490f05f832..c27f89fdf8 100644 --- a/api/server/controllers/agents/callbacks.js +++ b/api/server/controllers/agents/callbacks.js @@ -1,7 +1,7 @@ const { nanoid } = require('nanoid'); const { Constants } = require('@librechat/agents'); const { logger } = require('@librechat/data-schemas'); -const { sendEvent, GenerationJobManager } = require('@librechat/api'); +const { sendEvent, GenerationJobManager, writeAttachmentEvent } = require('@librechat/api'); const { Tools, StepTypes, FileContext, ErrorTypes } = require('librechat-data-provider'); const { EnvVar, @@ -489,7 +489,226 @@ function createToolEndCallback({ req, res, artifactPromises, streamId = null }) }; } +/** + * Helper to write attachment events in Open Responses format (librechat:attachment) + * @param {ServerResponse} res - The server response object + * @param {Object} tracker - The response tracker with sequence number + * @param {Object} attachment - The attachment data + * @param {Object} metadata - Additional metadata (messageId, conversationId) + */ +function writeResponsesAttachment(res, tracker, attachment, metadata) { + const sequenceNumber = tracker.nextSequence(); + writeAttachmentEvent(res, sequenceNumber, attachment, { + messageId: metadata.run_id, + conversationId: metadata.thread_id, + }); +} + +/** + * Creates a tool end callback specifically for the Responses API. + * Emits attachments as `librechat:attachment` events per the Open Responses extension spec. + * + * @param {Object} params + * @param {ServerRequest} params.req + * @param {ServerResponse} params.res + * @param {Object} params.tracker - Response tracker with sequence number + * @param {Promise[]} params.artifactPromises + * @returns {ToolEndCallback} The tool end callback. 
+ */ +function createResponsesToolEndCallback({ req, res, tracker, artifactPromises }) { + /** + * @type {ToolEndCallback} + */ + return async (data, metadata) => { + const output = data?.output; + if (!output) { + return; + } + + if (!output.artifact) { + return; + } + + if (output.artifact[Tools.file_search]) { + artifactPromises.push( + (async () => { + const user = req.user; + const attachment = await processFileCitations({ + user, + metadata, + appConfig: req.config, + toolArtifact: output.artifact, + toolCallId: output.tool_call_id, + }); + if (!attachment) { + return null; + } + // For Responses API, emit attachment during streaming + if (res.headersSent && !res.writableEnded) { + writeResponsesAttachment(res, tracker, attachment, metadata); + } + return attachment; + })().catch((error) => { + logger.error('Error processing file citations:', error); + return null; + }), + ); + } + + if (output.artifact[Tools.ui_resources]) { + artifactPromises.push( + (async () => { + const attachment = { + type: Tools.ui_resources, + toolCallId: output.tool_call_id, + [Tools.ui_resources]: output.artifact[Tools.ui_resources].data, + }; + // For Responses API, always emit attachment during streaming + if (res.headersSent && !res.writableEnded) { + writeResponsesAttachment(res, tracker, attachment, metadata); + } + return attachment; + })().catch((error) => { + logger.error('Error processing artifact content:', error); + return null; + }), + ); + } + + if (output.artifact[Tools.web_search]) { + artifactPromises.push( + (async () => { + const attachment = { + type: Tools.web_search, + toolCallId: output.tool_call_id, + [Tools.web_search]: { ...output.artifact[Tools.web_search] }, + }; + // For Responses API, always emit attachment during streaming + if (res.headersSent && !res.writableEnded) { + writeResponsesAttachment(res, tracker, attachment, metadata); + } + return attachment; + })().catch((error) => { + logger.error('Error processing artifact content:', error); + return null; + }), + ); + } + + if (output.artifact.content) { + /** @type {FormattedContent[]} */ + const content = output.artifact.content; + for (let i = 0; i < content.length; i++) { + const part = content[i]; + if (!part) { + continue; + } + if (part.type !== 'image_url') { + continue; + } + const { url } = part.image_url; + artifactPromises.push( + (async () => { + const filename = `${output.name}_img_${nanoid()}`; + const file_id = output.artifact.file_ids?.[i]; + const file = await saveBase64Image(url, { + req, + file_id, + filename, + endpoint: metadata.provider, + context: FileContext.image_generation, + }); + const fileMetadata = Object.assign(file, { + toolCallId: output.tool_call_id, + }); + + if (!fileMetadata) { + return null; + } + + // For Responses API, emit attachment during streaming + if (res.headersSent && !res.writableEnded) { + const attachment = { + file_id: fileMetadata.file_id, + filename: fileMetadata.filename, + type: fileMetadata.type, + url: fileMetadata.filepath, + width: fileMetadata.width, + height: fileMetadata.height, + tool_call_id: output.tool_call_id, + }; + writeResponsesAttachment(res, tracker, attachment, metadata); + } + + return fileMetadata; + })().catch((error) => { + logger.error('Error processing artifact content:', error); + return null; + }), + ); + } + return; + } + + const isCodeTool = + output.name === Tools.execute_code || output.name === Constants.PROGRAMMATIC_TOOL_CALLING; + if (!isCodeTool) { + return; + } + + if (!output.artifact.files) { + return; + } + + for (const file of 
output.artifact.files) { + const { id, name } = file; + artifactPromises.push( + (async () => { + const result = await loadAuthValues({ + userId: req.user.id, + authFields: [EnvVar.CODE_API_KEY], + }); + const fileMetadata = await processCodeOutput({ + req, + id, + name, + apiKey: result[EnvVar.CODE_API_KEY], + messageId: metadata.run_id, + toolCallId: output.tool_call_id, + conversationId: metadata.thread_id, + session_id: output.artifact.session_id, + }); + + if (!fileMetadata) { + return null; + } + + // For Responses API, emit attachment during streaming + if (res.headersSent && !res.writableEnded) { + const attachment = { + file_id: fileMetadata.file_id, + filename: fileMetadata.filename, + type: fileMetadata.type, + url: fileMetadata.filepath, + width: fileMetadata.width, + height: fileMetadata.height, + tool_call_id: output.tool_call_id, + }; + writeResponsesAttachment(res, tracker, attachment, metadata); + } + + return fileMetadata; + })().catch((error) => { + logger.error('Error processing code output:', error); + return null; + }), + ); + } + }; +} + module.exports = { getDefaultHandlers, createToolEndCallback, + createResponsesToolEndCallback, }; diff --git a/api/server/controllers/agents/openai.js b/api/server/controllers/agents/openai.js new file mode 100644 index 0000000000..331179c7f4 --- /dev/null +++ b/api/server/controllers/agents/openai.js @@ -0,0 +1,660 @@ +const { nanoid } = require('nanoid'); +const { logger } = require('@librechat/data-schemas'); +const { EModelEndpoint, ResourceType, PermissionBits } = require('librechat-data-provider'); +const { + Callback, + ToolEndHandler, + formatAgentMessages, + ChatModelStreamHandler, +} = require('@librechat/agents'); +const { + writeSSE, + createRun, + createChunk, + sendFinalChunk, + createSafeUser, + validateRequest, + initializeAgent, + createErrorResponse, + buildNonStreamingResponse, + createOpenAIStreamTracker, + createOpenAIContentAggregator, + isChatCompletionValidationFailure, +} = require('@librechat/api'); +const { createToolEndCallback } = require('~/server/controllers/agents/callbacks'); +const { findAccessibleResources } = require('~/server/services/PermissionService'); +const { loadAgentTools } = require('~/server/services/ToolService'); +const { getConvoFiles } = require('~/models/Conversation'); +const { getAgent, getAgents } = require('~/models/Agent'); +const db = require('~/models'); + +/** + * Creates a tool loader function for the agent. 
+ * @param {AbortSignal} signal - The abort signal + */ +function createToolLoader(signal) { + return async function loadTools({ + req, + res, + tools, + model, + agentId, + provider, + tool_options, + tool_resources, + }) { + const agent = { id: agentId, tools, provider, model, tool_options }; + try { + return await loadAgentTools({ + req, + res, + agent, + signal, + tool_resources, + streamId: null, // No resumable stream for OpenAI compat + }); + } catch (error) { + logger.error('Error loading tools for agent ' + agentId, error); + } + }; +} + +/** + * Convert content part to internal format + * @param {Object} part - Content part + * @returns {Object} Converted part + */ +function convertContentPart(part) { + if (part.type === 'text') { + return { type: 'text', text: part.text }; + } + if (part.type === 'image_url') { + return { type: 'image_url', image_url: part.image_url }; + } + return part; +} + +/** + * Convert OpenAI messages to internal format + * @param {Array} messages - OpenAI format messages + * @returns {Array} Internal format messages + */ +function convertMessages(messages) { + return messages.map((msg) => { + let content; + if (typeof msg.content === 'string') { + content = msg.content; + } else if (msg.content) { + content = msg.content.map(convertContentPart); + } else { + content = ''; + } + + return { + role: msg.role, + content, + ...(msg.name && { name: msg.name }), + ...(msg.tool_calls && { tool_calls: msg.tool_calls }), + ...(msg.tool_call_id && { tool_call_id: msg.tool_call_id }), + }; + }); +} + +/** + * Send an error response in OpenAI format + */ +function sendErrorResponse(res, statusCode, message, type = 'invalid_request_error', code = null) { + res.status(statusCode).json(createErrorResponse(message, type, code)); +} + +/** + * OpenAI-compatible chat completions controller for agents. + * + * POST /v1/chat/completions + * + * Request format: + * { + * "model": "agent_id_here", + * "messages": [{"role": "user", "content": "Hello!"}], + * "stream": true, + * "conversation_id": "optional", + * "parent_message_id": "optional" + * } + */ +const OpenAIChatCompletionController = async (req, res) => { + const appConfig = req.config; + + // Validate request + const validation = validateRequest(req.body); + if (isChatCompletionValidationFailure(validation)) { + return sendErrorResponse(res, 400, validation.error); + } + + const request = validation.request; + const agentId = request.model; + + // Look up the agent + const agent = await getAgent({ id: agentId }); + if (!agent) { + return sendErrorResponse( + res, + 404, + `Agent not found: ${agentId}`, + 'invalid_request_error', + 'model_not_found', + ); + } + + // Generate IDs + const requestId = `chatcmpl-${nanoid()}`; + const conversationId = request.conversation_id ?? nanoid(); + const parentMessageId = request.parent_message_id ?? 
null; + const created = Math.floor(Date.now() / 1000); + + const context = { + created, + requestId, + model: agentId, + }; + + // Set up abort controller + const abortController = new AbortController(); + + // Handle client disconnect + req.on('close', () => { + if (!abortController.signal.aborted) { + abortController.abort(); + logger.debug('[OpenAI API] Client disconnected, aborting'); + } + }); + + try { + // Build allowed providers set + const allowedProviders = new Set( + appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders, + ); + + // Create tool loader + const loadTools = createToolLoader(abortController.signal); + + // Initialize the agent first to check for disableStreaming + const endpointOption = { + endpoint: agent.provider, + model_parameters: agent.model_parameters ?? {}, + }; + + const primaryConfig = await initializeAgent( + { + req, + res, + loadTools, + requestFiles: [], + conversationId, + parentMessageId, + agent, + endpointOption, + allowedProviders, + isInitialAgent: true, + }, + { + getConvoFiles, + getFiles: db.getFiles, + getUserKey: db.getUserKey, + getMessages: db.getMessages, + updateFilesUsage: db.updateFilesUsage, + getUserKeyValues: db.getUserKeyValues, + getUserCodeFiles: db.getUserCodeFiles, + getToolFilesByIds: db.getToolFilesByIds, + getCodeGeneratedFiles: db.getCodeGeneratedFiles, + }, + ); + + // Determine if streaming is enabled (check both request and agent config) + const streamingDisabled = !!primaryConfig.model_parameters?.disableStreaming; + const isStreaming = request.stream === true && !streamingDisabled; + + // Create tracker for streaming or aggregator for non-streaming + const tracker = isStreaming ? createOpenAIStreamTracker() : null; + const aggregator = isStreaming ? null : createOpenAIContentAggregator(); + + // Set up response for streaming + if (isStreaming) { + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + res.flushHeaders(); + + // Send initial chunk with role + const initialChunk = createChunk(context, { role: 'assistant' }); + writeSSE(res, initialChunk); + } + + // Create handler config for OpenAI streaming (only used when streaming) + const handlerConfig = isStreaming + ? { + res, + context, + tracker, + } + : null; + + // We need custom handlers that stream in OpenAI format + const collectedUsage = []; + /** @type {Promise[]} */ + const artifactPromises = []; + + // Create tool end callback for processing artifacts (images, file citations, code output) + const toolEndCallback = createToolEndCallback({ req, res, artifactPromises, streamId: null }); + + // Convert messages to internal format + const openaiMessages = convertMessages(request.messages); + + // Format for agent + const toolSet = new Set((primaryConfig.tools ?? 
[]).map((tool) => tool && tool.name)); + const { messages: formattedMessages, indexTokenCountMap } = formatAgentMessages( + openaiMessages, + {}, + toolSet, + ); + + /** + * Create a simple handler that processes data + */ + const createHandler = (processor) => ({ + handle: (_event, data) => { + if (processor) { + processor(data); + } + }, + }); + + /** + * Stream text content in OpenAI format + */ + const streamText = (text) => { + if (!text) { + return; + } + if (isStreaming) { + tracker.addText(); + writeSSE(res, createChunk(context, { content: text })); + } else { + aggregator.addText(text); + } + }; + + /** + * Stream reasoning content in OpenAI format (OpenRouter convention) + */ + const streamReasoning = (text) => { + if (!text) { + return; + } + if (isStreaming) { + tracker.addReasoning(); + writeSSE(res, createChunk(context, { reasoning: text })); + } else { + aggregator.addReasoning(text); + } + }; + + // Built-in handler for processing raw model stream chunks + const chatModelStreamHandler = new ChatModelStreamHandler(); + + // Event handlers for OpenAI-compatible streaming + const handlers = { + // Process raw model chunks and dispatch message/reasoning deltas + on_chat_model_stream: { + handle: async (event, data, metadata, graph) => { + await chatModelStreamHandler.handle(event, data, metadata, graph); + }, + }, + + // Text content streaming + on_message_delta: createHandler((data) => { + const content = data?.delta?.content; + if (Array.isArray(content)) { + for (const part of content) { + if (part.type === 'text' && part.text) { + streamText(part.text); + } + } + } + }), + + // Reasoning/thinking content streaming + on_reasoning_delta: createHandler((data) => { + const content = data?.delta?.content; + if (Array.isArray(content)) { + for (const part of content) { + const text = part.think || part.text; + if (text) { + streamReasoning(text); + } + } + } + }), + + // Tool call initiation - streams id and name (from on_run_step) + on_run_step: createHandler((data) => { + const stepDetails = data?.stepDetails; + if (stepDetails?.type === 'tool_calls' && stepDetails.tool_calls) { + for (const tc of stepDetails.tool_calls) { + const toolIndex = data.index ?? 0; + const toolId = tc.id ?? ''; + const toolName = tc.name ?? ''; + const toolCall = { + id: toolId, + type: 'function', + function: { name: toolName, arguments: '' }, + }; + + // Track tool call in tracker or aggregator + if (isStreaming) { + if (!tracker.toolCalls.has(toolIndex)) { + tracker.toolCalls.set(toolIndex, toolCall); + } + // Stream initial tool call chunk (like OpenAI does) + writeSSE( + res, + createChunk(context, { + tool_calls: [{ index: toolIndex, ...toolCall }], + }), + ); + } else { + if (!aggregator.toolCalls.has(toolIndex)) { + aggregator.toolCalls.set(toolIndex, toolCall); + } + } + } + } + }), + + // Tool call argument streaming (from on_run_step_delta) + on_run_step_delta: createHandler((data) => { + const delta = data?.delta; + if (delta?.type === 'tool_calls' && delta.tool_calls) { + for (const tc of delta.tool_calls) { + const args = tc.args ?? ''; + if (!args) { + continue; + } + + const toolIndex = tc.index ?? 0; + + // Update tool call arguments + const targetMap = isStreaming ? 
tracker.toolCalls : aggregator.toolCalls; + const tracked = targetMap.get(toolIndex); + if (tracked) { + tracked.function.arguments += args; + } + + // Stream argument delta (only for streaming) + if (isStreaming) { + writeSSE( + res, + createChunk(context, { + tool_calls: [ + { + index: toolIndex, + function: { arguments: args }, + }, + ], + }), + ); + } + } + } + }), + + // Usage tracking + on_chat_model_end: createHandler((data) => { + const usage = data?.output?.usage_metadata; + if (usage) { + collectedUsage.push(usage); + const target = isStreaming ? tracker : aggregator; + target.usage.promptTokens += usage.input_tokens ?? 0; + target.usage.completionTokens += usage.output_tokens ?? 0; + } + }), + on_run_step_completed: createHandler(), + // Use proper ToolEndHandler for processing artifacts (images, file citations, code output) + on_tool_end: new ToolEndHandler(toolEndCallback, logger), + on_chain_stream: createHandler(), + on_chain_end: createHandler(), + on_agent_update: createHandler(), + on_custom_event: createHandler(), + }; + + // Create and run the agent + const userId = req.user?.id ?? 'api-user'; + + // Extract userMCPAuthMap from primaryConfig (needed for MCP tool connections) + const userMCPAuthMap = primaryConfig.userMCPAuthMap; + + const run = await createRun({ + agents: [primaryConfig], + messages: formattedMessages, + indexTokenCountMap, + runId: requestId, + signal: abortController.signal, + customHandlers: handlers, + requestBody: { + messageId: requestId, + conversationId, + }, + user: { id: userId }, + }); + + if (!run) { + throw new Error('Failed to create agent run'); + } + + // Process the stream + const config = { + runName: 'AgentRun', + configurable: { + thread_id: conversationId, + user_id: userId, + user: createSafeUser(req.user), + ...(userMCPAuthMap != null && { userMCPAuthMap }), + }, + signal: abortController.signal, + streamMode: 'values', + version: 'v2', + }; + + await run.processStream({ messages: formattedMessages }, config, { + callbacks: { + [Callback.TOOL_ERROR]: (graph, error, toolId) => { + logger.error(`[OpenAI API] Tool Error "${toolId}"`, error); + }, + }, + }); + + // Finalize response + if (isStreaming) { + sendFinalChunk(handlerConfig); + res.end(); + + // Wait for artifact processing after response ends (non-blocking) + if (artifactPromises.length > 0) { + Promise.all(artifactPromises).catch((artifactError) => { + logger.warn('[OpenAI API] Error processing artifacts:', artifactError); + }); + } + } else { + // For non-streaming, wait for artifacts before sending response + if (artifactPromises.length > 0) { + try { + await Promise.all(artifactPromises); + } catch (artifactError) { + logger.warn('[OpenAI API] Error processing artifacts:', artifactError); + } + } + + // Build usage from aggregated data + const usage = { + prompt_tokens: aggregator.usage.promptTokens, + completion_tokens: aggregator.usage.completionTokens, + total_tokens: aggregator.usage.promptTokens + aggregator.usage.completionTokens, + }; + + if (aggregator.usage.reasoningTokens > 0) { + usage.completion_tokens_details = { + reasoning_tokens: aggregator.usage.reasoningTokens, + }; + } + + const response = buildNonStreamingResponse( + context, + aggregator.getText(), + aggregator.getReasoning(), + aggregator.toolCalls, + usage, + ); + res.json(response); + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'An error occurred'; + logger.error('[OpenAI API] Error:', error); + + // Check if we already started streaming (headers sent) + if (res.headersSent) { + // Headers already sent, send error in stream + const errorChunk = createChunk(context, { content: `\n\nError: ${errorMessage}` }, 'stop'); + writeSSE(res, errorChunk); + writeSSE(res, '[DONE]'); + res.end(); + } else { + sendErrorResponse(res, 500, errorMessage, 'server_error'); + } + } +}; + +/** + * List available agents as models (filtered by remote access permissions) + * + * GET /v1/models + */ +const ListModelsController = async (req, res) => { + try { + const userId = req.user?.id; + const userRole = req.user?.role; + + if (!userId) { + return sendErrorResponse(res, 401, 'Authentication required', 'auth_error'); + } + + // Find agents the user has remote access to (VIEW permission on REMOTE_AGENT) + const accessibleAgentIds = await findAccessibleResources({ + userId, + role: userRole, + resourceType: ResourceType.REMOTE_AGENT, + requiredPermissions: PermissionBits.VIEW, + }); + + // Get the accessible agents + let agents = []; + if (accessibleAgentIds.length > 0) { + agents = await getAgents({ _id: { $in: accessibleAgentIds } }); + } + + const models = agents.map((agent) => ({ + id: agent.id, + object: 'model', + created: Math.floor(new Date(agent.createdAt || Date.now()).getTime() / 1000), + owned_by: 'librechat', + permission: [], + root: agent.id, + parent: null, + // LibreChat extensions + name: agent.name, + description: agent.description, + provider: agent.provider, + })); + + res.json({ + object: 'list', + data: models, + }); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : 'Failed to list models'; + logger.error('[OpenAI API] Error listing models:', error); + sendErrorResponse(res, 500, errorMessage, 'server_error'); + } +}; + +/** + * Get a specific model/agent (with remote access permission check) + * + * GET /v1/models/:model + */ +const GetModelController = async (req, res) => { + try { + const { model } = req.params; + const userId = req.user?.id; + const userRole = req.user?.role; + + if (!userId) { + return sendErrorResponse(res, 401, 'Authentication required', 'auth_error'); + } + + const agent = await getAgent({ id: model }); + + if (!agent) { + return sendErrorResponse( + res, + 404, + `Model not found: ${model}`, + 'invalid_request_error', + 'model_not_found', + ); + } + + // Check if user has remote access to this agent + const accessibleAgentIds = await findAccessibleResources({ + userId, + role: userRole, + resourceType: ResourceType.REMOTE_AGENT, + requiredPermissions: PermissionBits.VIEW, + }); + + const hasAccess = accessibleAgentIds.some((id) => id.toString() === agent._id.toString()); + + if (!hasAccess) { + return sendErrorResponse( + res, + 403, + `No remote access to model: ${model}`, + 'permission_error', + 'access_denied', + ); + } + + res.json({ + id: agent.id, + object: 'model', + created: Math.floor(new Date(agent.createdAt || Date.now()).getTime() / 1000), + owned_by: 'librechat', + permission: [], + root: agent.id, + parent: null, + // LibreChat extensions + name: agent.name, + description: agent.description, + provider: agent.provider, + }); + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'Failed to get model'; + logger.error('[OpenAI API] Error getting model:', error); + sendErrorResponse(res, 500, errorMessage, 'server_error'); + } +}; + +module.exports = { + OpenAIChatCompletionController, + ListModelsController, + GetModelController, +}; diff --git a/api/server/controllers/agents/responses.js b/api/server/controllers/agents/responses.js new file mode 100644 index 0000000000..bf52edcf7d --- /dev/null +++ b/api/server/controllers/agents/responses.js @@ -0,0 +1,800 @@ +const { nanoid } = require('nanoid'); +const { v4: uuidv4 } = require('uuid'); +const { logger } = require('@librechat/data-schemas'); +const { EModelEndpoint, ResourceType, PermissionBits } = require('librechat-data-provider'); +const { + Callback, + ToolEndHandler, + formatAgentMessages, + ChatModelStreamHandler, +} = require('@librechat/agents'); +const { + createRun, + createSafeUser, + initializeAgent, + // Responses API + writeDone, + buildResponse, + generateResponseId, + isValidationFailure, + emitResponseCreated, + createResponseContext, + createResponseTracker, + setupStreamingResponse, + emitResponseInProgress, + convertInputToMessages, + validateResponseRequest, + buildAggregatedResponse, + createResponseAggregator, + sendResponsesErrorResponse, + createResponsesEventHandlers, + createAggregatorEventHandlers, +} = require('@librechat/api'); +const { + createResponsesToolEndCallback, + createToolEndCallback, +} = require('~/server/controllers/agents/callbacks'); +const { findAccessibleResources } = require('~/server/services/PermissionService'); +const { getConvoFiles, saveConvo, getConvo } = require('~/models/Conversation'); +const { loadAgentTools } = require('~/server/services/ToolService'); +const { getAgent, getAgents } = require('~/models/Agent'); +const db = require('~/models'); + +/** @type {import('@librechat/api').AppConfig | null} */ +let appConfig = null; + +/** + * Set the app config for the controller + * @param {import('@librechat/api').AppConfig} config + */ +function setAppConfig(config) { + appConfig = config; +} + +/** + * Creates a tool loader function for the agent. + * @param {AbortSignal} signal - The abort signal + */ +function createToolLoader(signal) { + return async function loadTools({ + req, + res, + tools, + model, + agentId, + provider, + tool_options, + tool_resources, + }) { + const agent = { id: agentId, tools, provider, model, tool_options }; + try { + return await loadAgentTools({ + req, + res, + agent, + signal, + tool_resources, + streamId: null, + }); + } catch (error) { + logger.error('Error loading tools for agent ' + agentId, error); + } + }; +} + +/** + * Convert Open Responses input items to internal messages + * @param {import('@librechat/api').InputItem[]} input + * @returns {Array} Internal messages + */ +function convertToInternalMessages(input) { + return convertInputToMessages(input); +} + +/** + * Load messages from a previous response/conversation + * @param {string} conversationId - The conversation/response ID + * @param {string} userId - The user ID + * @returns {Promise} Messages from the conversation + */ +async function loadPreviousMessages(conversationId, userId) { + try { + const messages = await db.getMessages({ conversationId, user: userId }); + if (!messages || messages.length === 0) { + return []; + } + + // Convert stored messages to internal format + return messages.map((msg) => { + const internalMsg = { + role: msg.isCreatedByUser ? 
'user' : 'assistant', + content: '', + messageId: msg.messageId, + }; + + // Handle content - could be string or array + if (typeof msg.text === 'string') { + internalMsg.content = msg.text; + } else if (Array.isArray(msg.content)) { + // Handle content parts + internalMsg.content = msg.content; + } else if (msg.text) { + internalMsg.content = String(msg.text); + } + + return internalMsg; + }); + } catch (error) { + logger.error('[Responses API] Error loading previous messages:', error); + return []; + } +} + +/** + * Save input messages to database + * @param {import('express').Request} req + * @param {string} conversationId + * @param {Array} inputMessages - Internal format messages + * @param {string} agentId + * @returns {Promise} + */ +async function saveInputMessages(req, conversationId, inputMessages, agentId) { + for (const msg of inputMessages) { + if (msg.role === 'user') { + await db.saveMessage( + req, + { + messageId: msg.messageId || nanoid(), + conversationId, + parentMessageId: null, + isCreatedByUser: true, + text: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content), + sender: 'User', + endpoint: EModelEndpoint.agents, + model: agentId, + }, + { context: 'Responses API - save user input' }, + ); + } + } +} + +/** + * Save response output to database + * @param {import('express').Request} req + * @param {string} conversationId + * @param {string} responseId + * @param {import('@librechat/api').Response} response + * @param {string} agentId + * @returns {Promise} + */ +async function saveResponseOutput(req, conversationId, responseId, response, agentId) { + // Extract text content from output items + let responseText = ''; + for (const item of response.output) { + if (item.type === 'message' && item.content) { + for (const part of item.content) { + if (part.type === 'output_text' && part.text) { + responseText += part.text; + } + } + } + } + + // Save the assistant message + await db.saveMessage( + req, + { + messageId: responseId, + conversationId, + parentMessageId: null, + isCreatedByUser: false, + text: responseText, + sender: 'Agent', + endpoint: EModelEndpoint.agents, + model: agentId, + finish_reason: response.status === 'completed' ? 'stop' : response.status, + tokenCount: response.usage?.output_tokens, + }, + { context: 'Responses API - save assistant response' }, + ); +} + +/** + * Save or update conversation + * @param {import('express').Request} req + * @param {string} conversationId + * @param {string} agentId + * @param {object} agent + * @returns {Promise} + */ +async function saveConversation(req, conversationId, agentId, agent) { + await saveConvo( + req, + { + conversationId, + endpoint: EModelEndpoint.agents, + agentId, + title: agent?.name || 'Open Responses Conversation', + model: agent?.model, + }, + { context: 'Responses API - save conversation' }, + ); +} + +/** + * Convert stored messages to Open Responses output format + * @param {Array} messages - Stored messages + * @returns {Array} Output items + */ +function convertMessagesToOutputItems(messages) { + const output = []; + + for (const msg of messages) { + if (!msg.isCreatedByUser) { + output.push({ + type: 'message', + id: msg.messageId, + role: 'assistant', + status: 'completed', + content: [ + { + type: 'output_text', + text: msg.text || '', + annotations: [], + }, + ], + }); + } + } + + return output; +} + +/** + * Create Response - POST /v1/responses + * + * Creates a model response following the Open Responses API specification. 
+ * Supports both streaming and non-streaming responses. + * + * @param {import('express').Request} req + * @param {import('express').Response} res + */ +const createResponse = async (req, res) => { + // Validate request + const validation = validateResponseRequest(req.body); + if (isValidationFailure(validation)) { + return sendResponsesErrorResponse(res, 400, validation.error); + } + + const request = validation.request; + const agentId = request.model; + const isStreaming = request.stream === true; + + // Look up the agent + const agent = await getAgent({ id: agentId }); + if (!agent) { + return sendResponsesErrorResponse( + res, + 404, + `Agent not found: ${agentId}`, + 'not_found', + 'model_not_found', + ); + } + + // Generate IDs + const responseId = generateResponseId(); + const conversationId = request.previous_response_id ?? uuidv4(); + const parentMessageId = null; + + // Create response context + const context = createResponseContext(request, responseId); + + // Set up abort controller + const abortController = new AbortController(); + + // Handle client disconnect + req.on('close', () => { + if (!abortController.signal.aborted) { + abortController.abort(); + logger.debug('[Responses API] Client disconnected, aborting'); + } + }); + + try { + // Build allowed providers set + const allowedProviders = new Set( + appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders, + ); + + // Create tool loader + const loadTools = createToolLoader(abortController.signal); + + // Initialize the agent first to check for disableStreaming + const endpointOption = { + endpoint: agent.provider, + model_parameters: agent.model_parameters ?? {}, + }; + + const primaryConfig = await initializeAgent( + { + req, + res, + loadTools, + requestFiles: [], + conversationId, + parentMessageId, + agent, + endpointOption, + allowedProviders, + isInitialAgent: true, + }, + { + getConvoFiles, + getFiles: db.getFiles, + getUserKey: db.getUserKey, + getMessages: db.getMessages, + updateFilesUsage: db.updateFilesUsage, + getUserKeyValues: db.getUserKeyValues, + getUserCodeFiles: db.getUserCodeFiles, + getToolFilesByIds: db.getToolFilesByIds, + getCodeGeneratedFiles: db.getCodeGeneratedFiles, + }, + ); + + // Determine if streaming is enabled (check both request and agent config) + const streamingDisabled = !!primaryConfig.model_parameters?.disableStreaming; + const actuallyStreaming = isStreaming && !streamingDisabled; + + // Load previous messages if previous_response_id is provided + let previousMessages = []; + if (request.previous_response_id) { + const userId = req.user?.id ?? 'api-user'; + previousMessages = await loadPreviousMessages(request.previous_response_id, userId); + } + + // Convert input to internal messages + const inputMessages = convertToInternalMessages( + typeof request.input === 'string' ? request.input : request.input, + ); + + // Merge previous messages with new input + const allMessages = [...previousMessages, ...inputMessages]; + + // Format for agent + const toolSet = new Set((primaryConfig.tools ?? []).map((tool) => tool && tool.name)); + const { messages: formattedMessages, indexTokenCountMap } = formatAgentMessages( + allMessages, + {}, + toolSet, + ); + + // Create tracker for streaming or aggregator for non-streaming + const tracker = actuallyStreaming ? createResponseTracker() : null; + const aggregator = actuallyStreaming ? 
null : createResponseAggregator(); + + // Set up response for streaming + if (actuallyStreaming) { + setupStreamingResponse(res); + + // Create handler config + const handlerConfig = { + res, + context, + tracker, + }; + + // Emit response.created then response.in_progress per Open Responses spec + emitResponseCreated(handlerConfig); + emitResponseInProgress(handlerConfig); + + // Create event handlers + const { handlers: responsesHandlers, finalizeStream } = + createResponsesEventHandlers(handlerConfig); + + // Built-in handler for processing raw model stream chunks + const chatModelStreamHandler = new ChatModelStreamHandler(); + + // Artifact promises for processing tool outputs + /** @type {Promise[]} */ + const artifactPromises = []; + // Use Responses API-specific callback that emits librechat:attachment events + const toolEndCallback = createResponsesToolEndCallback({ + req, + res, + tracker, + artifactPromises, + }); + + // Combine handlers + const handlers = { + on_chat_model_stream: { + handle: async (event, data, metadata, graph) => { + await chatModelStreamHandler.handle(event, data, metadata, graph); + }, + }, + on_message_delta: responsesHandlers.on_message_delta, + on_reasoning_delta: responsesHandlers.on_reasoning_delta, + on_run_step: responsesHandlers.on_run_step, + on_run_step_delta: responsesHandlers.on_run_step_delta, + on_chat_model_end: responsesHandlers.on_chat_model_end, + on_tool_end: new ToolEndHandler(toolEndCallback, logger), + on_run_step_completed: { handle: () => {} }, + on_chain_stream: { handle: () => {} }, + on_chain_end: { handle: () => {} }, + on_agent_update: { handle: () => {} }, + on_custom_event: { handle: () => {} }, + }; + + // Create and run the agent + const userId = req.user?.id ?? 'api-user'; + const userMCPAuthMap = primaryConfig.userMCPAuthMap; + + const run = await createRun({ + agents: [primaryConfig], + messages: formattedMessages, + indexTokenCountMap, + runId: responseId, + signal: abortController.signal, + customHandlers: handlers, + requestBody: { + messageId: responseId, + conversationId, + }, + user: { id: userId }, + }); + + if (!run) { + throw new Error('Failed to create agent run'); + } + + // Process the stream + const config = { + runName: 'AgentRun', + configurable: { + thread_id: conversationId, + user_id: userId, + user: createSafeUser(req.user), + ...(userMCPAuthMap != null && { userMCPAuthMap }), + }, + signal: abortController.signal, + streamMode: 'values', + version: 'v2', + }; + + await run.processStream({ messages: formattedMessages }, config, { + callbacks: { + [Callback.TOOL_ERROR]: (graph, error, toolId) => { + logger.error(`[Responses API] Tool Error "${toolId}"`, error); + }, + }, + }); + + // Finalize the stream + finalizeStream(); + res.end(); + + // Save to database if store: true + if (request.store === true) { + try { + // Save conversation + await saveConversation(req, conversationId, agentId, agent); + + // Save input messages + await saveInputMessages(req, conversationId, inputMessages, agentId); + + // Build response for saving (use tracker with buildResponse for streaming) + const finalResponse = buildResponse(context, tracker, 'completed'); + await saveResponseOutput(req, conversationId, responseId, finalResponse, agentId); + + logger.debug( + `[Responses API] Stored response ${responseId} in conversation ${conversationId}`, + ); + } catch (saveError) { + logger.error('[Responses API] Error saving response:', saveError); + // Don't fail the request if saving fails + } + } + + // Wait for artifact 
processing after response ends (non-blocking) + if (artifactPromises.length > 0) { + Promise.all(artifactPromises).catch((artifactError) => { + logger.warn('[Responses API] Error processing artifacts:', artifactError); + }); + } + } else { + // Non-streaming response + const aggregatorHandlers = createAggregatorEventHandlers(aggregator); + + // Built-in handler for processing raw model stream chunks + const chatModelStreamHandler = new ChatModelStreamHandler(); + + // Artifact promises for processing tool outputs + /** @type {Promise[]} */ + const artifactPromises = []; + const toolEndCallback = createToolEndCallback({ req, res, artifactPromises, streamId: null }); + + // Combine handlers + const handlers = { + on_chat_model_stream: { + handle: async (event, data, metadata, graph) => { + await chatModelStreamHandler.handle(event, data, metadata, graph); + }, + }, + on_message_delta: aggregatorHandlers.on_message_delta, + on_reasoning_delta: aggregatorHandlers.on_reasoning_delta, + on_run_step: aggregatorHandlers.on_run_step, + on_run_step_delta: aggregatorHandlers.on_run_step_delta, + on_chat_model_end: aggregatorHandlers.on_chat_model_end, + on_tool_end: new ToolEndHandler(toolEndCallback, logger), + on_run_step_completed: { handle: () => {} }, + on_chain_stream: { handle: () => {} }, + on_chain_end: { handle: () => {} }, + on_agent_update: { handle: () => {} }, + on_custom_event: { handle: () => {} }, + }; + + // Create and run the agent + const userId = req.user?.id ?? 'api-user'; + const userMCPAuthMap = primaryConfig.userMCPAuthMap; + + const run = await createRun({ + agents: [primaryConfig], + messages: formattedMessages, + indexTokenCountMap, + runId: responseId, + signal: abortController.signal, + customHandlers: handlers, + requestBody: { + messageId: responseId, + conversationId, + }, + user: { id: userId }, + }); + + if (!run) { + throw new Error('Failed to create agent run'); + } + + // Process the stream + const config = { + runName: 'AgentRun', + configurable: { + thread_id: conversationId, + user_id: userId, + user: createSafeUser(req.user), + ...(userMCPAuthMap != null && { userMCPAuthMap }), + }, + signal: abortController.signal, + streamMode: 'values', + version: 'v2', + }; + + await run.processStream({ messages: formattedMessages }, config, { + callbacks: { + [Callback.TOOL_ERROR]: (graph, error, toolId) => { + logger.error(`[Responses API] Tool Error "${toolId}"`, error); + }, + }, + }); + + // Wait for artifacts before sending response + if (artifactPromises.length > 0) { + try { + await Promise.all(artifactPromises); + } catch (artifactError) { + logger.warn('[Responses API] Error processing artifacts:', artifactError); + } + } + + // Build and send the response + const response = buildAggregatedResponse(context, aggregator); + + // Save to database if store: true + if (request.store === true) { + try { + // Save conversation + await saveConversation(req, conversationId, agentId, agent); + + // Save input messages + await saveInputMessages(req, conversationId, inputMessages, agentId); + + // Save response output + await saveResponseOutput(req, conversationId, responseId, response, agentId); + + logger.debug( + `[Responses API] Stored response ${responseId} in conversation ${conversationId}`, + ); + } catch (saveError) { + logger.error('[Responses API] Error saving response:', saveError); + // Don't fail the request if saving fails + } + } + + res.json(response); + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'An error occurred'; + logger.error('[Responses API] Error:', error); + + // Check if we already started streaming (headers sent) + if (res.headersSent) { + // Headers already sent, write error event and close + writeDone(res); + res.end(); + } else { + sendResponsesErrorResponse(res, 500, errorMessage, 'server_error'); + } + } +}; + +/** + * List available agents as models - GET /v1/models (also works with /v1/responses/models) + * + * Returns a list of available agents the user has remote access to. + * + * @param {import('express').Request} req + * @param {import('express').Response} res + */ +const listModels = async (req, res) => { + try { + const userId = req.user?.id; + const userRole = req.user?.role; + + if (!userId) { + return sendResponsesErrorResponse(res, 401, 'Authentication required', 'auth_error'); + } + + // Find agents the user has remote access to (VIEW permission on REMOTE_AGENT) + const accessibleAgentIds = await findAccessibleResources({ + userId, + role: userRole, + resourceType: ResourceType.REMOTE_AGENT, + requiredPermissions: PermissionBits.VIEW, + }); + + // Get the accessible agents + let agents = []; + if (accessibleAgentIds.length > 0) { + agents = await getAgents({ _id: { $in: accessibleAgentIds } }); + } + + // Convert to models format + const models = agents.map((agent) => ({ + id: agent.id, + object: 'model', + created: Math.floor(new Date(agent.createdAt).getTime() / 1000), + owned_by: agent.author ?? 'librechat', + // Additional metadata + name: agent.name, + description: agent.description, + provider: agent.provider, + })); + + res.json({ + object: 'list', + data: models, + }); + } catch (error) { + logger.error('[Responses API] Error listing models:', error); + sendResponsesErrorResponse( + res, + 500, + error instanceof Error ? error.message : 'Failed to list models', + 'server_error', + ); + } +}; + +/** + * Get Response - GET /v1/responses/:id + * + * Retrieves a stored response by its ID. + * The response ID maps to a conversationId in LibreChat's storage. 
+ * + * @param {import('express').Request} req + * @param {import('express').Response} res + */ +const getResponse = async (req, res) => { + try { + const responseId = req.params.id; + const userId = req.user?.id; + + if (!responseId) { + return sendResponsesErrorResponse(res, 400, 'Response ID is required'); + } + + // The responseId could be either the response ID or the conversation ID + // Try to find a conversation with this ID + const conversation = await getConvo(userId, responseId); + + if (!conversation) { + return sendResponsesErrorResponse( + res, + 404, + `Response not found: ${responseId}`, + 'not_found', + 'response_not_found', + ); + } + + // Load messages for this conversation + const messages = await db.getMessages({ conversationId: responseId, user: userId }); + + if (!messages || messages.length === 0) { + return sendResponsesErrorResponse( + res, + 404, + `No messages found for response: ${responseId}`, + 'not_found', + 'response_not_found', + ); + } + + // Convert messages to Open Responses output format + const output = convertMessagesToOutputItems(messages); + + // Find the last assistant message for usage info + const lastAssistantMessage = messages.filter((m) => !m.isCreatedByUser).pop(); + + // Build the response object + const response = { + id: responseId, + object: 'response', + created_at: Math.floor(new Date(conversation.createdAt || Date.now()).getTime() / 1000), + completed_at: Math.floor(new Date(conversation.updatedAt || Date.now()).getTime() / 1000), + status: 'completed', + incomplete_details: null, + model: conversation.agentId || conversation.model || 'unknown', + previous_response_id: null, + instructions: null, + output, + error: null, + tools: [], + tool_choice: 'auto', + truncation: 'disabled', + parallel_tool_calls: true, + text: { format: { type: 'text' } }, + temperature: 1, + top_p: 1, + presence_penalty: 0, + frequency_penalty: 0, + top_logprobs: null, + reasoning: null, + user: userId, + usage: lastAssistantMessage?.tokenCount + ? { + input_tokens: 0, + output_tokens: lastAssistantMessage.tokenCount, + total_tokens: lastAssistantMessage.tokenCount, + } + : null, + max_output_tokens: null, + max_tool_calls: null, + store: true, + background: false, + service_tier: 'default', + metadata: {}, + safety_identifier: null, + prompt_cache_key: null, + }; + + res.json(response); + } catch (error) { + logger.error('[Responses API] Error getting response:', error); + sendResponsesErrorResponse( + res, + 500, + error instanceof Error ? 
error.message : 'Failed to get response', + 'server_error', + ); + } +}; + +module.exports = { + createResponse, + getResponse, + listModels, + setAppConfig, +}; diff --git a/api/server/controllers/agents/v1.js b/api/server/controllers/agents/v1.js index 9f0a4a2279..34078b2250 100644 --- a/api/server/controllers/agents/v1.js +++ b/api/server/controllers/agents/v1.js @@ -11,7 +11,9 @@ const { convertOcrToContextInPlace, } = require('@librechat/api'); const { + Time, Tools, + CacheKeys, Constants, FileSources, ResourceType, @@ -21,8 +23,6 @@ const { PermissionBits, actionDelimiter, removeNullishValues, - CacheKeys, - Time, } = require('librechat-data-provider'); const { getListAgentsByAccess, @@ -94,16 +94,25 @@ const createAgentHandler = async (req, res) => { const agent = await createAgent(agentData); - // Automatically grant owner permissions to the creator try { - await grantPermission({ - principalType: PrincipalType.USER, - principalId: userId, - resourceType: ResourceType.AGENT, - resourceId: agent._id, - accessRoleId: AccessRoleIds.AGENT_OWNER, - grantedBy: userId, - }); + await Promise.all([ + grantPermission({ + principalType: PrincipalType.USER, + principalId: userId, + resourceType: ResourceType.AGENT, + resourceId: agent._id, + accessRoleId: AccessRoleIds.AGENT_OWNER, + grantedBy: userId, + }), + grantPermission({ + principalType: PrincipalType.USER, + principalId: userId, + resourceType: ResourceType.REMOTE_AGENT, + resourceId: agent._id, + accessRoleId: AccessRoleIds.REMOTE_AGENT_OWNER, + grantedBy: userId, + }), + ]); logger.debug( `[createAgent] Granted owner permissions to user ${userId} for agent ${agent.id}`, ); @@ -396,16 +405,25 @@ const duplicateAgentHandler = async (req, res) => { newAgentData.actions = agentActions; const newAgent = await createAgent(newAgentData); - // Automatically grant owner permissions to the duplicator try { - await grantPermission({ - principalType: PrincipalType.USER, - principalId: userId, - resourceType: ResourceType.AGENT, - resourceId: newAgent._id, - accessRoleId: AccessRoleIds.AGENT_OWNER, - grantedBy: userId, - }); + await Promise.all([ + grantPermission({ + principalType: PrincipalType.USER, + principalId: userId, + resourceType: ResourceType.AGENT, + resourceId: newAgent._id, + accessRoleId: AccessRoleIds.AGENT_OWNER, + grantedBy: userId, + }), + grantPermission({ + principalType: PrincipalType.USER, + principalId: userId, + resourceType: ResourceType.REMOTE_AGENT, + resourceId: newAgent._id, + accessRoleId: AccessRoleIds.REMOTE_AGENT_OWNER, + grantedBy: userId, + }), + ]); logger.debug( `[duplicateAgent] Granted owner permissions to user ${userId} for duplicated agent ${newAgent.id}`, ); diff --git a/api/server/experimental.js b/api/server/experimental.js index 91ef9ef286..4a457abf61 100644 --- a/api/server/experimental.js +++ b/api/server/experimental.js @@ -299,6 +299,7 @@ if (cluster.isMaster) { app.use('/api/auth', routes.auth); app.use('/api/actions', routes.actions); app.use('/api/keys', routes.keys); + app.use('/api/api-keys', routes.apiKeys); app.use('/api/user', routes.user); app.use('/api/search', routes.search); app.use('/api/messages', routes.messages); diff --git a/api/server/index.js b/api/server/index.js index d5129c9a7e..fcd0229c9f 100644 --- a/api/server/index.js +++ b/api/server/index.js @@ -137,6 +137,7 @@ const startServer = async () => { app.use('/api/admin', routes.adminAuth); app.use('/api/actions', routes.actions); app.use('/api/keys', routes.keys); + app.use('/api/api-keys', routes.apiKeys); 
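For reference, a minimal sketch of calling the OpenAI-compatible agents endpoint that this PR adds. The path `/v1/chat/completions` and the `model`/`messages` body shape follow the controller JSDoc above; the base URL and the Bearer agent API key shown here are assumptions, since the router wiring and authentication middleware for these routes are outside the hunks in this excerpt.

```js
// Sketch only: Node 18+ (global fetch); host and auth scheme are assumptions.
const response = await fetch('https://librechat.example.com/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.LIBRECHAT_AGENT_API_KEY}`, // hypothetical key
  },
  body: JSON.stringify({
    model: 'agent_abc123', // the agent id is used as the model name
    messages: [{ role: 'user', content: 'Hello!' }],
    stream: false, // set true for SSE chunks in OpenAI format
  }),
});

const completion = await response.json();
console.log(completion.choices?.[0]?.message?.content);
```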
app.use('/api/user', routes.user); app.use('/api/search', routes.search); app.use('/api/messages', routes.messages); diff --git a/api/server/middleware/checkSharePublicAccess.js b/api/server/middleware/checkSharePublicAccess.js index c094d54acb..0e95b9f6f8 100644 --- a/api/server/middleware/checkSharePublicAccess.js +++ b/api/server/middleware/checkSharePublicAccess.js @@ -9,6 +9,7 @@ const resourceToPermissionType = { [ResourceType.AGENT]: PermissionTypes.AGENTS, [ResourceType.PROMPTGROUP]: PermissionTypes.PROMPTS, [ResourceType.MCPSERVER]: PermissionTypes.MCP_SERVERS, + [ResourceType.REMOTE_AGENT]: PermissionTypes.REMOTE_AGENTS, }; /** diff --git a/api/server/routes/accessPermissions.js b/api/server/routes/accessPermissions.js index 79e7f3ddca..45afec133b 100644 --- a/api/server/routes/accessPermissions.js +++ b/api/server/routes/accessPermissions.js @@ -53,6 +53,12 @@ const checkResourcePermissionAccess = (requiredPermission) => (req, res, next) = requiredPermission, resourceIdParam: 'resourceId', }); + } else if (resourceType === ResourceType.REMOTE_AGENT) { + middleware = canAccessResource({ + resourceType: ResourceType.REMOTE_AGENT, + requiredPermission, + resourceIdParam: 'resourceId', + }); } else if (resourceType === ResourceType.PROMPTGROUP) { middleware = canAccessResource({ resourceType: ResourceType.PROMPTGROUP, diff --git a/api/server/routes/agents/__tests__/abort.spec.js b/api/server/routes/agents/__tests__/abort.spec.js index e879d51452..442665d973 100644 --- a/api/server/routes/agents/__tests__/abort.spec.js +++ b/api/server/routes/agents/__tests__/abort.spec.js @@ -26,10 +26,12 @@ const mockGenerationJobManager = { const mockSaveMessage = jest.fn(); jest.mock('@librechat/data-schemas', () => ({ + ...jest.requireActual('@librechat/data-schemas'), logger: mockLogger, })); jest.mock('@librechat/api', () => ({ + ...jest.requireActual('@librechat/api'), isEnabled: jest.fn().mockReturnValue(false), GenerationJobManager: mockGenerationJobManager, })); diff --git a/api/server/routes/agents/__tests__/responses.spec.js b/api/server/routes/agents/__tests__/responses.spec.js new file mode 100644 index 0000000000..4d83219b84 --- /dev/null +++ b/api/server/routes/agents/__tests__/responses.spec.js @@ -0,0 +1,1125 @@ +/** + * Open Responses API Integration Tests + * + * Tests the /v1/responses endpoint against the Open Responses specification + * compliance tests. Uses real Anthropic API for LLM calls. 
+ * + * @see https://openresponses.org/specification + * @see https://github.com/openresponses/openresponses/blob/main/src/lib/compliance-tests.ts + */ + +// Load environment variables from root .env file for API keys +require('dotenv').config({ path: require('path').resolve(__dirname, '../../../../../.env') }); + +const originalEnv = { + CREDS_KEY: process.env.CREDS_KEY, + CREDS_IV: process.env.CREDS_IV, +}; + +process.env.CREDS_KEY = '0123456789abcdef0123456789abcdef'; +process.env.CREDS_IV = '0123456789abcdef'; + +/** Skip tests if ANTHROPIC_API_KEY is not available */ +const SKIP_INTEGRATION_TESTS = !process.env.ANTHROPIC_API_KEY; +if (SKIP_INTEGRATION_TESTS) { + console.warn('ANTHROPIC_API_KEY not found - skipping integration tests'); +} + +jest.mock('meilisearch', () => ({ + MeiliSearch: jest.fn().mockImplementation(() => ({ + getIndex: jest.fn().mockRejectedValue(new Error('mocked')), + index: jest.fn().mockReturnValue({ + getRawInfo: jest.fn().mockResolvedValue({ primaryKey: 'id' }), + updateSettings: jest.fn().mockResolvedValue({}), + addDocuments: jest.fn().mockResolvedValue({}), + updateDocuments: jest.fn().mockResolvedValue({}), + deleteDocument: jest.fn().mockResolvedValue({}), + }), + })), +})); + +jest.mock('~/server/services/Config', () => ({ + loadCustomConfig: jest.fn(() => Promise.resolve({})), + getAppConfig: jest.fn().mockResolvedValue({ + paths: { + uploads: '/tmp', + dist: '/tmp/dist', + fonts: '/tmp/fonts', + assets: '/tmp/assets', + }, + fileStrategy: 'local', + imageOutputType: 'PNG', + endpoints: { + agents: { + allowedProviders: ['anthropic', 'openAI'], + }, + }, + }), + setCachedTools: jest.fn(), + getCachedTools: jest.fn(), + getMCPServerTools: jest.fn().mockReturnValue([]), +})); + +jest.mock('~/app/clients/tools', () => ({ + createOpenAIImageTools: jest.fn(() => []), + createYouTubeTools: jest.fn(() => []), + manifestToolMap: {}, + toolkits: [], +})); + +jest.mock('~/config', () => ({ + createMCPServersRegistry: jest.fn(), + createMCPManager: jest.fn().mockResolvedValue({ + getAppToolFunctions: jest.fn().mockResolvedValue({}), + }), +})); + +const express = require('express'); +const request = require('supertest'); +const mongoose = require('mongoose'); +const { v4: uuidv4 } = require('uuid'); +const { MongoMemoryServer } = require('mongodb-memory-server'); +const { hashToken, getRandomValues, createModels } = require('@librechat/data-schemas'); +const { + SystemRoles, + ResourceType, + AccessRoleIds, + PrincipalType, + PrincipalModel, + PermissionBits, + EModelEndpoint, +} = require('librechat-data-provider'); + +/** @type {import('mongoose').Model} */ +let Agent; +/** @type {import('mongoose').Model} */ +let AgentApiKey; +/** @type {import('mongoose').Model} */ +let User; +/** @type {import('mongoose').Model} */ +let AclEntry; +/** @type {import('mongoose').Model} */ +let AccessRole; + +/** + * Parse SSE stream into events + * @param {string} text - Raw SSE text + * @returns {Array<{event: string, data: unknown}>} + */ +function parseSSEEvents(text) { + const events = []; + const lines = text.split('\n'); + + let currentEvent = ''; + let currentData = ''; + + for (const line of lines) { + if (line.startsWith('event:')) { + currentEvent = line.slice(6).trim(); + } else if (line.startsWith('data:')) { + currentData = line.slice(5).trim(); + } else if (line === '' && currentData) { + if (currentData === '[DONE]') { + events.push({ event: 'done', data: '[DONE]' }); + } else { + try { + const parsed = JSON.parse(currentData); + events.push({ + event: 
currentEvent || parsed.type || 'unknown', + data: parsed, + }); + } catch { + // Skip unparseable data + } + } + currentEvent = ''; + currentData = ''; + } + } + + return events; +} + +/** + * Valid streaming event types per Open Responses specification + * @see https://github.com/openresponses/openresponses/blob/main/src/lib/sse-parser.ts + */ +const VALID_STREAMING_EVENT_TYPES = new Set([ + // Standard Open Responses events + 'response.created', + 'response.queued', + 'response.in_progress', + 'response.completed', + 'response.failed', + 'response.incomplete', + 'response.output_item.added', + 'response.output_item.done', + 'response.content_part.added', + 'response.content_part.done', + 'response.output_text.delta', + 'response.output_text.done', + 'response.refusal.delta', + 'response.refusal.done', + 'response.function_call_arguments.delta', + 'response.function_call_arguments.done', + 'response.reasoning_summary_part.added', + 'response.reasoning_summary_part.done', + 'response.reasoning.delta', + 'response.reasoning.done', + 'response.reasoning_summary_text.delta', + 'response.reasoning_summary_text.done', + 'response.output_text.annotation.added', + 'error', + // LibreChat extension events (prefixed per Open Responses spec) + // @see https://openresponses.org/specification#extending-streaming-events + 'librechat:attachment', +]); + +/** + * Validate a streaming event against Open Responses spec + * @param {Object} event - Parsed event with data + * @returns {string[]} Array of validation errors + */ +function validateStreamingEvent(event) { + const errors = []; + const data = event.data; + + if (!data || typeof data !== 'object') { + return errors; // Skip non-object data (e.g., [DONE]) + } + + const eventType = data.type; + + // Check event type is valid + if (!VALID_STREAMING_EVENT_TYPES.has(eventType)) { + errors.push(`Invalid event type: ${eventType}`); + return errors; + } + + // Validate required fields based on event type + switch (eventType) { + case 'response.output_text.delta': + if (typeof data.sequence_number !== 'number') { + errors.push('response.output_text.delta: missing sequence_number'); + } + if (typeof data.item_id !== 'string') { + errors.push('response.output_text.delta: missing item_id'); + } + if (typeof data.output_index !== 'number') { + errors.push('response.output_text.delta: missing output_index'); + } + if (typeof data.content_index !== 'number') { + errors.push('response.output_text.delta: missing content_index'); + } + if (typeof data.delta !== 'string') { + errors.push('response.output_text.delta: missing delta'); + } + if (!Array.isArray(data.logprobs)) { + errors.push('response.output_text.delta: missing logprobs array'); + } + break; + + case 'response.output_text.done': + if (typeof data.sequence_number !== 'number') { + errors.push('response.output_text.done: missing sequence_number'); + } + if (typeof data.item_id !== 'string') { + errors.push('response.output_text.done: missing item_id'); + } + if (typeof data.output_index !== 'number') { + errors.push('response.output_text.done: missing output_index'); + } + if (typeof data.content_index !== 'number') { + errors.push('response.output_text.done: missing content_index'); + } + if (typeof data.text !== 'string') { + errors.push('response.output_text.done: missing text'); + } + if (!Array.isArray(data.logprobs)) { + errors.push('response.output_text.done: missing logprobs array'); + } + break; + + case 'response.reasoning.delta': + if (typeof data.sequence_number !== 'number') { + 
errors.push('response.reasoning.delta: missing sequence_number'); + } + if (typeof data.item_id !== 'string') { + errors.push('response.reasoning.delta: missing item_id'); + } + if (typeof data.output_index !== 'number') { + errors.push('response.reasoning.delta: missing output_index'); + } + if (typeof data.content_index !== 'number') { + errors.push('response.reasoning.delta: missing content_index'); + } + if (typeof data.delta !== 'string') { + errors.push('response.reasoning.delta: missing delta'); + } + break; + + case 'response.reasoning.done': + if (typeof data.sequence_number !== 'number') { + errors.push('response.reasoning.done: missing sequence_number'); + } + if (typeof data.item_id !== 'string') { + errors.push('response.reasoning.done: missing item_id'); + } + if (typeof data.output_index !== 'number') { + errors.push('response.reasoning.done: missing output_index'); + } + if (typeof data.content_index !== 'number') { + errors.push('response.reasoning.done: missing content_index'); + } + if (typeof data.text !== 'string') { + errors.push('response.reasoning.done: missing text'); + } + break; + + case 'response.in_progress': + case 'response.completed': + case 'response.failed': + if (!data.response || typeof data.response !== 'object') { + errors.push(`${eventType}: missing response object`); + } + break; + + case 'response.output_item.added': + case 'response.output_item.done': + if (typeof data.output_index !== 'number') { + errors.push(`${eventType}: missing output_index`); + } + if (!data.item || typeof data.item !== 'object') { + errors.push(`${eventType}: missing item object`); + } + break; + } + + return errors; +} + +/** + * Validate all streaming events and return errors + * @param {Array} events - Array of parsed events + * @returns {string[]} Array of all validation errors + */ +function validateAllStreamingEvents(events) { + const allErrors = []; + for (const event of events) { + const errors = validateStreamingEvent(event); + allErrors.push(...errors); + } + return allErrors; +} + +/** + * Create a test agent with Anthropic provider + * @param {Object} overrides + * @returns {Promise} + */ +async function createTestAgent(overrides = {}) { + const timestamp = new Date(); + const agentData = { + id: `agent_${uuidv4().replace(/-/g, '').substring(0, 21)}`, + name: 'Test Anthropic Agent', + description: 'An agent for testing Open Responses API', + instructions: 'You are a helpful assistant. Be concise.', + provider: EModelEndpoint.anthropic, + model: 'claude-sonnet-4-5-20250929', + author: new mongoose.Types.ObjectId(), + tools: [], + model_parameters: {}, + ...overrides, + }; + + const versionData = { ...agentData }; + delete versionData.author; + + const initialAgentData = { + ...agentData, + versions: [ + { + ...versionData, + createdAt: timestamp, + updatedAt: timestamp, + }, + ], + category: 'general', + }; + + return (await Agent.create(initialAgentData)).toObject(); +} + +/** + * Create an agent with extended thinking enabled + * @param {Object} overrides + * @returns {Promise} + */ +async function createThinkingAgent(overrides = {}) { + return createTestAgent({ + name: 'Test Thinking Agent', + description: 'An agent with extended thinking enabled', + model_parameters: { + thinking: { + type: 'enabled', + budget_tokens: 5000, + }, + }, + ...overrides, + }); +} + +const describeWithApiKey = SKIP_INTEGRATION_TESTS ? 
describe.skip : describe; + +describeWithApiKey('Open Responses API Integration Tests', () => { + // Increase timeout for real API calls + jest.setTimeout(120000); + + let mongoServer; + let app; + let testAgent; + let thinkingAgent; + let testUser; + let testApiKey; // The raw API key for Authorization header + + afterAll(() => { + process.env.CREDS_KEY = originalEnv.CREDS_KEY; + process.env.CREDS_IV = originalEnv.CREDS_IV; + }); + + beforeAll(async () => { + // Start MongoDB Memory Server + mongoServer = await MongoMemoryServer.create(); + const mongoUri = mongoServer.getUri(); + + // Connect to MongoDB + await mongoose.connect(mongoUri); + + // Register all models + const models = createModels(mongoose); + + // Get models + Agent = models.Agent; + AgentApiKey = models.AgentApiKey; + User = models.User; + AclEntry = models.AclEntry; + AccessRole = models.AccessRole; + + // Create minimal Express app with just the responses routes + app = express(); + app.use(express.json()); + + // Mount the responses routes + const responsesRoutes = require('~/server/routes/agents/responses'); + app.use('/api/agents/v1/responses', responsesRoutes); + + // Create test user + testUser = await User.create({ + name: 'Test API User', + username: 'testapiuser', + email: 'testapiuser@test.com', + emailVerified: true, + provider: 'local', + role: SystemRoles.ADMIN, + }); + + // Create REMOTE_AGENT access roles (if they don't exist) + const existingRoles = await AccessRole.find({ + accessRoleId: { + $in: [ + AccessRoleIds.REMOTE_AGENT_VIEWER, + AccessRoleIds.REMOTE_AGENT_EDITOR, + AccessRoleIds.REMOTE_AGENT_OWNER, + ], + }, + }); + + if (existingRoles.length === 0) { + await AccessRole.create([ + { + accessRoleId: AccessRoleIds.REMOTE_AGENT_VIEWER, + name: 'API Viewer', + description: 'Can query the agent via API', + resourceType: ResourceType.REMOTE_AGENT, + permBits: PermissionBits.VIEW, + }, + { + accessRoleId: AccessRoleIds.REMOTE_AGENT_EDITOR, + name: 'API Editor', + description: 'Can view and modify the agent via API', + resourceType: ResourceType.REMOTE_AGENT, + permBits: PermissionBits.VIEW | PermissionBits.EDIT, + }, + { + accessRoleId: AccessRoleIds.REMOTE_AGENT_OWNER, + name: 'API Owner', + description: 'Full API access + can grant remote access to others', + resourceType: ResourceType.REMOTE_AGENT, + permBits: + PermissionBits.VIEW | + PermissionBits.EDIT | + PermissionBits.DELETE | + PermissionBits.SHARE, + }, + ]); + } + + // Generate and create an API key for the test user + const rawKey = `sk-${await getRandomValues(32)}`; + const keyHash = await hashToken(rawKey); + const keyPrefix = rawKey.substring(0, 8); + + await AgentApiKey.create({ + userId: testUser._id, + name: 'Test API Key', + keyHash, + keyPrefix, + }); + + testApiKey = rawKey; + + // Create test agents with the test user as author + testAgent = await createTestAgent({ author: testUser._id }); + thinkingAgent = await createThinkingAgent({ author: testUser._id }); + + // Grant REMOTE_AGENT permissions for the test agents + await AclEntry.create([ + { + principalType: PrincipalType.USER, + principalModel: PrincipalModel.USER, + principalId: testUser._id, + resourceType: ResourceType.REMOTE_AGENT, + resourceId: testAgent._id, + accessRoleId: AccessRoleIds.REMOTE_AGENT_OWNER, + permBits: + PermissionBits.VIEW | PermissionBits.EDIT | PermissionBits.DELETE | PermissionBits.SHARE, + }, + { + principalType: PrincipalType.USER, + principalModel: PrincipalModel.USER, + principalId: testUser._id, + resourceType: ResourceType.REMOTE_AGENT, + 
resourceId: thinkingAgent._id, + accessRoleId: AccessRoleIds.REMOTE_AGENT_OWNER, + permBits: + PermissionBits.VIEW | PermissionBits.EDIT | PermissionBits.DELETE | PermissionBits.SHARE, + }, + ]); + }, 60000); + + afterAll(async () => { + await mongoose.disconnect(); + await mongoServer.stop(); + }); + + beforeEach(async () => { + // Clean up any test data between tests if needed + }); + + /* =========================================================================== + * COMPLIANCE TESTS + * Based on: https://github.com/openresponses/openresponses/blob/main/src/lib/compliance-tests.ts + * =========================================================================== */ + + /** Helper to add auth header to requests */ + const authRequest = () => ({ + post: (url) => request(app).post(url).set('Authorization', `Bearer ${testApiKey}`), + get: (url) => request(app).get(url).set('Authorization', `Bearer ${testApiKey}`), + }); + + describe('Compliance Tests', () => { + describe('basic-response', () => { + it('should return a valid ResponseResource for a simple text request', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'Say hello in exactly 3 words.', + }, + ], + }); + + expect(response.status).toBe(200); + expect(response.body).toBeDefined(); + + // Validate ResponseResource schema + const body = response.body; + expect(body.id).toMatch(/^resp_/); + expect(body.object).toBe('response'); + expect(typeof body.created_at).toBe('number'); + expect(body.status).toBe('completed'); + expect(body.model).toBe(testAgent.id); + + // Validate output + expect(Array.isArray(body.output)).toBe(true); + expect(body.output.length).toBeGreaterThan(0); + + // Should have at least one message item + const messageItem = body.output.find((item) => item.type === 'message'); + expect(messageItem).toBeDefined(); + expect(messageItem.role).toBe('assistant'); + expect(messageItem.status).toBe('completed'); + expect(Array.isArray(messageItem.content)).toBe(true); + }); + }); + + describe('streaming-response', () => { + it('should return valid SSE streaming events', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'Count from 1 to 5.', + }, + ], + stream: true, + }) + .buffer(true) + .parse((res, callback) => { + let data = ''; + res.on('data', (chunk) => { + data += chunk.toString(); + }); + res.on('end', () => { + callback(null, data); + }); + }); + + expect(response.status).toBe(200); + expect(response.headers['content-type']).toMatch(/text\/event-stream/); + + const events = parseSSEEvents(response.body); + expect(events.length).toBeGreaterThan(0); + + // Validate all streaming events against Open Responses spec + // This catches issues like: + // - Invalid event types (e.g., response.reasoning_text.delta instead of response.reasoning.delta) + // - Missing required fields (e.g., logprobs on output_text events) + const validationErrors = validateAllStreamingEvents(events); + if (validationErrors.length > 0) { + console.error('Streaming event validation errors:', validationErrors); + } + expect(validationErrors).toEqual([]); + + // Validate streaming event types + const eventTypes = events.map((e) => e.event); + + // Should have response.created first (per Open Responses spec) + expect(eventTypes).toContain('response.created'); + + // Should have 
response.in_progress + expect(eventTypes).toContain('response.in_progress'); + + // response.created should come before response.in_progress + const createdIdx = eventTypes.indexOf('response.created'); + const inProgressIdx = eventTypes.indexOf('response.in_progress'); + expect(createdIdx).toBeLessThan(inProgressIdx); + + // Should have response.completed or response.failed + expect(eventTypes.some((t) => t === 'response.completed' || t === 'response.failed')).toBe( + true, + ); + + // Should have [DONE] + expect(eventTypes).toContain('done'); + + // Validate response.completed has full response + const completedEvent = events.find((e) => e.event === 'response.completed'); + if (completedEvent) { + expect(completedEvent.data.response).toBeDefined(); + expect(completedEvent.data.response.status).toBe('completed'); + expect(completedEvent.data.response.output.length).toBeGreaterThan(0); + } + }); + + it('should emit valid event types per Open Responses spec', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'Say hi.', + }, + ], + stream: true, + }) + .buffer(true) + .parse((res, callback) => { + let data = ''; + res.on('data', (chunk) => { + data += chunk.toString(); + }); + res.on('end', () => { + callback(null, data); + }); + }); + + expect(response.status).toBe(200); + + const events = parseSSEEvents(response.body); + + // Check all event types are valid + for (const event of events) { + if (event.data && typeof event.data === 'object' && event.data.type) { + expect(VALID_STREAMING_EVENT_TYPES.has(event.data.type)).toBe(true); + } + } + }); + + it('should include logprobs array in output_text events', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'Say one word.', + }, + ], + stream: true, + }) + .buffer(true) + .parse((res, callback) => { + let data = ''; + res.on('data', (chunk) => { + data += chunk.toString(); + }); + res.on('end', () => { + callback(null, data); + }); + }); + + expect(response.status).toBe(200); + + const events = parseSSEEvents(response.body); + + // Find output_text delta/done events and verify logprobs + const textDeltaEvents = events.filter( + (e) => e.data && e.data.type === 'response.output_text.delta', + ); + const textDoneEvents = events.filter( + (e) => e.data && e.data.type === 'response.output_text.done', + ); + + // Should have at least one output_text event + expect(textDeltaEvents.length + textDoneEvents.length).toBeGreaterThan(0); + + // All output_text.delta events must have logprobs array + for (const event of textDeltaEvents) { + expect(Array.isArray(event.data.logprobs)).toBe(true); + } + + // All output_text.done events must have logprobs array + for (const event of textDoneEvents) { + expect(Array.isArray(event.data.logprobs)).toBe(true); + } + }); + }); + + describe('system-prompt', () => { + it('should handle developer role messages in input (as system)', async () => { + // Note: For Anthropic, system messages must be first and there can only be one. + // Since the agent already has instructions, we use 'developer' role which + // gets merged into the system prompt, or we test with a simple user message + // that instructs the behavior. 
+ const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'Pretend you are a pirate and say hello in pirate speak.', + }, + ], + }); + + expect(response.status).toBe(200); + expect(response.body.status).toBe('completed'); + expect(response.body.output.length).toBeGreaterThan(0); + + // The response should reflect the pirate persona + const messageItem = response.body.output.find((item) => item.type === 'message'); + expect(messageItem).toBeDefined(); + expect(messageItem.content.length).toBeGreaterThan(0); + }); + }); + + describe('multi-turn', () => { + it('should handle multi-turn conversation history', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: testAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'My name is Alice.', + }, + { + type: 'message', + role: 'assistant', + content: 'Hello Alice! Nice to meet you. How can I help you today?', + }, + { + type: 'message', + role: 'user', + content: 'What is my name?', + }, + ], + }); + + expect(response.status).toBe(200); + expect(response.body.status).toBe('completed'); + + // The response should reference "Alice" + const messageItem = response.body.output.find((item) => item.type === 'message'); + expect(messageItem).toBeDefined(); + + const textContent = messageItem.content.find((c) => c.type === 'output_text'); + expect(textContent).toBeDefined(); + expect(textContent.text.toLowerCase()).toContain('alice'); + }); + }); + + // Note: tool-calling test requires tool setup which may need additional configuration + // Note: image-input test requires vision-capable model + + describe('string-input', () => { + it('should accept simple string input', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + model: testAgent.id, + input: 'Hello!', + }); + + expect(response.status).toBe(200); + expect(response.body.status).toBe('completed'); + expect(response.body.output.length).toBeGreaterThan(0); + }); + }); + }); + + /* =========================================================================== + * EXTENDED THINKING TESTS + * Tests reasoning output from Claude models with extended thinking enabled + * =========================================================================== */ + + describe('Extended Thinking', () => { + it('should return reasoning output when thinking is enabled', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: thinkingAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'What is 15 * 7? 
Think step by step.', + }, + ], + }); + + expect(response.status).toBe(200); + expect(response.body.status).toBe('completed'); + + // Check for reasoning item in output + const reasoningItem = response.body.output.find((item) => item.type === 'reasoning'); + // If reasoning is present, validate its structure per Open Responses spec + // Note: reasoning items do NOT have a 'status' field per the spec + // @see https://github.com/openresponses/openresponses/blob/main/src/generated/kubb/zod/reasoningBodySchema.ts + if (reasoningItem) { + expect(reasoningItem).toHaveProperty('id'); + expect(reasoningItem).toHaveProperty('type', 'reasoning'); + // Note: 'status' is NOT a field on reasoning items per the spec + expect(reasoningItem).toHaveProperty('summary'); + expect(Array.isArray(reasoningItem.summary)).toBe(true); + + // Validate content items + if (reasoningItem.content && reasoningItem.content.length > 0) { + const reasoningContent = reasoningItem.content[0]; + expect(reasoningContent).toHaveProperty('type', 'reasoning_text'); + expect(reasoningContent).toHaveProperty('text'); + } + } + + const messageItem = response.body.output.find((item) => item.type === 'message'); + expect(messageItem).toBeDefined(); + }); + + it('should stream reasoning events when thinking is enabled', async () => { + const response = await authRequest() + .post('/api/agents/v1/responses') + .send({ + model: thinkingAgent.id, + input: [ + { + type: 'message', + role: 'user', + content: 'What is 12 + 8? Think step by step.', + }, + ], + stream: true, + }) + .buffer(true) + .parse((res, callback) => { + let data = ''; + res.on('data', (chunk) => { + data += chunk.toString(); + }); + res.on('end', () => { + callback(null, data); + }); + }); + + expect(response.status).toBe(200); + + const events = parseSSEEvents(response.body); + + // Validate all events against Open Responses spec + const validationErrors = validateAllStreamingEvents(events); + if (validationErrors.length > 0) { + console.error('Reasoning streaming event validation errors:', validationErrors); + } + expect(validationErrors).toEqual([]); + + // Check for reasoning-related events using correct event types per Open Responses spec + // Note: The spec uses response.reasoning.delta NOT response.reasoning_text.delta + const reasoningDeltaEvents = events.filter( + (e) => e.data && e.data.type === 'response.reasoning.delta', + ); + const reasoningDoneEvents = events.filter( + (e) => e.data && e.data.type === 'response.reasoning.done', + ); + + // If reasoning events are present, validate their structure + if (reasoningDeltaEvents.length > 0) { + const deltaEvent = reasoningDeltaEvents[0]; + expect(deltaEvent.data).toHaveProperty('item_id'); + expect(deltaEvent.data).toHaveProperty('delta'); + expect(deltaEvent.data).toHaveProperty('output_index'); + expect(deltaEvent.data).toHaveProperty('content_index'); + expect(deltaEvent.data).toHaveProperty('sequence_number'); + } + + if (reasoningDoneEvents.length > 0) { + const doneEvent = reasoningDoneEvents[0]; + expect(doneEvent.data).toHaveProperty('item_id'); + expect(doneEvent.data).toHaveProperty('text'); + expect(doneEvent.data).toHaveProperty('output_index'); + expect(doneEvent.data).toHaveProperty('content_index'); + expect(doneEvent.data).toHaveProperty('sequence_number'); + } + + // Verify stream completed properly + const eventTypes = events.map((e) => e.event); + expect(eventTypes).toContain('response.completed'); + }); + }); + + /* =========================================================================== 
+ * SCHEMA VALIDATION TESTS + * Verify response schema compliance + * =========================================================================== */ + + describe('Schema Validation', () => { + it('should include all required fields in response', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + model: testAgent.id, + input: 'Test', + }); + + expect(response.status).toBe(200); + const body = response.body; + + // Required fields per Open Responses spec + expect(body).toHaveProperty('id'); + expect(body).toHaveProperty('object', 'response'); + expect(body).toHaveProperty('created_at'); + expect(body).toHaveProperty('completed_at'); + expect(body).toHaveProperty('status'); + expect(body).toHaveProperty('model'); + expect(body).toHaveProperty('output'); + expect(body).toHaveProperty('tools'); + expect(body).toHaveProperty('tool_choice'); + expect(body).toHaveProperty('truncation'); + expect(body).toHaveProperty('parallel_tool_calls'); + expect(body).toHaveProperty('text'); + expect(body).toHaveProperty('temperature'); + expect(body).toHaveProperty('top_p'); + expect(body).toHaveProperty('presence_penalty'); + expect(body).toHaveProperty('frequency_penalty'); + expect(body).toHaveProperty('top_logprobs'); + expect(body).toHaveProperty('store'); + expect(body).toHaveProperty('background'); + expect(body).toHaveProperty('service_tier'); + expect(body).toHaveProperty('metadata'); + + // top_logprobs must be a number (not null) + expect(typeof body.top_logprobs).toBe('number'); + + // Usage must have required detail fields + expect(body).toHaveProperty('usage'); + expect(body.usage).toHaveProperty('input_tokens'); + expect(body.usage).toHaveProperty('output_tokens'); + expect(body.usage).toHaveProperty('total_tokens'); + expect(body.usage).toHaveProperty('input_tokens_details'); + expect(body.usage).toHaveProperty('output_tokens_details'); + expect(body.usage.input_tokens_details).toHaveProperty('cached_tokens'); + expect(body.usage.output_tokens_details).toHaveProperty('reasoning_tokens'); + }); + + it('should have valid message item structure', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + model: testAgent.id, + input: 'Hello', + }); + + expect(response.status).toBe(200); + + const messageItem = response.body.output.find((item) => item.type === 'message'); + expect(messageItem).toBeDefined(); + + // Message item required fields + expect(messageItem).toHaveProperty('type', 'message'); + expect(messageItem).toHaveProperty('id'); + expect(messageItem).toHaveProperty('status'); + expect(messageItem).toHaveProperty('role', 'assistant'); + expect(messageItem).toHaveProperty('content'); + expect(Array.isArray(messageItem.content)).toBe(true); + + // Content part structure - verify all required fields + if (messageItem.content.length > 0) { + const textContent = messageItem.content.find((c) => c.type === 'output_text'); + if (textContent) { + expect(textContent).toHaveProperty('type', 'output_text'); + expect(textContent).toHaveProperty('text'); + expect(textContent).toHaveProperty('annotations'); + expect(textContent).toHaveProperty('logprobs'); + expect(Array.isArray(textContent.annotations)).toBe(true); + expect(Array.isArray(textContent.logprobs)).toBe(true); + } + } + + // Verify reasoning item has required summary field + const reasoningItem = response.body.output.find((item) => item.type === 'reasoning'); + if (reasoningItem) { + expect(reasoningItem).toHaveProperty('type', 'reasoning'); + 
expect(reasoningItem).toHaveProperty('id'); + expect(reasoningItem).toHaveProperty('summary'); + expect(Array.isArray(reasoningItem.summary)).toBe(true); + } + }); + }); + + /* =========================================================================== + * RESPONSE STORAGE TESTS + * Tests for store: true and GET /v1/responses/:id + * =========================================================================== */ + + describe('Response Storage', () => { + it('should store response when store: true and retrieve it', async () => { + // Create a stored response + const createResponse = await authRequest().post('/api/agents/v1/responses').send({ + model: testAgent.id, + input: 'Remember this: The answer is 42.', + store: true, + }); + + expect(createResponse.status).toBe(200); + expect(createResponse.body.status).toBe('completed'); + + const responseId = createResponse.body.id; + expect(responseId).toMatch(/^resp_/); + + // Small delay to ensure database write completes + await new Promise((resolve) => setTimeout(resolve, 500)); + + // Retrieve the stored response + const getResponseResult = await authRequest().get(`/api/agents/v1/responses/${responseId}`); + + // Note: The response might be stored under conversationId, not responseId + // If we get 404, that's expected behavior for now since we store by conversationId + if (getResponseResult.status === 200) { + expect(getResponseResult.body.object).toBe('response'); + expect(getResponseResult.body.status).toBe('completed'); + expect(getResponseResult.body.output.length).toBeGreaterThan(0); + } + }); + + it('should return 404 for non-existent response', async () => { + const response = await authRequest().get('/api/agents/v1/responses/resp_nonexistent123'); + + expect(response.status).toBe(404); + expect(response.body.error).toBeDefined(); + }); + }); + + /* =========================================================================== + * ERROR HANDLING TESTS + * =========================================================================== */ + + describe('Error Handling', () => { + it('should return error for missing model', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + input: 'Hello', + }); + + expect(response.status).toBe(400); + expect(response.body.error).toBeDefined(); + }); + + it('should return error for missing input', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + model: testAgent.id, + }); + + expect(response.status).toBe(400); + expect(response.body.error).toBeDefined(); + }); + + it('should return error for non-existent agent', async () => { + const response = await authRequest().post('/api/agents/v1/responses').send({ + model: 'agent_nonexistent123456789', + input: 'Hello', + }); + + expect(response.status).toBe(404); + expect(response.body.error).toBeDefined(); + }); + }); + + /* =========================================================================== + * MODELS ENDPOINT TESTS + * =========================================================================== */ + + describe('GET /v1/responses/models', () => { + it('should list available agents as models', async () => { + const response = await authRequest().get('/api/agents/v1/responses/models'); + + expect(response.status).toBe(200); + expect(response.body.object).toBe('list'); + expect(Array.isArray(response.body.data)).toBe(true); + + // Should include our test agent + const foundAgent = response.body.data.find((m) => m.id === testAgent.id); + expect(foundAgent).toBeDefined(); + 
expect(foundAgent.object).toBe('model'); + expect(foundAgent.name).toBe(testAgent.name); + }); + }); +}); diff --git a/api/server/routes/agents/index.js b/api/server/routes/agents/index.js index bf790aeee8..f8d39cb4d8 100644 --- a/api/server/routes/agents/index.js +++ b/api/server/routes/agents/index.js @@ -10,6 +10,8 @@ const { messageUserLimiter, } = require('~/server/middleware'); const { saveMessage } = require('~/models'); +const openai = require('./openai'); +const responses = require('./responses'); const { v1 } = require('./v1'); const chat = require('./chat'); @@ -17,6 +19,20 @@ const { LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {}; const router = express.Router(); +/** + * Open Responses API routes (API key authentication handled in route file) + * Mounted at /agents/v1/responses (full path: /api/agents/v1/responses) + * NOTE: Must be mounted BEFORE /v1 to avoid being caught by the less specific route + * @see https://openresponses.org/specification + */ +router.use('/v1/responses', responses); + +/** + * OpenAI-compatible API routes (API key authentication handled in route file) + * Mounted at /agents/v1 (full path: /api/agents/v1/chat/completions) + */ +router.use('/v1', openai); + router.use(requireJwtAuth); router.use(checkBan); router.use(uaParser); diff --git a/api/server/routes/agents/openai.js b/api/server/routes/agents/openai.js new file mode 100644 index 0000000000..9a0d9a3564 --- /dev/null +++ b/api/server/routes/agents/openai.js @@ -0,0 +1,110 @@ +/** + * OpenAI-compatible API routes for LibreChat agents. + * + * Provides a /v1/chat/completions compatible interface for + * interacting with LibreChat agents remotely via API. + * + * Usage: + * POST /v1/chat/completions - Chat with an agent + * GET /v1/models - List available agents + * GET /v1/models/:model - Get agent details + * + * Request format: + * { + * "model": "agent_id_here", + * "messages": [{"role": "user", "content": "Hello!"}], + * "stream": true + * } + */ +const express = require('express'); +const { PermissionTypes, Permissions } = require('librechat-data-provider'); +const { + generateCheckAccess, + createRequireApiKeyAuth, + createCheckRemoteAgentAccess, +} = require('@librechat/api'); +const { + OpenAIChatCompletionController, + ListModelsController, + GetModelController, +} = require('~/server/controllers/agents/openai'); +const { getEffectivePermissions } = require('~/server/services/PermissionService'); +const { validateAgentApiKey, findUser } = require('~/models'); +const { configMiddleware } = require('~/server/middleware'); +const { getRoleByName } = require('~/models/Role'); +const { getAgent } = require('~/models/Agent'); + +const router = express.Router(); + +const requireApiKeyAuth = createRequireApiKeyAuth({ + validateAgentApiKey, + findUser, +}); + +const checkRemoteAgentsFeature = generateCheckAccess({ + permissionType: PermissionTypes.REMOTE_AGENTS, + permissions: [Permissions.USE], + getRoleByName, +}); + +const checkAgentPermission = createCheckRemoteAgentAccess({ + getAgent, + getEffectivePermissions, +}); + +router.use(requireApiKeyAuth); +router.use(configMiddleware); +router.use(checkRemoteAgentsFeature); + +/** + * @route POST /v1/chat/completions + * @desc OpenAI-compatible chat completions with agents + * @access Private (API key auth required) + * + * Request body: + * { + * "model": "agent_id", // Required: The agent ID to use + * "messages": [...], // Required: Array of chat messages + * "stream": true, // Optional: Whether to stream (default: false) + * 
"conversation_id": "...", // Optional: Conversation ID for context + * "parent_message_id": "..." // Optional: Parent message for threading + * } + * + * Response (streaming): + * - SSE stream with OpenAI chat.completion.chunk format + * - Includes delta.reasoning for thinking/reasoning content + * + * Response (non-streaming): + * - Standard OpenAI chat.completion format + */ +router.post('/chat/completions', checkAgentPermission, OpenAIChatCompletionController); + +/** + * @route GET /v1/models + * @desc List available agents as models + * @access Private (API key auth required) + * + * Response: + * { + * "object": "list", + * "data": [ + * { + * "id": "agent_id", + * "object": "model", + * "name": "Agent Name", + * "provider": "openai", + * ... + * } + * ] + * } + */ +router.get('/models', ListModelsController); + +/** + * @route GET /v1/models/:model + * @desc Get details for a specific agent/model + * @access Private (API key auth required) + */ +router.get('/models/:model', GetModelController); + +module.exports = router; diff --git a/api/server/routes/agents/responses.js b/api/server/routes/agents/responses.js new file mode 100644 index 0000000000..431942e921 --- /dev/null +++ b/api/server/routes/agents/responses.js @@ -0,0 +1,144 @@ +/** + * Open Responses API routes for LibreChat agents. + * + * Implements the Open Responses specification for a forward-looking, + * agentic API that uses items as the fundamental unit and semantic + * streaming events. + * + * Usage: + * POST /v1/responses - Create a response + * GET /v1/models - List available agents + * + * Request format: + * { + * "model": "agent_id_here", + * "input": "Hello!" or [{ type: "message", role: "user", content: "Hello!" }], + * "stream": true, + * "previous_response_id": "optional_conversation_id" + * } + * + * @see https://openresponses.org/specification + */ +const express = require('express'); +const { PermissionTypes, Permissions } = require('librechat-data-provider'); +const { + generateCheckAccess, + createRequireApiKeyAuth, + createCheckRemoteAgentAccess, +} = require('@librechat/api'); +const { + createResponse, + getResponse, + listModels, +} = require('~/server/controllers/agents/responses'); +const { getEffectivePermissions } = require('~/server/services/PermissionService'); +const { validateAgentApiKey, findUser } = require('~/models'); +const { configMiddleware } = require('~/server/middleware'); +const { getRoleByName } = require('~/models/Role'); +const { getAgent } = require('~/models/Agent'); + +const router = express.Router(); + +const requireApiKeyAuth = createRequireApiKeyAuth({ + validateAgentApiKey, + findUser, +}); + +const checkRemoteAgentsFeature = generateCheckAccess({ + permissionType: PermissionTypes.REMOTE_AGENTS, + permissions: [Permissions.USE], + getRoleByName, +}); + +const checkAgentPermission = createCheckRemoteAgentAccess({ + getAgent, + getEffectivePermissions, +}); + +router.use(requireApiKeyAuth); +router.use(configMiddleware); +router.use(checkRemoteAgentsFeature); + +/** + * @route POST /v1/responses + * @desc Create a model response following Open Responses specification + * @access Private (API key auth required) + * + * Request body: + * { + * "model": "agent_id", // Required: The agent ID to use + * "input": "..." 
| [...], // Required: String or array of input items + * "stream": true, // Optional: Whether to stream (default: false) + * "previous_response_id": "...", // Optional: Previous response for continuation + * "instructions": "...", // Optional: Additional instructions + * "tools": [...], // Optional: Additional tools + * "tool_choice": "auto", // Optional: Tool choice mode + * "max_output_tokens": 4096, // Optional: Max tokens + * "temperature": 0.7 // Optional: Temperature + * } + * + * Response (streaming): + * - SSE stream with semantic events: + * - response.in_progress + * - response.output_item.added + * - response.content_part.added + * - response.output_text.delta + * - response.output_text.done + * - response.function_call_arguments.delta + * - response.output_item.done + * - response.completed + * - [DONE] + * + * Response (non-streaming): + * { + * "id": "resp_xxx", + * "object": "response", + * "created_at": 1234567890, + * "status": "completed", + * "model": "agent_id", + * "output": [...], // Array of output items + * "usage": { ... } + * } + */ +router.post('/', checkAgentPermission, createResponse); + +/** + * @route GET /v1/responses/models + * @desc List available agents as models + * @access Private (API key auth required) + * + * Response: + * { + * "object": "list", + * "data": [ + * { + * "id": "agent_id", + * "object": "model", + * "name": "Agent Name", + * "provider": "openai", + * ... + * } + * ] + * } + */ +router.get('/models', listModels); + +/** + * @route GET /v1/responses/:id + * @desc Retrieve a stored response by ID + * @access Private (API key auth required) + * + * Response: + * { + * "id": "resp_xxx", + * "object": "response", + * "created_at": 1234567890, + * "status": "completed", + * "model": "agent_id", + * "output": [...], + * "usage": { ... 
} + * } + */ +router.get('/:id', getResponse); + +module.exports = router; diff --git a/api/server/routes/apiKeys.js b/api/server/routes/apiKeys.js new file mode 100644 index 0000000000..29dcc326f5 --- /dev/null +++ b/api/server/routes/apiKeys.js @@ -0,0 +1,36 @@ +const express = require('express'); +const { generateCheckAccess, createApiKeyHandlers } = require('@librechat/api'); +const { PermissionTypes, Permissions } = require('librechat-data-provider'); +const { + getAgentApiKeyById, + createAgentApiKey, + deleteAgentApiKey, + listAgentApiKeys, +} = require('~/models'); +const { requireJwtAuth } = require('~/server/middleware'); +const { getRoleByName } = require('~/models/Role'); + +const router = express.Router(); + +const handlers = createApiKeyHandlers({ + createAgentApiKey, + listAgentApiKeys, + deleteAgentApiKey, + getAgentApiKeyById, +}); + +const checkRemoteAgentsUse = generateCheckAccess({ + permissionType: PermissionTypes.REMOTE_AGENTS, + permissions: [Permissions.USE], + getRoleByName, +}); + +router.post('/', requireJwtAuth, checkRemoteAgentsUse, handlers.createApiKey); + +router.get('/', requireJwtAuth, checkRemoteAgentsUse, handlers.listApiKeys); + +router.get('/:id', requireJwtAuth, checkRemoteAgentsUse, handlers.getApiKey); + +router.delete('/:id', requireJwtAuth, checkRemoteAgentsUse, handlers.deleteApiKey); + +module.exports = router; diff --git a/api/server/routes/index.js b/api/server/routes/index.js index 785e74bb8f..6a48919db3 100644 --- a/api/server/routes/index.js +++ b/api/server/routes/index.js @@ -10,6 +10,7 @@ const presets = require('./presets'); const prompts = require('./prompts'); const balance = require('./balance'); const actions = require('./actions'); +const apiKeys = require('./apiKeys'); const banner = require('./banner'); const search = require('./search'); const models = require('./models'); @@ -31,6 +32,7 @@ module.exports = { auth, adminAuth, keys, + apiKeys, user, tags, roles, diff --git a/api/server/routes/roles.js b/api/server/routes/roles.js index abb53141bd..12e18c7624 100644 --- a/api/server/routes/roles.js +++ b/api/server/routes/roles.js @@ -6,9 +6,10 @@ const { agentPermissionsSchema, promptPermissionsSchema, memoryPermissionsSchema, + mcpServersPermissionsSchema, marketplacePermissionsSchema, peoplePickerPermissionsSchema, - mcpServersPermissionsSchema, + remoteAgentsPermissionsSchema, } = require('librechat-data-provider'); const { checkAdmin, requireJwtAuth } = require('~/server/middleware'); const { updateRoleByName, getRoleByName } = require('~/models/Role'); @@ -51,6 +52,11 @@ const permissionConfigs = { permissionType: PermissionTypes.MARKETPLACE, errorMessage: 'Invalid marketplace permissions.', }, + 'remote-agents': { + schema: remoteAgentsPermissionsSchema, + permissionType: PermissionTypes.REMOTE_AGENTS, + errorMessage: 'Invalid remote agents permissions.', + }, }; /** @@ -160,4 +166,10 @@ router.put('/:roleName/mcp-servers', checkAdmin, createPermissionUpdateHandler(' */ router.put('/:roleName/marketplace', checkAdmin, createPermissionUpdateHandler('marketplace')); +/** + * PUT /api/roles/:roleName/remote-agents + * Update remote agents (API) permissions for a specific role + */ +router.put('/:roleName/remote-agents', checkAdmin, createPermissionUpdateHandler('remote-agents')); + module.exports = router; diff --git a/api/server/services/PermissionService.js b/api/server/services/PermissionService.js index c35faf7c8d..a843f48f6f 100644 --- a/api/server/services/PermissionService.js +++ 
b/api/server/services/PermissionService.js @@ -141,7 +141,6 @@ const checkPermission = async ({ userId, role, resourceType, resourceId, require validateResourceType(resourceType); - // Get all principals for the user (user + groups + public) const principals = await getUserPrincipals({ userId, role }); if (principals.length === 0) { @@ -151,7 +150,6 @@ const checkPermission = async ({ userId, role, resourceType, resourceId, require return await hasPermission(principals, resourceType, resourceId, requiredPermission); } catch (error) { logger.error(`[PermissionService.checkPermission] Error: ${error.message}`); - // Re-throw validation errors if (error.message.includes('requiredPermission must be')) { throw error; } @@ -172,12 +170,12 @@ const getEffectivePermissions = async ({ userId, role, resourceType, resourceId try { validateResourceType(resourceType); - // Get all principals for the user (user + groups + public) const principals = await getUserPrincipals({ userId, role }); if (principals.length === 0) { return 0; } + return await getEffectivePermissionsACL(principals, resourceType, resourceId); } catch (error) { logger.error(`[PermissionService.getEffectivePermissions] Error: ${error.message}`); diff --git a/client/src/components/Nav/SettingsTabs/Data/AgentApiKeys.tsx b/client/src/components/Nav/SettingsTabs/Data/AgentApiKeys.tsx new file mode 100644 index 0000000000..f75b93526a --- /dev/null +++ b/client/src/components/Nav/SettingsTabs/Data/AgentApiKeys.tsx @@ -0,0 +1,362 @@ +import React, { useState } from 'react'; +import { + useGetAgentApiKeysQuery, + useCreateAgentApiKeyMutation, + useDeleteAgentApiKeyMutation, +} from 'librechat-data-provider/react-query'; +import { Permissions, PermissionTypes } from 'librechat-data-provider'; +import { Plus, Trash2, Copy, CopyCheck, Key, Eye, EyeOff, ShieldEllipsis } from 'lucide-react'; +import { + Button, + Input, + Label, + Spinner, + OGDialog, + OGDialogClose, + OGDialogTitle, + OGDialogHeader, + OGDialogContent, + OGDialogTrigger, + useToastContext, +} from '@librechat/client'; +import type { PermissionConfig } from '~/components/ui'; +import { useUpdateRemoteAgentsPermissionsMutation } from '~/data-provider'; +import { useLocalize, useCopyToClipboard } from '~/hooks'; +import { AdminSettingsDialog } from '~/components/ui'; + +function CreateKeyDialog({ onKeyCreated }: { onKeyCreated?: () => void }) { + const localize = useLocalize(); + const { showToast } = useToastContext(); + const [open, setOpen] = useState(false); + const [name, setName] = useState(''); + const [newKey, setNewKey] = useState(null); + const [showKey, setShowKey] = useState(false); + const [isCopying, setIsCopying] = useState(false); + const createMutation = useCreateAgentApiKeyMutation(); + const copyKey = useCopyToClipboard({ text: newKey || '' }); + + const handleCreate = async () => { + if (!name.trim()) { + showToast({ message: localize('com_ui_api_key_name_required'), status: 'error' }); + return; + } + + try { + const result = await createMutation.mutateAsync({ name: name.trim() }); + setNewKey(result.key); + showToast({ message: localize('com_ui_api_key_created'), status: 'success' }); + onKeyCreated?.(); + } catch { + showToast({ message: localize('com_ui_api_key_create_error'), status: 'error' }); + } + }; + + const handleClose = () => { + setName(''); + setNewKey(null); + setShowKey(false); + setOpen(false); + }; + + const handleCopy = () => { + if (isCopying) { + return; + } + copyKey(setIsCopying); + showToast({ message: localize('com_ui_api_key_copied'), 
status: 'success' }); + }; + + return ( + + + + + + {localize('com_ui_create_api_key')} +
+ {!newKey ? ( + <> +
+ + setName(e.target.value)} + placeholder={localize('com_ui_api_key_name_placeholder')} + /> +
+
+ + + + +
+ + ) : ( + <> +
+

+ {localize('com_ui_api_key_warning')} +

+
+
+ +
+ + + +
+
+
+ +
+ + )} +
+
+
+ ); +} + +function KeyItem({ + id, + name, + keyPrefix, + createdAt, + lastUsedAt, +}: { + id: string; + name: string; + keyPrefix: string; + createdAt: string; + lastUsedAt?: string; +}) { + const localize = useLocalize(); + const { showToast } = useToastContext(); + const [confirmDelete, setConfirmDelete] = useState(false); + const deleteMutation = useDeleteAgentApiKeyMutation(); + + const handleDelete = async () => { + try { + await deleteMutation.mutateAsync(id); + showToast({ message: localize('com_ui_api_key_deleted'), status: 'success' }); + } catch { + showToast({ message: localize('com_ui_api_key_delete_error'), status: 'error' }); + } + setConfirmDelete(false); + }; + + const formatDate = (dateStr: string) => { + return new Date(dateStr).toLocaleDateString(undefined, { + year: 'numeric', + month: 'short', + day: 'numeric', + }); + }; + + return ( +
+
+ +
+
{name}
+
+ {keyPrefix}... + • + + {localize('com_ui_created')} {formatDate(createdAt)} + + {lastUsedAt && ( + <> + • + + {localize('com_ui_last_used')} {formatDate(lastUsedAt)} + + + )} +
+
+
+
+ {confirmDelete ? ( +
+ + +
+ ) : ( + + )} +
+
+ ); +} + +function ApiKeysContent({ isOpen }: { isOpen: boolean }) { + const localize = useLocalize(); + const { data, isLoading, error } = useGetAgentApiKeysQuery({ enabled: isOpen }); + + if (error) { + return
{localize('com_ui_api_keys_load_error')}
; + } + + return ( +
+
+ + +
+ +
+ {isLoading && ( +
+ +
+ )} + {!isLoading && + data?.keys && + data.keys.length > 0 && + data.keys.map((key) => ( + + ))} + {!isLoading && (!data?.keys || data.keys.length === 0) && ( +
+ +

{localize('com_ui_no_api_keys')}

+
+ )} +
+
+ ); +} + +const remoteAgentsPermissions: PermissionConfig[] = [ + { permission: Permissions.USE, labelKey: 'com_ui_remote_agents_allow_use' }, + { permission: Permissions.CREATE, labelKey: 'com_ui_remote_agents_allow_create' }, + { permission: Permissions.SHARE, labelKey: 'com_ui_remote_agents_allow_share' }, + { permission: Permissions.SHARE_PUBLIC, labelKey: 'com_ui_remote_agents_allow_share_public' }, +]; + +function RemoteAgentsAdminSettings() { + const localize = useLocalize(); + const { showToast } = useToastContext(); + + const mutation = useUpdateRemoteAgentsPermissionsMutation({ + onSuccess: () => { + showToast({ status: 'success', message: localize('com_ui_saved') }); + }, + onError: () => { + showToast({ status: 'error', message: localize('com_ui_error_save_admin_settings') }); + }, + }); + + const trigger = ( + + ); + + return ( + + ); +} + +export function AgentApiKeys() { + const localize = useLocalize(); + const [isOpen, setIsOpen] = useState(false); + + return ( +
+ + + + + + + + + + {localize('com_ui_agent_api_keys')} +

+ {localize('com_ui_agent_api_keys_description')} +

+
+ +
+
+
+ ); +} diff --git a/client/src/components/Nav/SettingsTabs/Data/Data.tsx b/client/src/components/Nav/SettingsTabs/Data/Data.tsx index 0bba5a152e..eb8cea98c2 100644 --- a/client/src/components/Nav/SettingsTabs/Data/Data.tsx +++ b/client/src/components/Nav/SettingsTabs/Data/Data.tsx @@ -1,15 +1,22 @@ import React, { useState, useRef } from 'react'; import { useOnClickOutside } from '@librechat/client'; +import { Permissions, PermissionTypes } from 'librechat-data-provider'; import ImportConversations from './ImportConversations'; -import { RevokeKeys } from './RevokeKeys'; +import { AgentApiKeys } from './AgentApiKeys'; import { DeleteCache } from './DeleteCache'; +import { RevokeKeys } from './RevokeKeys'; import { ClearChats } from './ClearChats'; import SharedLinks from './SharedLinks'; +import { useHasAccess } from '~/hooks'; function Data() { const dataTabRef = useRef(null); const [confirmClearConvos, setConfirmClearConvos] = useState(false); useOnClickOutside(dataTabRef, () => confirmClearConvos && setConfirmClearConvos(false), []); + const hasAccessToApiKeys = useHasAccess({ + permissionType: PermissionTypes.REMOTE_AGENTS, + permission: Permissions.USE, + }); return (
@@ -19,6 +26,11 @@ function Data() {
+ {hasAccessToApiKeys && ( +
+ +
+ )}
diff --git a/client/src/components/SidePanel/Agents/AgentFooter.tsx b/client/src/components/SidePanel/Agents/AgentFooter.tsx index 80a449bb2d..b2fa996596 100644 --- a/client/src/components/SidePanel/Agents/AgentFooter.tsx +++ b/client/src/components/SidePanel/Agents/AgentFooter.tsx @@ -1,3 +1,4 @@ +import { Globe } from 'lucide-react'; import { Spinner } from '@librechat/client'; import { useWatch, useFormContext } from 'react-hook-form'; import { @@ -44,13 +45,20 @@ export default function AgentFooter({ permissionType: PermissionTypes.AGENTS, permission: Permissions.SHARE, }); + const hasAccessToShareRemoteAgents = useHasAccess({ + permissionType: PermissionTypes.REMOTE_AGENTS, + permission: Permissions.SHARE, + }); const { hasPermission, isLoading: permissionsLoading } = useResourcePermissions( ResourceType.AGENT, agent?._id || '', ); + const { hasPermission: hasRemoteAgentPermission, isLoading: remotePermissionsLoading } = + useResourcePermissions(ResourceType.REMOTE_AGENT, agent?._id || ''); const canShareThisAgent = hasPermission(PermissionBits.SHARE); const canDeleteThisAgent = hasPermission(PermissionBits.DELETE); + const canShareRemoteAgent = hasRemoteAgentPermission(PermissionBits.SHARE); const isSaving = createMutation.isLoading || updateMutation.isLoading || isAvatarUploading; const renderSaveButton = () => { if (isSaving) { @@ -91,6 +99,25 @@ export default function AgentFooter({ resourceType={ResourceType.AGENT} /> )} + {(agent?.author === user?.id || user?.role === SystemRoles.ADMIN || canShareRemoteAgent) && + hasAccessToShareRemoteAgents && + !remotePermissionsLoading && + agent?._id && ( + + + + )} {agent && agent.author === user?.id && } {/* Submit Button */}
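The new surface area is easiest to see end to end from a caller's point of view. Below is a minimal sketch (not part of the diff) of how a remote client might exercise the two new routers, assuming a local deployment at http://localhost:3080 and an agent API key exported as AGENT_API_KEY — both hypothetical values; in the UI, keys are created from the Agent API Keys panel added to Settings → Data. The endpoint paths, Bearer-key authentication, request shapes, and response fields below are taken from the route JSDoc and the integration tests above; everything else is an assumption.

// remote-client-sketch.ts — illustrative only; base URL and env var name are assumptions.
const BASE_URL = process.env.LIBRECHAT_URL ?? 'http://localhost:3080'; // hypothetical deployment
const API_KEY = process.env.AGENT_API_KEY ?? ''; // an `sk-...` key created in Settings → Data

const headers = {
  'Content-Type': 'application/json',
  Authorization: `Bearer ${API_KEY}`, // API key auth, as in the integration tests
};

async function main(): Promise<void> {
  // 1. Discover agents exposed as "models" (GET /api/agents/v1/responses/models).
  const modelsRes = await fetch(`${BASE_URL}/api/agents/v1/responses/models`, { headers });
  const models = await modelsRes.json();
  const agentId: string | undefined = models.data?.[0]?.id; // e.g. "agent_..."

  // 2. Create a non-streaming response via the Open Responses route
  //    (POST /api/agents/v1/responses). `input` may be a plain string or an
  //    array of { type: 'message', role, content } items. With `stream: true`
  //    the same route returns SSE events (response.created → deltas →
  //    response.completed → [DONE]) instead of a single JSON body.
  const createRes = await fetch(`${BASE_URL}/api/agents/v1/responses`, {
    method: 'POST',
    headers,
    body: JSON.stringify({ model: agentId, input: 'Say hello in exactly 3 words.' }),
  });
  const response = await createRes.json();
  console.log(response.id, response.status); // "resp_..." "completed"
  console.log(response.output);              // output items: message, reasoning, ...

  // 3. The same agent is also reachable through the OpenAI-compatible route
  //    (POST /api/agents/v1/chat/completions) using standard chat messages.
  const chatRes = await fetch(`${BASE_URL}/api/agents/v1/chat/completions`, {
    method: 'POST',
    headers,
    body: JSON.stringify({
      model: agentId,
      messages: [{ role: 'user', content: 'Hello!' }],
      stream: false,
    }),
  });
  console.log(await chatRes.json()); // standard chat.completion shape
}

main().catch(console.error);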