📑 feat: Anthropic Direct Provider Upload (#9072)

* feat: implement Anthropic native PDF support with document preservation

- Add comprehensive debug logging throughout PDF processing pipeline
- Refactor attachment processing to separate image and document handling
- Create distinct addImageURLs(), addDocuments(), and processAttachments() methods (see the sketch after this list)
- Fix critical bugs in stream handling and parameter passing
- Add streamToBuffer utility for proper stream-to-buffer conversion
- Remove api/agents submodule from repository
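
A minimal sketch of that split, assuming nothing beyond the method names above (the actual client code is not part of this diff): attachments are partitioned by MIME type, images go to addImageURLs() and PDFs go to addDocuments().

```js
// Hypothetical helper illustrating the image/document split described above;
// the real processAttachments()/addImageURLs()/addDocuments() live in the client code.
function splitAttachments(attachments = []) {
  const images = attachments.filter((file) => file.type?.startsWith('image/'));
  const documents = attachments.filter((file) => file.type === 'application/pdf');
  return { images, documents }; // images -> addImageURLs(), documents -> addDocuments()
}
```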

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* chore: remove out of scope formatting changes

* fix: stop duplication of file in chat on end of response stream

* chore: bring back file search and OCR options

* chore: localize upload to provider string in file menu

* refactor: change createMenuItems args to fit new pattern introduced by anthropic-native-pdf-support

* feat: add cache point for PDFs processed by the Anthropic endpoint, since they are unlikely to change and should benefit from prompt caching
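
The cache point sits on the document content part itself; a minimal sketch of the resulting shape, mirroring what encodeAndFormatDocuments() builds in the new encode.js further down:

```js
// Anthropic document content part with an ephemeral cache breakpoint
// (shape mirrors the documentPart built in encode.js below).
const documentPart = {
  type: 'document',
  source: {
    type: 'base64',
    media_type: 'application/pdf',
    data: '<base64-encoded PDF>', // encode.js produces this via buffer.toString('base64')
  },
  // PDFs rarely change between turns, so they make a good prompt-cache breakpoint.
  cache_control: { type: 'ephemeral' },
};
```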

* feat: combine Upload Image into Upload to Provider, since both perform a direct upload, and change the provider upload icon to reflect multimodal upload

* feat: add citations support according to docs
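
Citations are enabled per document part via `citations: { enabled: true }` (see encode.js below); per Anthropic's citation docs, the cited spans then come back on response text blocks. A hedged sketch of reading them, with response field names taken from the public docs rather than this diff:

```js
// Illustrative only: collect page-level citations from an Anthropic response's content blocks.
// Field names (citations, start_page_number, cited_text) follow Anthropic's docs, not this diff.
function collectCitedPages(contentBlocks = []) {
  return contentBlocks
    .filter((block) => block.type === 'text' && Array.isArray(block.citations))
    .flatMap((block) =>
      block.citations.map((citation) => ({
        page: citation.start_page_number,
        quote: citation.cited_text,
      })),
    );
}
```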

* refactor: remove redundant 'document' check since documents are handled properly by formatMessage in the agents repo now

* refactor: change upload logic so the Anthropic endpoint isn't exempted from the normal Agents upload path, for consistency with the rest of the upload logic

* fix: include width and height in the return value of uploadLocalFile so images are correctly identified when going through an AgentUpload in addImageURLs
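
A usage sketch of the richer return value (the call site shown here is assumed, not part of this diff):

```js
// Assumed call site (not part of this diff): width/height are only populated for
// image uploads, so their presence is enough to route the file through the image path.
// uploadLocalFile is the local file-strategy handler changed in this PR (import path omitted).
async function handleUpload(req, file, file_id) {
  const { filepath, bytes, height, width } = await uploadLocalFile({ req, file, file_id });
  const isImage = Boolean(width && height);
  return { filepath, bytes, isImage, height, width };
}
```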

* chore: remove client-specific handling since direct provider uploads are handled by the agent client

* feat: handle documents in AgentClient so no changes to the agents repo are needed

* chore: remove unused changes

* chore: remove auto-generated comments from OG commit

* feat: add logic for agents to use direct-to-provider uploads if supported (currently just Anthropic)
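
A rough sketch of the capability gate this implies; the helper below is hypothetical, and only the Anthropic-only support is from this PR:

```js
// Hypothetical gate: which endpoints accept direct provider uploads (Anthropic-only in this PR).
const { EModelEndpoint } = require('librechat-data-provider');

const DIRECT_UPLOAD_ENDPOINTS = new Set([EModelEndpoint.anthropic]);

function supportsDirectProviderUpload(endpoint) {
  return DIRECT_UPLOAD_ENDPOINTS.has(endpoint);
}
```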

* fix: reintroduce role check to fix a render error caused by an undefined Content Part value

* fix: actually fix the render bug by using a proper isCreatedByUser check and making sure our mutation of formattedMessage.content is consistent

---------

Co-authored-by: Andres Restrepo <andres@thelinuxkid.com>
Co-authored-by: Claude <noreply@anthropic.com>
Dustin Healy 2025-08-15 19:46:00 -07:00 committed by Dustin Healy
parent 48f6f8f2f8
commit 89843262b2
14 changed files with 398 additions and 14 deletions


@@ -4,6 +4,7 @@ const axios = require('axios');
 const { logger } = require('@librechat/data-schemas');
 const { EModelEndpoint } = require('librechat-data-provider');
 const { generateShortLivedToken } = require('@librechat/api');
+const { resizeImageBuffer } = require('~/server/services/Files/images/resize');
 const { getBufferMetadata } = require('~/server/utils');
 const paths = require('~/config/paths');
@@ -286,7 +287,18 @@ async function uploadLocalFile({ req, file, file_id }) {
   await fs.promises.writeFile(newPath, inputBuffer);
   const filepath = path.posix.join('/', 'uploads', req.user.id, path.basename(newPath));
-  return { filepath, bytes };
+  let height, width;
+  if (file.mimetype && file.mimetype.startsWith('image/')) {
+    try {
+      const { width: imgWidth, height: imgHeight } = await resizeImageBuffer(inputBuffer, 'high');
+      height = imgHeight;
+      width = imgWidth;
+    } catch (error) {
+      logger.warn('[uploadLocalFile] Could not get image dimensions:', error.message);
+    }
+  }
+  return { filepath, bytes, height, width };
 }
 /**


@@ -0,0 +1,164 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { validateAnthropicPdf } = require('../validation/pdfValidator');

/**
 * Converts a readable stream to a buffer.
 *
 * @param {NodeJS.ReadableStream} stream - The readable stream to convert.
 * @returns {Promise<Buffer>} - Promise resolving to the buffer.
 */
async function streamToBuffer(stream) {
  return new Promise((resolve, reject) => {
    const chunks = [];
    stream.on('data', (chunk) => {
      chunks.push(chunk);
    });
    stream.on('end', () => {
      try {
        const buffer = Buffer.concat(chunks);
        chunks.length = 0;
        resolve(buffer);
      } catch (err) {
        reject(err);
      }
    });
    stream.on('error', (error) => {
      chunks.length = 0;
      reject(error);
    });
  }).finally(() => {
    if (stream.destroy && typeof stream.destroy === 'function') {
      stream.destroy();
    }
  });
}

/**
 * Processes and encodes document files for various endpoints.
 *
 * @param {Express.Request} req - Express request object
 * @param {MongoFile[]} files - Array of file objects to process
 * @param {string} endpoint - The endpoint identifier (e.g., EModelEndpoint.anthropic)
 * @returns {Promise<{documents: MessageContentDocument[], files: MongoFile[]}>}
 */
async function encodeAndFormatDocuments(req, files, endpoint) {
  const promises = [];
  /** @type {Record<FileSources, Pick<ReturnType<typeof getStrategyFunctions>, 'prepareDocumentPayload' | 'getDownloadStream'>>} */
  const encodingMethods = {};
  /** @type {{ documents: MessageContentDocument[]; files: MongoFile[] }} */
  const result = {
    documents: [],
    files: [],
  };

  if (!files || !files.length) {
    return result;
  }

  const documentFiles = files.filter(
    (file) => file.type === 'application/pdf' || file.type?.startsWith('application/'), // Future: support for other document types
  );

  if (!documentFiles.length) {
    return result;
  }

  for (let file of documentFiles) {
    /** @type {FileSources} */
    const source = file.source ?? 'local';

    if (file.type !== 'application/pdf' || endpoint !== EModelEndpoint.anthropic) {
      continue;
    }

    if (!encodingMethods[source]) {
      encodingMethods[source] = getStrategyFunctions(source);
    }

    const fileMetadata = {
      file_id: file.file_id || file._id,
      temp_file_id: file.temp_file_id,
      filepath: file.filepath,
      source: file.source,
      filename: file.filename,
      type: file.type,
    };

    promises.push([file, fileMetadata]);
  }

  const results = await Promise.allSettled(
    promises.map(async ([file, fileMetadata]) => {
      if (!file || !fileMetadata) {
        return { file: null, content: null, metadata: fileMetadata };
      }
      try {
        const source = file.source ?? 'local';
        const { getDownloadStream } = encodingMethods[source];
        const stream = await getDownloadStream(req, file.filepath);
        const buffer = await streamToBuffer(stream);
        const documentContent = buffer.toString('base64');
        return {
          file,
          content: documentContent,
          metadata: fileMetadata,
        };
      } catch (error) {
        console.error(`Error processing document ${file.filename}:`, error);
        return { file, content: null, metadata: fileMetadata };
      }
    }),
  );

  for (const settledResult of results) {
    if (settledResult.status === 'rejected') {
      console.error('Document processing failed:', settledResult.reason);
      continue;
    }

    const { file, content, metadata } = settledResult.value;
    if (!content || !file) {
      if (metadata) {
        result.files.push(metadata);
      }
      continue;
    }

    if (file.type === 'application/pdf' && endpoint === EModelEndpoint.anthropic) {
      const pdfBuffer = Buffer.from(content, 'base64');
      const validation = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
      if (!validation.isValid) {
        throw new Error(`PDF validation failed: ${validation.error}`);
      }

      const documentPart = {
        type: 'document',
        source: {
          type: 'base64',
          media_type: 'application/pdf',
          data: content,
        },
        cache_control: { type: 'ephemeral' },
        citations: { enabled: true },
      };

      result.documents.push(documentPart);
      result.files.push(metadata);
    }
  }

  return result;
}

module.exports = {
  encodeAndFormatDocuments,
};
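
A hedged usage sketch of the new helper from the agent side; the surrounding function, require path, and message shape are assumptions, not part of this diff:

```js
// Assumed call site (e.g. inside AgentClient); the require path is illustrative.
const { EModelEndpoint } = require('librechat-data-provider');
const { encodeAndFormatDocuments } = require('~/server/services/Files/documents');

async function attachDocumentParts(req, attachments, formattedMessage) {
  const { documents } = await encodeAndFormatDocuments(req, attachments, EModelEndpoint.anthropic);
  if (documents.length) {
    // Each entry is already a provider-ready `document` part (base64 PDF,
    // ephemeral cache_control, citations enabled), so it can sit alongside
    // the text and image parts in the user message's content array.
    formattedMessage.content = [...documents, ...formattedMessage.content];
  }
  return formattedMessage;
}
```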


@@ -0,0 +1,5 @@
const { encodeAndFormatDocuments } = require('./encode');

module.exports = {
  encodeAndFormatDocuments,
};


@@ -419,11 +419,11 @@ const processFileUpload = async ({ req, res, metadata }) => {
  const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
  const assistantSource =
    metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
  // Use the configured file strategy for regular file uploads (not vectordb)
  const source = isAssistantUpload ? assistantSource : appConfig.fileStrategy;
  const { handleFileUpload } = getStrategyFunctions(source);
  const { file_id, temp_file_id = null } = metadata;
  /** @type {OpenAI | undefined} */
  let openai;
  if (checkOpenAIStorage(source)) {


@@ -0,0 +1,72 @@
const { logger } = require('~/config');
const { anthropicPdfSizeLimit } = require('librechat-data-provider');

/**
 * Validates if a PDF meets Anthropic's requirements.
 * @param {Buffer} pdfBuffer - The PDF file as a buffer
 * @param {number} fileSize - The file size in bytes
 * @returns {Promise<{isValid: boolean, error?: string}>}
 */
async function validateAnthropicPdf(pdfBuffer, fileSize) {
  try {
    if (fileSize > anthropicPdfSizeLimit) {
      return {
        isValid: false,
        error: `PDF file size (${Math.round(fileSize / (1024 * 1024))}MB) exceeds Anthropic's 32MB limit`,
      };
    }

    if (!pdfBuffer || pdfBuffer.length < 5) {
      return {
        isValid: false,
        error: 'Invalid PDF file: too small or corrupted',
      };
    }

    const pdfHeader = pdfBuffer.subarray(0, 5).toString();
    if (!pdfHeader.startsWith('%PDF-')) {
      return {
        isValid: false,
        error: 'Invalid PDF file: missing PDF header',
      };
    }

    const pdfContent = pdfBuffer.toString('binary');
    if (
      pdfContent.includes('/Encrypt ') ||
      pdfContent.includes('/U (') ||
      pdfContent.includes('/O (')
    ) {
      return {
        isValid: false,
        error: 'PDF is password-protected or encrypted. Anthropic requires unencrypted PDFs.',
      };
    }

    const pageMatches = pdfContent.match(/\/Type[\s]*\/Page[^s]/g);
    const estimatedPages = pageMatches ? pageMatches.length : 1;
    if (estimatedPages > 100) {
      return {
        isValid: false,
        error: `PDF has approximately ${estimatedPages} pages, exceeding Anthropic's 100-page limit`,
      };
    }

    logger.debug(
      `PDF validation passed: ${Math.round(fileSize / 1024)}KB, ~${estimatedPages} pages`,
    );
    return { isValid: true };
  } catch (error) {
    logger.error('PDF validation error:', error);
    return {
      isValid: false,
      error: 'Failed to validate PDF file',
    };
  }
}

module.exports = {
  validateAnthropicPdf,
};
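
A quick standalone usage sketch (file path and require path are illustrative; in this PR the check runs inside encodeAndFormatDocuments()):

```js
// Illustrative standalone check of a PDF before sending it to Anthropic.
const fs = require('fs');
const { validateAnthropicPdf } = require('./pdfValidator'); // path assumed

async function assertUploadablePdf(filePath) {
  const pdfBuffer = fs.readFileSync(filePath);
  const { isValid, error } = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
  if (!isValid) {
    throw new Error(`PDF rejected before upload: ${error}`);
  }
  return pdfBuffer;
}
```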