Fixed crash when downloading attachments with non-ASCII filenames.

Thanks to xet7!

Fixes #2759
Lauri Ojansivu 2021-04-29 13:26:49 +03:00
parent 843ff8eaaa
commit c2da477735
277 changed files with 30568 additions and 52 deletions


@@ -0,0 +1,5 @@
language: node_js
node_js:
- "0.10"
before_install:
- "curl -L http://git.io/s0Zu-w | /bin/sh"


@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013-2014 [@raix](https://github.com/raix) and [@aldeed](https://github.com/aldeed), aka Morten N.O. Nørgaard Henriksen, mh@gi-software.com
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -0,0 +1,8 @@
wekan-cfs-upload-http
=========================
This is a Meteor package that provides HTTP uploads for
[CollectionFS](https://github.com/zcfs/Meteor-CollectionFS).
You don't need to manually add this package to your app. It is added when you
add the `wekan-cfs-standard-packages` package.
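For orientation, here is a rough sketch of the client-side flow this package powers. The `Attachments` collection, the GridFS store, and the `upload` template are app-side examples, not part of this package; only the chunked HTTP transfer behind `insert` comes from `wekan-cfs-upload-http`.

```js
// App code (illustrative): declare a CollectionFS collection with some store.
var Attachments = new FS.Collection('attachments', {
  stores: [new FS.Store.GridFS('attachments')] // store package chosen by the app
});

// On the client, inserting a browser File queues a chunked HTTP upload
// through the FS.HTTP.uploadQueue provided by this package.
Template.upload.events({
  'change input[type=file]': function (event) {
    FS.Utility.eachFile(event, function (file) {
      Attachments.insert(file, function (error, fileObj) {
        if (error) console.error('Upload failed', error);
      });
    });
  }
});
```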


@@ -0,0 +1,24 @@
## wekan-cfs-upload-http Public API ##
CollectionFS, HTTP File Upload
_API documentation automatically generated by [docmeteor](https://github.com/raix/docmeteor)._
The default upload chunk size is 2 MB. It can be overridden globally with `FS.config.uploadChunkSize` or per `FS.Collection` via the `chunkSize` collection option.
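A hedged sketch of both override points (the 5 MB and 1 MB values and the `attachments` name are arbitrary examples, not defaults of this package):

```js
// Global override: applies to every FS.Collection on this client.
FS.config.uploadChunkSize = 5 * 1024 * 1024; // 5 MB instead of the 2 MB default

// Per-collection override via the chunkSize option.
var Attachments = new FS.Collection('attachments', {
  chunkSize: 1024 * 1024, // 1 MB chunks for this collection only
  stores: [ /* app-defined stores */ ]
});
```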
-
### <a name="FS.File.prototype.resume"></a>*fsFile*.resume(ref)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method __resume__ is defined in `prototype` of `FS.File`*
__Arguments__
* __ref__ *{[File](#File)|[Blob](#Blob)|Buffer}*
> This function is not yet implemented for server
> ```FS.File.prototype.resume = function(ref) { ...``` [upload-http-client.js:257](upload-http-client.js#L257)
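A minimal client-side sketch, assuming `fileObj` is an `FS.File` whose upload was interrupted and `ref` is the same browser `File` reselected by the user:

```js
// resume() hands the file back to FS.HTTP.uploadQueue.resumeUploadingFile().
// As currently implemented, ref is not used on the client and the server
// side is not implemented (see the note above).
fileObj.resume(ref);
```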


@@ -0,0 +1,160 @@
## Public and Private API ##
_API documentation automatically generated by [docmeteor](https://github.com/raix/docmeteor)._
***
__File: ["upload-http-client.js"](upload-http-client.js) Where: {client}__
***
The default upload chunk size is 2 MB. It can be overridden globally with `FS.config.uploadChunkSize` or per `FS.Collection` via the `chunkSize` collection option.
-
### <a name="_taskHandler"></a>_taskHandler(task, next)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method is private*
__Arguments__
* __task__ *{Object}*
* __next__ *{Function}*
__Returns__ *{undefined}*
> ```var _taskHandler = function(task, next) { ...``` [upload-http-client.js:15](upload-http-client.js#L15)
-
### <a name="_errorHandler"></a>_errorHandler(data, addTask)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method is private*
__Arguments__
* __data__ *{Object}*
* __addTask__ *{Function}*
__Returns__ *{undefined}*
> ```var _errorHandler = function(data, addTask, failures) { ...``` [upload-http-client.js:49](upload-http-client.js#L49)
-
### <a name="UploadTransferQueue"></a>new UploadTransferQueue([options])&nbsp;&nbsp;<sub><i>Client</i></sub> ###
__Arguments__
* __options__ *{Object}* (Optional)
> ```UploadTransferQueue = function(options) { ...``` [upload-http-client.js:60](upload-http-client.js#L60)
-
### <a name="UploadTransferQueue.isUploadingFile"></a>*uploadtransferqueue*.isUploadingFile(fileObj)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method __isUploadingFile__ is defined in `UploadTransferQueue`*
__Arguments__
* __fileObj__ *{[FS.File](#FS.File)}*
File to check if uploading
__Returns__ *{Boolean}*
True if the file is uploading
__TODO__
```
* Maybe have a similar function for accessing the file upload queue?
```
> ```self.isUploadingFile = function(fileObj) { ...``` [upload-http-client.js:90](upload-http-client.js#L90)
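A hedged usage sketch (`Attachments` and the lookup are app-side assumptions, not part of this package):

```js
// Before offering a "retry upload" action, check that the file is neither
// already uploading nor fully uploaded.
var fileObj = Attachments.findOne(fileId); // illustrative app-side lookup
if (fileObj && !FS.HTTP.uploadQueue.isUploadingFile(fileObj) && !fileObj.isUploaded()) {
  FS.HTTP.uploadQueue.resumeUploadingFile(fileObj);
}
```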
-
### <a name="UploadTransferQueue.resumeUploadingFile"></a>*uploadtransferqueue*.resumeUploadingFile(File)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method __resumeUploadingFile__ is defined in `UploadTransferQueue`*
__Arguments__
* __fileObj__ *{[FS.File](#FS.File)}*
File to resume uploading
__TODO__
```
* Not sure if this is the best way to handle resumes
```
> ```self.resumeUploadingFile = function(fileObj) { ...``` [upload-http-client.js:99](upload-http-client.js#L99)
-
### <a name="UploadTransferQueue.uploadFile"></a>*uploadtransferqueue*.uploadFile(File)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method __uploadFile__ is defined in `UploadTransferQueue`*
__Arguments__
* __fileObj__ *{[FS.File](#FS.File)}*
File to upload
__TODO__
```
* Check that a file can only be added once - maybe a visual helper on the FS.File?
* Have an initial request to the server getting uploaded chunks for resume
```
> ```self.uploadFile = function(fileObj) { ...``` [upload-http-client.js:120](upload-http-client.js#L120)
-
### <a name="FS.HTTP.uploadQueue"></a>*fsHttp*.uploadQueue UploadTransferQueue&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This property __uploadQueue__ is defined in `FS.HTTP`*
There is a single uploads transfer queue per client (not per CFS)
> ```FS.HTTP.uploadQueue = new UploadTransferQueue();``` [upload-http-client.js:243](upload-http-client.js#L243)
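A hedged sketch of queue-level control over all in-flight uploads on this client (`pause`/`resume` are assumed from the PowerQueue API; `cancel` is an alias for the queue's `reset`, see upload-http-client.js):

```js
FS.HTTP.uploadQueue.pause();  // assumed PowerQueue method: hold further chunk PUTs
FS.HTTP.uploadQueue.resume(); // assumed PowerQueue method: continue the queue
FS.HTTP.uploadQueue.cancel(); // alias for reset(): drop all queued chunk transfers
```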
-
### <a name="FS.File.prototype.resume"></a>*fsFile*.resume(ref)&nbsp;&nbsp;<sub><i>Client</i></sub> ###
*This method __resume__ is defined in `prototype` of `FS.File`*
__Arguments__
* __ref__ *{[File](#File)|[Blob](#Blob)|Buffer}*
__TODO__
```
* WIP, Not yet implemented for server
```
> This function is not yet implemented for server
> ```FS.File.prototype.resume = function(ref) { ...``` [upload-http-client.js:257](upload-http-client.js#L257)


@@ -0,0 +1,37 @@
Package.describe({
name: 'wekan-cfs-upload-http',
version: '0.0.21',
summary: 'CollectionFS, HTTP File Upload',
});
Package.onUse(function(api) {
api.versionsFrom('1.0');
api.use([
'wekan-cfs-base-package@0.0.30',
'wekan-cfs-tempstore@0.1.4',
'wekan-cfs-file@0.1.16',
'wekan-cfs-access-point@0.1.49',
'wekan-cfs-power-queue@0.9.11',
'wekan-cfs-reactive-list@0.0.9'
]);
api.addFiles([
'upload-http-common.js',
'upload-http-client.js'
], 'client');
api.addFiles([
'upload-http-common.js'
], 'server');
});
// Package.onTest(function (api) {
// api.use('collectionfs');
// api.use('test-helpers', 'server');
// api.use(['tinytest', 'underscore', 'ejson', 'ordered-dict',
// 'random', 'deps']);
// api.addFiles('tests/server-tests.js', 'server');
// api.addFiles('tests/client-tests.js', 'client');
// });


@@ -0,0 +1,27 @@
function equals(a, b) {
return !!(EJSON.stringify(a) === EJSON.stringify(b));
}
Tinytest.add('cfs-upload-http - client - test environment', function(test) {
test.isTrue(typeof FS.Collection !== 'undefined', 'test environment not initialized FS.Collection');
});
//Test API:
//test.isFalse(v, msg)
//test.isTrue(v, msg)
//test.equal(actual, expected, message, not)
//test.length(obj, len)
//test.include(s, v)
//test.isNaN(v, msg)
//test.isUndefined(v, msg)
//test.isNotNull
//test.isNull
//test.throws(func)
//test.instanceOf(obj, klass)
//test.notEqual(actual, expected, message)
//test.runId()
//test.exception(exception)
//test.expect_fail()
//test.ok(doc)
//test.fail(doc)
//test.equal(a, b, msg)


@@ -0,0 +1,27 @@
function equals(a, b) {
return !!(EJSON.stringify(a) === EJSON.stringify(b));
}
Tinytest.add('cfs-upload-http - server - test environment', function(test) {
test.isTrue(typeof FS.Collection !== 'undefined', 'test environment not initialized FS.Collection');
});
//Test API:
//test.isFalse(v, msg)
//test.isTrue(v, msg)
//test.equal(actual, expected, message, not)
//test.length(obj, len)
//test.include(s, v)
//test.isNaN(v, msg)
//test.isUndefined(v, msg)
//test.isNotNull
//test.isNull
//test.throws(func)
//test.instanceOf(obj, klass)
//test.notEqual(actual, expected, message)
//test.runId()
//test.exception(exception)
//test.expect_fail()
//test.ok(doc)
//test.fail(doc)
//test.equal(a, b, msg)


@@ -0,0 +1,260 @@
/*
* HTTP Upload Transfer Queue
*/
// 2MB default upload chunk size
// Can be overridden by user with FS.config.uploadChunkSize or per FS.Collection in collection options
var defaultChunkSize = 2 * 1024 * 1024;
/**
* @private
* @param {Object} task
* @param {Function} next
* @return {undefined}
*/
var _taskHandler = function(task, next) {
FS.debug && console.log("uploading chunk " + task.chunk + ", bytes " + task.start + " to " + Math.min(task.end, task.fileObj.size()) + " of " + task.fileObj.size());
task.fileObj.data.getBinary(task.start, task.end, function gotBinaryCallback(err, data) {
if (err) {
next(new Meteor.Error(err.error, err.message));
} else {
FS.debug && console.log('PUT to URL', task.url, task.urlParams);
HTTP.call("PUT", task.url, {
params: FS.Utility.extend({chunk: task.chunk}, task.urlParams),
content: data,
headers: {
'Content-Type': task.fileObj.type()
}
}, function(error, result) {
task = null;
if (error) {
next(new Meteor.Error(error.error, error.message));
} else {
next();
}
});
}
});
};
/**
* @private
* @param {Object} data
* @param {Function} addTask
* @return {undefined}
*/
var _errorHandler = function(data, addTask, failures) {
// If file upload fails
// TODO We should retry a few times and then emit error?
// data.fileObj.emit("error", error);
};
/** @method UploadTransferQueue
* @namespace UploadTransferQueue
* @constructor
* @param {Object} [options]
*/
UploadTransferQueue = function(options) {
// Rig options
options = options || {};
// Init the power queue
var self = new PowerQueue({
name: 'HTTPUploadTransferQueue',
// spinalQueue: ReactiveList,
maxProcessing: 1,
maxFailures: 5,
jumpOnFailure: true,
autostart: true,
isPaused: false,
filo: false,
debug: FS.debug
});
// Keep track of uploaded files via this queue
self.files = {};
// cancel maps onto queue reset
self.cancel = self.reset;
/**
* @method UploadTransferQueue.isUploadingFile
* @param {FS.File} fileObj File to check if uploading
* @returns {Boolean} True if the file is uploading
*
* @todo Maybe have a similar function for accessing the file upload queue?
*/
self.isUploadingFile = function(fileObj) {
// Check if file is already in queue
return !!(fileObj && fileObj._id && fileObj.collectionName && (self.files[fileObj.collectionName] || {})[fileObj._id]);
};
/** @method UploadTransferQueue.resumeUploadingFile
* @param {FS.File} fileObj File to resume uploading
* @todo Not sure if this is the best way to handle resumes
*/
self.resumeUploadingFile = function(fileObj) {
// Make sure we are handed a FS.File
if (!(fileObj instanceof FS.File)) {
throw new Error('Transfer queue expects a FS.File');
}
if (fileObj.isMounted()) {
// This might still be true, preventing upload, if
// there was a server restart without client restart.
self.files[fileObj.collectionName] = self.files[fileObj.collectionName] || {};
self.files[fileObj.collectionName][fileObj._id] = false;
// Kick off normal upload
self.uploadFile(fileObj);
}
};
/** @method UploadTransferQueue.uploadFile
* @param {FS.File} fileObj File to upload
* @todo Check that a file can only be added once - maybe a visual helper on the FS.File?
* @todo Have an initial request to the server getting uploaded chunks for resume
*/
self.uploadFile = function(fileObj) {
FS.debug && console.log("HTTP uploadFile");
// Make sure we are handed a FS.File
if (!(fileObj instanceof FS.File)) {
throw new Error('Transfer queue expects a FS.File');
}
// Make sure that we have size as number
if (typeof fileObj.size() !== 'number') {
throw new Error('TransferQueue upload failed: fileObj size not set');
}
// We don't add the file if it's already in transfer or if already uploaded
if (self.isUploadingFile(fileObj) || fileObj.isUploaded()) {
return;
}
// Make sure the file object is mounted on a collection
if (fileObj.isMounted()) {
var collectionName = fileObj.collectionName;
var id = fileObj._id;
// Set the chunkSize to match the collection options, or global config, or default
fileObj.chunkSize = fileObj.collection.options.chunkSize || FS.config.uploadChunkSize || defaultChunkSize;
// Set counter for uploaded chunks
fileObj.chunkCount = 0;
// Calc the number of chunks
fileObj.chunkSum = Math.ceil(fileObj.size() / fileObj.chunkSize);
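// Example: a 5 MB file with the default 2 MB chunkSize gives
// chunkSum = Math.ceil(5 / 2) = 3 chunks; the last task's `end` (6 MB) runs
// past the file size, which getBinary is expected to handle (the debug log in
// _taskHandler clamps the same way with Math.min(task.end, fileObj.size())).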
if (fileObj.chunkSum === 0)
return;
// Update the filerecord
// TODO eventually we should be able to do this without storing any chunk info in the filerecord
fileObj.update({$set: {chunkSize: fileObj.chunkSize, chunkCount: fileObj.chunkCount, chunkSum: fileObj.chunkSum}});
// Create a sub queue
var chunkQueue = new PowerQueue({
onEnded: function oneChunkQueueEnded() {
// Remove from list of files being uploaded
self.files[collectionName][id] = false;
// XXX It might be possible for this to be called even though there were errors uploading?
fileObj.emit("uploaded");
},
spinalQueue: ReactiveList,
maxProcessing: 1,
maxFailures: 5,
jumpOnFailure: true,
autostart: false,
isPaused: false,
filo: false
});
// Rig the custom task handler
chunkQueue.taskHandler = _taskHandler;
// Rig the error handler
chunkQueue.errorHandler = _errorHandler;
// Set flag that this file is being transfered
self.files[collectionName] = self.files[collectionName] || {};
self.files[collectionName][id] = true;
// Construct URL
var url = FS.HTTP.uploadUrl + '/' + collectionName;
if (id) {
url += '/' + id;
}
// TODO: Could we somehow figure out if the collection requires login?
var authToken = '';
if (typeof Accounts !== "undefined") {
var authObject = {
authToken: Accounts._storedLoginToken() || '',
};
// Set the authToken
var authString = JSON.stringify(authObject);
authToken = FS.Utility.btoa(authString);
}
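// Illustration: with a stored login token of "abc" this yields
// FS.Utility.btoa('{"authToken":"abc"}') === 'eyJhdXRoVG9rZW4iOiJhYmMifQ==',
// which is sent to the server as the `token` URL parameter below.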
// Construct query string
var urlParams = {
filename: fileObj.name()
};
if (authToken !== '') {
urlParams.token = authToken;
}
// Add chunk upload tasks
for (var chunk = 0, start; chunk < fileObj.chunkSum; chunk++) {
start = chunk * fileObj.chunkSize;
// Create and add the task
// XXX should we somehow make sure we haven't uploaded this chunk already, in
// case we are resuming?
chunkQueue.add({
chunk: chunk,
name: fileObj.name(),
url: url,
urlParams: urlParams,
fileObj: fileObj,
start: start,
end: (chunk + 1) * fileObj.chunkSize
});
}
// Add the queue to the main upload queue
self.add(chunkQueue);
}
};
return self;
};
/**
* @namespace FS
* @type UploadTransferQueue
*
* There is a single uploads transfer queue per client (not per CFS)
*/
FS.HTTP.uploadQueue = new UploadTransferQueue();
/*
* FS.File extensions
*/
/**
* @method FS.File.prototype.resume
* @public
* @param {File|Blob|Buffer} ref
* @todo WIP, Not yet implemented for server
*
* > This function is not yet implemented for server
*/
FS.File.prototype.resume = function(ref) {
var self = this;
// The queue is exposed as FS.HTTP.uploadQueue above; plain FS.uploadQueue is never defined.
FS.HTTP.uploadQueue.resumeUploadingFile(self);
};


@@ -0,0 +1 @@
FS.HTTP = FS.HTTP || {};