Bugfix: 2560, 2604 - enable mixed mode mongodb attachment and filesystem attachment while reading
parent 13a13e8eca
commit c569565ec0
6 changed files with 180 additions and 24 deletions
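The hunk below references a few symbols that are defined higher up in the attachments model and therefore do not appear on this page: `storeName`, `localFSStore`, and `defaultStoreOptions`. The sketch that follows shows roughly what they are expected to look like, inferred from the visible context lines; the exact values, and the `ATTACHMENTS_STORE_PATH` environment variable, are assumptions rather than part of this commit.

// Assumed context from above the hunk (not part of this diff):
// - storeName: the CollectionFS store name used for attachments
// - localFSStore: filesystem path from the environment; non-empty enables LOCALFS mode
// - defaultStoreOptions: forces non-image uploads to be served as octet-stream downloads
const storeName = 'attachments';
const localFSStore = process.env.ATTACHMENTS_STORE_PATH;
const defaultStoreOptions = {
  beforeWrite: fileObj => {
    if (!fileObj.isImage()) {
      return {
        type: 'application/octet-stream',
      };
    }
    return {};
  },
};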
@@ -10,27 +10,169 @@ const defaultStoreOptions = {
     return {};
   },
 };
-const Store = localFSStore
-  ? new FS.Store.FileSystem(storeName, {
-      path: localFSStore,
-      ...defaultStoreOptions,
-    })
-  : new FS.Store.GridFS(storeName, {
-      // XXX Add a new store for cover thumbnails so we don't load big images in
-      // the general board view
-      // If the uploaded document is not an image we need to enforce browser
-      // download instead of execution. This is particularly important for HTML
-      // files that the browser will just execute if we don't serve them with the
-      // appropriate `application/octet-stream` MIME header which can lead to user
-      // data leaks. I imagine other formats (like PDF) can also be attack vectors.
-      // See https://github.com/wekan/wekan/issues/99
-      // XXX Should we use `beforeWrite` option of CollectionFS instead of
-      // collection-hooks?
-      // We should use `beforeWrite`.
-      ...defaultStoreOptions,
-    });
+let store;
+if (localFSStore) {
+  // have to reinvent methods from FS.Store.GridFS and FS.Store.FileSystem
+  const fs = Npm.require('fs');
+  const path = Npm.require('path');
+  const mongodb = Npm.require('mongodb');
+  const Grid = Npm.require('gridfs-stream');
+  // calculate the absolute path here, because FS.Store.FileSystem doesn't expose the absolute path and FS.Store doesn't expose API calls :(
+  let pathname = localFSStore;
+  /*eslint camelcase: ["error", {allow: ["__meteor_bootstrap__"]}] */
+
+  if (!pathname && __meteor_bootstrap__ && __meteor_bootstrap__.serverDir) {
+    pathname = path.join(
+      __meteor_bootstrap__.serverDir,
+      `../../../cfs/files/${storeName}`,
+    );
+  }
+
+  if (!pathname)
+    throw new Error('FS.Store.FileSystem unable to determine path');
+
+  // Check if we have '~/foo/bar'
+  if (pathname.split(path.sep)[0] === '~') {
+    const homepath =
+      process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
+    if (homepath) {
+      pathname = pathname.replace('~', homepath);
+    } else {
+      throw new Error('FS.Store.FileSystem unable to resolve "~" in path');
+    }
+  }
+
+  // Set absolute path
+  const absolutePath = path.resolve(pathname);
+
+  const _FStore = new FS.Store.FileSystem(storeName, {
+    path: localFSStore,
+    ...defaultStoreOptions,
+  });
+  const GStore = {
+    fileKey(fileObj) {
+      const key = {
+        _id: null,
+        filename: null,
+      };
+
+      // If we're passed a fileObj, we retrieve the _id and filename from it.
+      if (fileObj) {
+        const info = fileObj._getInfo(storeName, {
+          updateFileRecordFirst: false,
+        });
+        key._id = info.key || null;
+        key.filename =
+          info.name ||
+          fileObj.name({ updateFileRecordFirst: false }) ||
+          `${fileObj.collectionName}-${fileObj._id}`;
+      }
+
+      // If key._id is null at this point, createWriteStream will let GridFS generate a new ID
+      return key;
+    },
+    db: undefined,
+    mongoOptions: { useNewUrlParser: true },
+    mongoUrl: process.env.MONGO_URL,
+    init() {
+      this._init(err => {
+        this.inited = !err;
+      });
+    },
+    _init(callback) {
+      const self = this;
+      mongodb.MongoClient.connect(self.mongoUrl, self.mongoOptions, function(
+        err,
+        db,
+      ) {
+        if (err) {
+          return callback(err);
+        }
+        self.db = db;
+        return callback(null);
+      });
+      return;
+    },
+    createReadStream(fileKey, options) {
+      const self = this;
+      if (!self.inited) {
+        self.init();
+        return undefined;
+      }
+      options = options || {};
+
+      // Init GridFS
+      const gfs = new Grid(self.db, mongodb);
+
+      // Set the default streaming settings
+      const settings = {
+        _id: new mongodb.ObjectID(fileKey._id),
+        root: `cfs_gridfs.${storeName}`,
+      };
+
+      // Check if this should be a partial read
+      if (
+        typeof options.start !== 'undefined' &&
+        typeof options.end !== 'undefined'
+      ) {
+        // Add partial info
+        settings.range = {
+          startPos: options.start,
+          endPos: options.end,
+        };
+      }
+      return gfs.createReadStream(settings);
+    },
+  };
+  GStore.init();
+  const CRS = 'createReadStream';
+  const _CRS = `_${CRS}`;
+  const FStore = _FStore._transform;
+  FStore[_CRS] = FStore[CRS].bind(FStore);
+  FStore[CRS] = function(fileObj, options) {
+    let stream;
+    try {
+      const localFile = path.join(
+        absolutePath,
+        FStore.storage.fileKey(fileObj),
+      );
+      const state = fs.statSync(localFile);
+      if (state) {
+        stream = FStore[_CRS](fileObj, options);
+      }
+    } catch (e) {
+      // file is not there, try GridFS
+      stream = undefined;
+    }
+    if (stream) return stream;
+    else {
+      try {
+        const stream = GStore[CRS](GStore.fileKey(fileObj), options);
+        return stream;
+      } catch (e) {
+        return undefined;
+      }
+    }
+  }.bind(FStore);
+  store = _FStore;
+} else {
+  store = new FS.Store.GridFS(localFSStore ? `G${storeName}` : storeName, {
+    // XXX Add a new store for cover thumbnails so we don't load big images in
+    // the general board view
+    // If the uploaded document is not an image we need to enforce browser
+    // download instead of execution. This is particularly important for HTML
+    // files that the browser will just execute if we don't serve them with the
+    // appropriate `application/octet-stream` MIME header which can lead to user
+    // data leaks. I imagine other formats (like PDF) can also be attack vectors.
+    // See https://github.com/wekan/wekan/issues/99
+    // XXX Should we use `beforeWrite` option of CollectionFS instead of
+    // collection-hooks?
+    // We should use `beforeWrite`.
+    ...defaultStoreOptions,
+  });
+}
 Attachments = new FS.Collection('attachments', {
-  stores: [Store],
+  stores: [store],
 });
 
 if (Meteor.isServer) {
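In plain terms, the commit monkey-patches the filesystem store's internal `createReadStream`: when `localFSStore` is set, a read first stats the file on disk and only falls back to GridFS when the local copy is missing, so attachments uploaded before the switch to LOCALFS remain readable. Below is a minimal, self-contained restatement of that fallback; the `gridfsReadStream` callback and the `fileKey.filename` on-disk layout are simplifications for illustration, not the actual helpers in the diff.

// Simplified sketch of the read fallback introduced above. `gridfsReadStream`
// stands in for the gridfs-stream call built in GStore.createReadStream, and
// joining absolutePath with fileKey.filename simplifies FStore.storage.fileKey.
const fs = require('fs');
const path = require('path');

function openAttachmentStream(absolutePath, fileKey, gridfsReadStream) {
  try {
    const localFile = path.join(absolutePath, fileKey.filename);
    fs.statSync(localFile); // throws if the attachment was never written to disk
    return fs.createReadStream(localFile); // prefer the filesystem copy when present
  } catch (err) {
    // Not on disk: the attachment predates LOCALFS mode, so stream it from GridFS.
    return gridfsReadStream(fileKey);
  }
}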