Meteor-Files S3 upload: insert is not a function on client side
I am trying to use ostrio:files (aka Meteor-Files) with S3. When I run Images.insert on the CLIENT with the example S3 code, it returns "insert is not a function". My code is identical to the example on the S3 integration page, except that the collection name is changed. Is additional code needed, or can someone post an example of client-side code for the S3 path? I am not using templates; I use React on the front end, although this is plain JS. FYI, if I swap the code in images.js for the original non-S3 code from collection-fs, everything works fine.
import { FilesCollection } from 'meteor/ostrio:files';
import Images from '../../../api/Images/Images'; // Server-side-file-store.js

submitLogo() {
  const logo = document.getElementById('logoInput').files[0];
  const upload = Images.insert({
    file: logo,
    streams: 'dynamic',
    chunkSize: 'dynamic'
  }, false);
  upload.on('end', function (error, fileObj) {
    if (error) {
      alert('Error during upload: ' + error);
    } else {
      alert('File "' + fileObj.name + '" successfully uploaded');
    }
    // Leftover from the Blaze template example; `template` is undefined
    // in this React component.
    template.currentUpload.set(false);
  });
  upload.start();
}
The example S3 code, with only the collection name changed:
import { Meteor } from 'meteor/meteor';
import { _ } from 'meteor/underscore';
import { Random } from 'meteor/random';
import { FilesCollection } from 'meteor/ostrio:files';
import stream from 'stream';
import S3 from 'aws-sdk/clients/s3'; /* http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html */
/* See fs-extra and graceful-fs NPM packages */
/* For better i/o performance */
import fs from 'fs';
/* Example: S3='{"s3":{"key": "xxx", "secret": "xxx", "bucket": "xxx", "region": "xxx"}}' meteor */
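/* Equivalent alternative (not from the original example): start Meteor with a
   settings file instead, e.g. `meteor --settings settings.json` where the file
   contains {"s3": {"key": "xxx", "secret": "xxx", "bucket": "xxx", "region": "xxx"}};
   Meteor.settings.s3 is then populated directly and the env-var parsing below
   becomes a fallback. */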
if (process.env.S3) {
Meteor.settings.s3 = JSON.parse(process.env.S3).s3;
}
const s3Conf = Meteor.settings.s3 || {};
const bound = Meteor.bindEnvironment((callback) => {
return callback();
});
/* Check settings existence in `Meteor.settings` */
/* This is the best practice for app security */
if (s3Conf && s3Conf.key && s3Conf.secret && s3Conf.bucket && s3Conf.region) {
// Create a new S3 object
const s3 = new S3({
secretAccessKey: s3Conf.secret,
accessKeyId: s3Conf.key,
region: s3Conf.region,
// sslEnabled: true, // optional
httpOptions: {
timeout: 6000,
agent: false
}
});
// Declare the Meteor file collection on the Server
const Images = new FilesCollection({
debug: false, // Change to `true` for debugging
storagePath: 'assets/app/uploads/uploadedFiles',
collectionName: 'Images',
// Disallow Client to execute remove, use the Meteor.method
allowClientCode: false,
// Start moving files to AWS:S3
// after fully received by the Meteor server
onAfterUpload(fileRef) {
// Run through each version of the uploaded file
_.each(fileRef.versions, (vRef, version) => {
// We use Random.id() instead of real file's _id
// to secure files from reverse engineering on the AWS client
const filePath = 'files/' + (Random.id()) + '-' + version + '.' + fileRef.extension;
// Create the AWS:S3 object.
// Feel free to change the storage class; see the documentation:
// `STANDARD_IA` is the best deal for rarely accessed files.
// Key is the file name we are creating on AWS:S3, so it will be like files/XXXXXXXXXXXXXXXXX-original.XXXX
// Body is the file stream we are sending to AWS
s3.putObject({
// ServerSideEncryption: 'AES256', // Optional
StorageClass: 'STANDARD',
Bucket: s3Conf.bucket,
Key: filePath,
Body: fs.createReadStream(vRef.path),
ContentType: vRef.type,
}, (error) => {
bound(() => {
if (error) {
console.error(error);
} else {
// Update FilesCollection with link to the file at AWS
const upd = { $set: {} };
upd['$set']['versions.' + version + '.meta.pipePath'] = filePath;
this.collection.update({
_id: fileRef._id
}, upd, (updError) => {
if (updError) {
console.error(updError);
} else {
// Unlink original files from FS after successful upload to AWS:S3
this.unlink(this.collection.findOne(fileRef._id), version);
}
});
}
});
});
});
},
// Intercept access to the file
// And redirect request to AWS:S3
interceptDownload(http, fileRef, version) {
let path;
if (fileRef && fileRef.versions && fileRef.versions[version] && fileRef.versions[version].meta && fileRef.versions[version].meta.pipePath) {
path = fileRef.versions[version].meta.pipePath;
}
if (path) {
// If file is successfully moved to AWS:S3
// We will pipe request to AWS:S3
// So, original link will stay always secure
// To force ?play and ?download parameters
// and to keep original file name, content-type,
// content-disposition, chunked "streaming" and cache-control
// we're using low-level .serve() method
const opts = {
Bucket: s3Conf.bucket,
Key: path
};
if (http.request.headers.range) {
const vRef = fileRef.versions[version];
let range = _.clone(http.request.headers.range);
const array = range.split(/bytes=([0-9]*)-([0-9]*)/);
const start = parseInt(array[1], 10);
let end = parseInt(array[2], 10);
if (isNaN(end)) {
// Request data from AWS:S3 by small chunks
end = (start + this.chunkSize) - 1;
if (end >= vRef.size) {
end = vRef.size - 1;
}
}
opts.Range = `bytes=${start}-${end}`;
http.request.headers.range = `bytes=${start}-${end}`;
}
const fileColl = this;
s3.getObject(opts, function (error) {
if (error) {
console.error(error);
if (!http.response.finished) {
http.response.end();
}
} else {
if (http.request.headers.range && this.httpResponse.headers['content-range']) {
// Set the proper range header according to what is returned from AWS:S3
http.request.headers.range = this.httpResponse.headers['content-range'].split('/')[0].replace('bytes ', 'bytes=');
}
const dataStream = new stream.PassThrough();
fileColl.serve(http, fileRef, fileRef.versions[version], version, dataStream);
dataStream.end(this.data.Body);
}
});
return true;
}
// While the file is not yet uploaded to AWS:S3,
// it will be served from the local FS
return false;
}
});
// Intercept FilesCollection's remove method to remove file from AWS:S3
const _origRemove = Images.remove;
Images.remove = function (search) {
const cursor = this.collection.find(search);
cursor.forEach((fileRef) => {
_.each(fileRef.versions, (vRef) => {
if (vRef && vRef.meta && vRef.meta.pipePath) {
// Remove the object from AWS:S3 first, then we will call the original FilesCollection remove
s3.deleteObject({
Bucket: s3Conf.bucket,
Key: vRef.meta.pipePath,
}, (error) => {
bound(() => {
if (error) {
console.error(error);
}
});
});
}
});
});
//remove original file from database
_origRemove.call(this, search);
};
} else {
throw new Meteor.Error(401, 'Missing Meteor file settings');
}
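Since allowClientCode is false, removes from the client are blocked, and deletion has to go through a Meteor method, as the comment in the collection definition suggests. A minimal sketch, living in the same server file inside the if block so it can see Images; the method name removeImage and the ownership check are assumptions, not part of the original example:

import { check } from 'meteor/check'; // goes at the top of the file

Meteor.methods({
  removeImage(fileId) {
    check(fileId, String);
    const fileRef = Images.collection.findOne({ _id: fileId });
    // Assumed ownership rule; adapt to your own auth logic.
    if (!fileRef || fileRef.userId !== this.userId) {
      throw new Meteor.Error(403, 'Not allowed');
    }
    // The overridden Images.remove above also deletes the AWS:S3 objects.
    Images.remove({ _id: fileId });
  }
});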
Further image (JPEG, PNG) processing with AWS Lambda
The basic concept: you already have an S3 folder that you use for storage above. We are going to set a Lambda trigger on that folder, and for each file saved into it (plus any other condition you wish) we will save a thumbnail into another folder.
First, sign in to your AWS console and select your region from the top bar. Go to your Lambda dashboard and create a new function.
Add a trigger for S3, select your bucket, select "Object Created (All)", check Enable trigger, and save (Add). Then add the "Function Code". The code will be your xxx.js file zipped together with the node_modules folder used by your xxx.js file. Please note that your Lambda function will need to have the same name as your xxx.js file (e.g. the JS file name ImageResizer.js will require the Lambda function name/handler ImageResizer.handler). Upload your ZIP file.
Your resizer JS file
We will be using two different methods, so feel free to choose the one you prefer.
Official Lambda resizer by AWS: full documentation here. This is based on sharp.js, claimed to be 4-5 times faster than ImageMagick. Just download the ZIP from the Amazon documentation and follow the steps above. You might want to make sure that the packages in the package.json file are at the latest version. If not, please run an npm install to the latest versions in order to generate the updated node_modules before you zip your index.js and node_modules folder together.
Resizer based on ImageMagick (the example shows a resize to JPG output, 420px width, 85% quality, with CacheControl metadata set to 10 days).
package.json
{
"name": "amazon-lambda-resizer",
"version": "0.0.1",
"description": "Resizer for lambda images in a S3 bucket from a source_folder to target_folder",
"main": "index.js",
"scripts": {
"start": "node index.js"
},
"dependencies": {
"async": "^2.6.0",
"aws-sdk": "^2.240.1",
"gm": "^1.23.1",
"path": "^0.12.7"
},
"keywords": [
"node",
"lambda",
"aws"
]
}
index.js
Rename it to something like ImageResizer.js and make sure the name matches your Lambda function/handler.
/* Dependencies: */
const async = require('async');
const AWS = require('aws-sdk');
const gm = require('gm');
const util = require('util');
const path = require('path');
const imageMagick = gm.subClass({ imageMagick: true });
const WEB_WIDTH_MAX = 420;
const WEB_Q_MAX = 85;
const FOLDER_DEST = 'thumb/';
// Note: inside Lambda an IAM execution role is the preferred way to grant S3
// access; hard-coded keys are kept here only to mirror the original example.
AWS.config.update({
  accessKeyId: 'xxxxxxxxxxx',
  secretAccessKey: 'xxxxxxxxxxxxxxxxxxxx'
});
const s3 = new AWS.S3();
exports.handler = (event, context, callback) => {
// Read options from the event.
// console.log('Reading options from event:\n', util.inspect(event, {depth: 5}));
const srcBucket = event.Records[0].s3.bucket.name;
// Object key may have spaces or unicode non-ASCII characters.
const srcKey = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
const dstBucket = srcBucket;
const imageName = path.basename(srcKey);
// Infer the image type.
const typeMatch = srcKey.match(/\.([^.]*)$/);
if (!typeMatch) {
callback('Could not determine the image type.');
return;
}
const imageType = typeMatch[1];
if (!['jpg', 'jpeg', 'png'].includes(imageType.toLowerCase())) {
callback(`Unsupported image type: ${imageType}`);
return;
}
// ****************before async******************
// Download the image from S3, transform, and upload to a different S3 bucket.
async.waterfall([
function download (next) {
// Download the image from S3 into a buffer.
s3.getObject({
Bucket: srcBucket,
Key: srcKey
}, next);
},
function transformWebMax (response, next) {
imageMagick(response.Body)
.resize(WEB_WIDTH_MAX)
.quality(WEB_Q_MAX)
// .gravity('Center')
.strip()
// .crop(WEB_WIDTH_MAX, WEB_HEIGHT_MAX)
.toBuffer('jpg', (err, buffer) => {
  if (err) {
    console.log('An error occurred while saving IM to buffer: ', err);
    next(err); // propagate the error so the waterfall terminates instead of hanging
  } else {
    next(null, response, buffer);
  }
})
},
function uploadWebMax (response, buffer, next) {
// Stream the transformed image to a different S3 bucket.
const dstKeyResized = FOLDER_DEST + imageName;
s3.putObject({
Bucket: dstBucket,
Key: dstKeyResized,
Body: buffer,
ContentType: response.ContentType,
CacheControl: 'max-age=864000'
}, (err, data) => {
  if (err) {
    console.error(err, err.stack);
    next(err); // propagate the error to the waterfall's final callback
  } else {
    console.log('uploaded to web-max Successfully !!');
    next(null, response, buffer);
  }
});
}
], err => {
  if (err) {
    console.error('Unable to resize image', err);
    callback(err);
  } else {
    console.log('Successfully resized image');
    callback(null, 'Resize complete');
  }
});
};
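To smoke-test the handler outside AWS you can feed it a minimal synthetic S3 event. A sketch only: the bucket and key below are placeholders and must point at a real object you can read, and it requires valid AWS credentials plus ImageMagick installed locally:

const { handler } = require('./ImageResizer');

handler({
  Records: [{
    s3: {
      bucket: { name: 'my-bucket' },              // placeholder
      object: { key: 'files/example-photo.jpg' }  // placeholder
    }
  }]
}, {}, (err, msg) => console.log(err || msg));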
export { Images }
The code example in Meteor-Files does not export any module, so when you import the default export you get an empty object {}.
Moreover, as the example states, that code should live on the server side only, which means you cannot import it on the client.
I would solve this with three files:
/imports/api/images/server/images.js
/imports/api/images/client/images.js
/imports/api/images/index.js
The first two are where Images is defined:
- one for the server side, using the example code but exporting Images at the end
- one for the client, also exporting Images
Placing them in client/server folders ensures they are never bundled into the wrong bundle (it is important that aws-sdk does not get shipped to the client).
The third file, index.js, unifies the dependency so you don't have to pick client or server everywhere:
let Images;
if (Meteor.isServer) {
Images = require('./server/images.js');
} else if (Meteor.isClient) {
Images = require('./client/images.js');
}
export default Images
Then you can import from /imports/api/images/ and it will give you the correct collection instance for the platform.
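For example (default-export form; the import site is just an illustration):

import Images from '/imports/api/images';
// Images is now the server FilesCollection on the server
// and the client FilesCollection in the browser.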
Edit: since it looks like you are exporting Images as a named export,
export { Images }
you need to pick out the exported symbol when require-ing it:
Images = require('./server/images.js').Images
If you want to keep using named exports, you need to do the same for client/images.js and change the export default line in index.js to:
export { Images }
and then import the named symbol:
import { Images } from '/imports/api/images'
Also, client/images.js can be very small, as long as the collectionName matches:
export const Images = new FilesCollection({
storagePath: '',
collectionName: 'Images',
allowClientCode: false, // Disallow remove files from Client
});
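With that client-side collection in place, the upload handler from the question works as written. A minimal sketch, assuming the named-export variant above (the element id comes from the question):

import { Images } from '/imports/api/images';

function submitLogo() {
  const logo = document.getElementById('logoInput').files[0];
  const upload = Images.insert({
    file: logo,
    streams: 'dynamic',
    chunkSize: 'dynamic'
  }, false);
  upload.on('end', (error, fileObj) => {
    if (error) {
      alert('Error during upload: ' + error);
    } else {
      alert('File "' + fileObj.name + '" successfully uploaded');
    }
  });
  upload.start();
}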
https://github.com/cacciagrano/file-upload/
Meteor + Ostrio | File Upload
Intro:
This example uses two components. The first handles the upload, adding a file input and a progress bar. The second displays the file details (FileIndividualFile.js).
The individual-file component allows deleting, renaming, and viewing a file.
Materialize is used for styling
Tested with Meteor@1.10.1 and React 16.8.6
Uses the latest useTracker to access Meteor data.
Uses React functional components and Hooks
Based on the work of @dr-dimitru / @VeliovGroup: https://github.com/VeliovGroup/Meteor-Files/wiki/React-Example